Added vendor/

This commit is contained in:
Martin Dosch 2019-05-31 09:10:48 +02:00 committed by Martin Dosch
parent 2f94e857d4
commit 08030af4ae
14 changed files with 9821 additions and 0 deletions

761
vendor/github.com/mmcdole/gofeed/atom/parser.go generated vendored Normal file
View file

@ -0,0 +1,761 @@
package atom
import (
"encoding/base64"
"io"
"strings"
"github.com/PuerkitoBio/goquery"
ext "github.com/mmcdole/gofeed/extensions"
"github.com/mmcdole/gofeed/internal/shared"
xpp "github.com/mmcdole/goxpp"
)
var (
	// Atom elements which contain URIs
	// https://tools.ietf.org/html/rfc4287
	// Element text for these names is resolved against xml:base
	// in parseAtomText.
	uriElements = map[string]bool{
		"icon": true,
		"id":   true,
		"logo": true,
		"uri":  true,
		"url":  true, // atom 0.3
	}
	// Atom attributes which contain URIs
	// https://tools.ietf.org/html/rfc4287
	atomURIAttrs = map[string]bool{
		"href":   true,
		"scheme": true,
		"src":    true,
		"uri":    true,
	}
)
// Parser is an Atom Parser
type Parser struct {
	base *shared.XMLBase // tracks xml:base context for resolving relative URIs
}
// Parse parses an xml feed into an atom.Feed
func (ap *Parser) Parse(feed io.Reader) (*Feed, error) {
	ap.base = &shared.XMLBase{URIAttrs: atomURIAttrs}

	p := xpp.NewXMLPullParser(feed, false, shared.NewReaderLabel)
	if _, err := ap.base.FindRoot(p); err != nil {
		return nil, err
	}
	return ap.parseRoot(p)
}
// parseRoot parses the atom <feed> document element and all of its
// children into a Feed. The parser must be positioned on the feed
// start tag; both atom 1.0 and atom 0.3 element names are accepted.
func (ap *Parser) parseRoot(p *xpp.XMLPullParser) (*Feed, error) {
	if err := p.Expect(xpp.StartTag, "feed"); err != nil {
		return nil, err
	}

	atom := &Feed{}
	atom.Entries = []*Entry{}
	atom.Version = ap.parseVersion(p)
	atom.Language = ap.parseLanguage(p)

	// Repeated children are accumulated locally and only assigned to
	// the feed when at least one was seen, leaving the fields nil
	// otherwise.
	contributors := []*Person{}
	authors := []*Person{}
	categories := []*Category{}
	links := []*Link{}
	extensions := ext.Extensions{}

	for {
		tok, err := ap.base.NextTag(p)
		if err != nil {
			return nil, err
		}

		if tok == xpp.EndTag {
			break
		}

		if tok == xpp.StartTag {
			name := strings.ToLower(p.Name)

			if shared.IsExtension(p) {
				// Element in a foreign namespace: collect it as an extension.
				e, err := shared.ParseExtension(extensions, p)
				if err != nil {
					return nil, err
				}
				extensions = e
			} else if name == "title" {
				result, err := ap.parseAtomText(p)
				if err != nil {
					return nil, err
				}
				atom.Title = result
			} else if name == "id" {
				result, err := ap.parseAtomText(p)
				if err != nil {
					return nil, err
				}
				atom.ID = result
			} else if name == "updated" ||
				name == "modified" { // "modified" is the atom 0.3 spelling
				result, err := ap.parseAtomText(p)
				if err != nil {
					return nil, err
				}
				atom.Updated = result
				// Best-effort date parse; an unparseable date leaves
				// UpdatedParsed nil while keeping the raw string.
				date, err := shared.ParseDate(result)
				if err == nil {
					utcDate := date.UTC()
					atom.UpdatedParsed = &utcDate
				}
			} else if name == "subtitle" ||
				name == "tagline" { // "tagline" is the atom 0.3 spelling
				result, err := ap.parseAtomText(p)
				if err != nil {
					return nil, err
				}
				atom.Subtitle = result
			} else if name == "link" {
				result, err := ap.parseLink(p)
				if err != nil {
					return nil, err
				}
				links = append(links, result)
			} else if name == "generator" {
				result, err := ap.parseGenerator(p)
				if err != nil {
					return nil, err
				}
				atom.Generator = result
			} else if name == "icon" {
				result, err := ap.parseAtomText(p)
				if err != nil {
					return nil, err
				}
				atom.Icon = result
			} else if name == "logo" {
				result, err := ap.parseAtomText(p)
				if err != nil {
					return nil, err
				}
				atom.Logo = result
			} else if name == "rights" ||
				name == "copyright" { // "copyright" is the atom 0.3 spelling
				result, err := ap.parseAtomText(p)
				if err != nil {
					return nil, err
				}
				atom.Rights = result
			} else if name == "contributor" {
				result, err := ap.parsePerson("contributor", p)
				if err != nil {
					return nil, err
				}
				contributors = append(contributors, result)
			} else if name == "author" {
				result, err := ap.parsePerson("author", p)
				if err != nil {
					return nil, err
				}
				authors = append(authors, result)
			} else if name == "category" {
				result, err := ap.parseCategory(p)
				if err != nil {
					return nil, err
				}
				categories = append(categories, result)
			} else if name == "entry" {
				result, err := ap.parseEntry(p)
				if err != nil {
					return nil, err
				}
				atom.Entries = append(atom.Entries, result)
			} else {
				// Unknown element: skip its entire subtree.
				err := p.Skip()
				if err != nil {
					return nil, err
				}
			}
		}
	}

	if len(categories) > 0 {
		atom.Categories = categories
	}

	if len(authors) > 0 {
		atom.Authors = authors
	}

	if len(contributors) > 0 {
		atom.Contributors = contributors
	}

	if len(links) > 0 {
		atom.Links = links
	}

	if len(extensions) > 0 {
		atom.Extensions = extensions
	}

	if err := p.Expect(xpp.EndTag, "feed"); err != nil {
		return nil, err
	}

	return atom, nil
}
// parseEntry parses an atom <entry> element and its children into
// an Entry. The parser must be positioned on the entry start tag.
func (ap *Parser) parseEntry(p *xpp.XMLPullParser) (*Entry, error) {
	if err := p.Expect(xpp.StartTag, "entry"); err != nil {
		return nil, err
	}

	entry := &Entry{}

	// Repeated children are accumulated locally and only assigned to
	// the entry when at least one was seen.
	contributors := []*Person{}
	authors := []*Person{}
	categories := []*Category{}
	links := []*Link{}
	extensions := ext.Extensions{}

	for {
		tok, err := ap.base.NextTag(p)
		if err != nil {
			return nil, err
		}

		if tok == xpp.EndTag {
			break
		}

		if tok == xpp.StartTag {
			name := strings.ToLower(p.Name)

			if shared.IsExtension(p) {
				// Foreign-namespace element: collect as an extension.
				e, err := shared.ParseExtension(extensions, p)
				if err != nil {
					return nil, err
				}
				extensions = e
			} else if name == "title" {
				result, err := ap.parseAtomText(p)
				if err != nil {
					return nil, err
				}
				entry.Title = result
			} else if name == "id" {
				result, err := ap.parseAtomText(p)
				if err != nil {
					return nil, err
				}
				entry.ID = result
			} else if name == "rights" ||
				name == "copyright" { // atom 0.3 spelling
				result, err := ap.parseAtomText(p)
				if err != nil {
					return nil, err
				}
				entry.Rights = result
			} else if name == "summary" {
				result, err := ap.parseAtomText(p)
				if err != nil {
					return nil, err
				}
				entry.Summary = result
			} else if name == "source" {
				result, err := ap.parseSource(p)
				if err != nil {
					return nil, err
				}
				entry.Source = result
			} else if name == "updated" ||
				name == "modified" { // atom 0.3 spelling
				result, err := ap.parseAtomText(p)
				if err != nil {
					return nil, err
				}
				entry.Updated = result
				// Best-effort date parse; failure leaves UpdatedParsed nil.
				date, err := shared.ParseDate(result)
				if err == nil {
					utcDate := date.UTC()
					entry.UpdatedParsed = &utcDate
				}
			} else if name == "contributor" {
				result, err := ap.parsePerson("contributor", p)
				if err != nil {
					return nil, err
				}
				contributors = append(contributors, result)
			} else if name == "author" {
				result, err := ap.parsePerson("author", p)
				if err != nil {
					return nil, err
				}
				authors = append(authors, result)
			} else if name == "category" {
				result, err := ap.parseCategory(p)
				if err != nil {
					return nil, err
				}
				categories = append(categories, result)
			} else if name == "link" {
				result, err := ap.parseLink(p)
				if err != nil {
					return nil, err
				}
				links = append(links, result)
			} else if name == "published" ||
				name == "issued" { // "issued" is the atom 0.3 spelling
				result, err := ap.parseAtomText(p)
				if err != nil {
					return nil, err
				}
				entry.Published = result
				// Best-effort date parse; failure leaves PublishedParsed nil.
				date, err := shared.ParseDate(result)
				if err == nil {
					utcDate := date.UTC()
					entry.PublishedParsed = &utcDate
				}
			} else if name == "content" {
				result, err := ap.parseContent(p)
				if err != nil {
					return nil, err
				}
				entry.Content = result
			} else {
				// Unknown element: skip its entire subtree.
				err := p.Skip()
				if err != nil {
					return nil, err
				}
			}
		}
	}

	if len(categories) > 0 {
		entry.Categories = categories
	}

	if len(authors) > 0 {
		entry.Authors = authors
	}

	if len(links) > 0 {
		entry.Links = links
	}

	if len(contributors) > 0 {
		entry.Contributors = contributors
	}

	if len(extensions) > 0 {
		entry.Extensions = extensions
	}

	if err := p.Expect(xpp.EndTag, "entry"); err != nil {
		return nil, err
	}

	return entry, nil
}
// parseSource parses an atom <source> element (feed metadata copied
// into an entry) into a Source. The parser must be positioned on the
// source start tag. The accepted children mirror those of the feed
// element itself, minus entries.
func (ap *Parser) parseSource(p *xpp.XMLPullParser) (*Source, error) {
	if err := p.Expect(xpp.StartTag, "source"); err != nil {
		return nil, err
	}

	source := &Source{}

	// Repeated children are accumulated locally and only assigned
	// when at least one was seen.
	contributors := []*Person{}
	authors := []*Person{}
	categories := []*Category{}
	links := []*Link{}
	extensions := ext.Extensions{}

	for {
		tok, err := ap.base.NextTag(p)
		if err != nil {
			return nil, err
		}

		if tok == xpp.EndTag {
			break
		}

		if tok == xpp.StartTag {
			name := strings.ToLower(p.Name)

			if shared.IsExtension(p) {
				// Foreign-namespace element: collect as an extension.
				e, err := shared.ParseExtension(extensions, p)
				if err != nil {
					return nil, err
				}
				extensions = e
			} else if name == "title" {
				result, err := ap.parseAtomText(p)
				if err != nil {
					return nil, err
				}
				source.Title = result
			} else if name == "id" {
				result, err := ap.parseAtomText(p)
				if err != nil {
					return nil, err
				}
				source.ID = result
			} else if name == "updated" ||
				name == "modified" { // atom 0.3 spelling
				result, err := ap.parseAtomText(p)
				if err != nil {
					return nil, err
				}
				source.Updated = result
				// Best-effort date parse; failure leaves UpdatedParsed nil.
				date, err := shared.ParseDate(result)
				if err == nil {
					utcDate := date.UTC()
					source.UpdatedParsed = &utcDate
				}
			} else if name == "subtitle" ||
				name == "tagline" { // atom 0.3 spelling
				result, err := ap.parseAtomText(p)
				if err != nil {
					return nil, err
				}
				source.Subtitle = result
			} else if name == "link" {
				result, err := ap.parseLink(p)
				if err != nil {
					return nil, err
				}
				links = append(links, result)
			} else if name == "generator" {
				result, err := ap.parseGenerator(p)
				if err != nil {
					return nil, err
				}
				source.Generator = result
			} else if name == "icon" {
				result, err := ap.parseAtomText(p)
				if err != nil {
					return nil, err
				}
				source.Icon = result
			} else if name == "logo" {
				result, err := ap.parseAtomText(p)
				if err != nil {
					return nil, err
				}
				source.Logo = result
			} else if name == "rights" ||
				name == "copyright" { // atom 0.3 spelling
				result, err := ap.parseAtomText(p)
				if err != nil {
					return nil, err
				}
				source.Rights = result
			} else if name == "contributor" {
				result, err := ap.parsePerson("contributor", p)
				if err != nil {
					return nil, err
				}
				contributors = append(contributors, result)
			} else if name == "author" {
				result, err := ap.parsePerson("author", p)
				if err != nil {
					return nil, err
				}
				authors = append(authors, result)
			} else if name == "category" {
				result, err := ap.parseCategory(p)
				if err != nil {
					return nil, err
				}
				categories = append(categories, result)
			} else {
				// Unknown element: skip its entire subtree.
				err := p.Skip()
				if err != nil {
					return nil, err
				}
			}
		}
	}

	if len(categories) > 0 {
		source.Categories = categories
	}

	if len(authors) > 0 {
		source.Authors = authors
	}

	if len(contributors) > 0 {
		source.Contributors = contributors
	}

	if len(links) > 0 {
		source.Links = links
	}

	if len(extensions) > 0 {
		source.Extensions = extensions
	}

	if err := p.Expect(xpp.EndTag, "source"); err != nil {
		return nil, err
	}

	return source, nil
}
// parseContent parses an atom <content> element, capturing its
// type and src attributes along with the element text.
func (ap *Parser) parseContent(p *xpp.XMLPullParser) (*Content, error) {
	content := &Content{
		Type: p.Attribute("type"),
		Src:  p.Attribute("src"),
	}

	value, err := ap.parseAtomText(p)
	if err != nil {
		return nil, err
	}
	content.Value = value

	return content, nil
}
// parsePerson parses an atom person construct (author or
// contributor) with the given element name into a Person.
func (ap *Parser) parsePerson(name string, p *xpp.XMLPullParser) (*Person, error) {
	if err := p.Expect(xpp.StartTag, name); err != nil {
		return nil, err
	}

	person := &Person{}

	for {
		tok, err := ap.base.NextTag(p)
		if err != nil {
			return nil, err
		}
		if tok == xpp.EndTag {
			break
		}
		if tok != xpp.StartTag {
			continue
		}

		switch strings.ToLower(p.Name) {
		case "name":
			text, err := ap.parseAtomText(p)
			if err != nil {
				return nil, err
			}
			person.Name = text
		case "email":
			text, err := ap.parseAtomText(p)
			if err != nil {
				return nil, err
			}
			person.Email = text
		case "uri", "url", "homepage": // "url"/"homepage" are legacy spellings
			text, err := ap.parseAtomText(p)
			if err != nil {
				return nil, err
			}
			person.URI = text
		default:
			// Unknown child element: skip its subtree.
			if err := p.Skip(); err != nil {
				return nil, err
			}
		}
	}

	if err := p.Expect(xpp.EndTag, name); err != nil {
		return nil, err
	}

	return person, nil
}
// parseLink parses an atom <link> element into a Link, reading its
// attributes and defaulting rel to "alternate" when absent, per
// RFC 4287 section 4.2.7.2.
func (ap *Parser) parseLink(p *xpp.XMLPullParser) (*Link, error) {
	if err := p.Expect(xpp.StartTag, "link"); err != nil {
		return nil, err
	}

	link := &Link{
		Href:     p.Attribute("href"),
		Hreflang: p.Attribute("hreflang"),
		Type:     p.Attribute("type"),
		Length:   p.Attribute("length"),
		Title:    p.Attribute("title"),
		Rel:      p.Attribute("rel"),
	}
	if link.Rel == "" {
		link.Rel = "alternate"
	}

	if err := p.Skip(); err != nil {
		return nil, err
	}
	if err := p.Expect(xpp.EndTag, "link"); err != nil {
		return nil, err
	}

	return link, nil
}
// parseCategory parses an atom <category> element, which carries
// its data entirely in attributes.
func (ap *Parser) parseCategory(p *xpp.XMLPullParser) (*Category, error) {
	if err := p.Expect(xpp.StartTag, "category"); err != nil {
		return nil, err
	}

	category := &Category{
		Term:   p.Attribute("term"),
		Scheme: p.Attribute("scheme"),
		Label:  p.Attribute("label"),
	}

	if err := p.Skip(); err != nil {
		return nil, err
	}
	if err := p.Expect(xpp.EndTag, "category"); err != nil {
		return nil, err
	}

	return category, nil
}
// parseGenerator parses an atom <generator> element, handling both
// the atom 1.0 "uri" attribute and the atom 0.3 "url" attribute.
func (ap *Parser) parseGenerator(p *xpp.XMLPullParser) (*Generator, error) {
	if err := p.Expect(xpp.StartTag, "generator"); err != nil {
		return nil, err
	}

	g := &Generator{}

	// Prefer the atom 1.0 attribute name over the 0.3 one.
	if uri := p.Attribute("uri"); uri != "" {
		g.URI = uri
	} else if url := p.Attribute("url"); url != "" {
		g.URI = url
	}
	g.Version = p.Attribute("version")

	value, err := ap.parseAtomText(p)
	if err != nil {
		return nil, err
	}
	g.Value = value

	if err := p.Expect(xpp.EndTag, "generator"); err != nil {
		return nil, err
	}

	return g, nil
}
// parseAtomText decodes the current element's text content,
// handling the atom type/mode attributes: plain text, html and
// xhtml content, CDATA sections, and (for any other type) content
// assumed to be base64-encoded. Text of URI-bearing elements is
// additionally resolved against the current xml:base.
func (ap *Parser) parseAtomText(p *xpp.XMLPullParser) (string, error) {
	var text struct {
		Type     string `xml:"type,attr"`
		Mode     string `xml:"mode,attr"`
		InnerXML string `xml:",innerxml"`
	}

	err := p.DecodeElement(&text)
	if err != nil {
		return "", err
	}

	result := text.InnerXML
	result = strings.TrimSpace(result)

	lowerType := strings.ToLower(text.Type)
	lowerMode := strings.ToLower(text.Mode)

	if strings.Contains(result, "<![CDATA[") {
		// CDATA content is taken verbatim (entities not decoded).
		result = shared.StripCDATA(result)
		if lowerType == "html" || strings.Contains(lowerType, "xhtml") {
			result, _ = ap.base.ResolveHTML(result)
		}
	} else {
		// decode non-CDATA contents depending on type
		if lowerType == "text" ||
			strings.HasPrefix(lowerType, "text/") ||
			(lowerType == "" && lowerMode == "") {
			result, err = shared.DecodeEntities(result)
		} else if strings.Contains(lowerType, "xhtml") {
			result = ap.stripWrappingDiv(result)
			result, _ = ap.base.ResolveHTML(result)
		} else if lowerType == "html" {
			result = ap.stripWrappingDiv(result)
			result, err = shared.DecodeEntities(result)
			if err == nil {
				result, _ = ap.base.ResolveHTML(result)
			}
		} else {
			// Any other type is treated as base64-encoded content;
			// a decode failure leaves the raw text untouched.
			decodedStr, err := base64.StdEncoding.DecodeString(result)
			if err == nil {
				result = string(decodedStr)
			}
		}
	}

	// resolve relative URIs in URI-containing elements according to xml:base
	name := strings.ToLower(p.Name)
	if uriElements[name] {
		// NOTE: err is deliberately shadowed by := here, so a
		// ResolveURL failure does not overwrite an earlier
		// DecodeEntities error carried in the outer err.
		resolved, err := ap.base.ResolveURL(result)
		if err == nil {
			result = resolved
		}
	}

	// err may still carry a DecodeEntities failure from above.
	return result, err
}
// parseLanguage returns the feed language taken from the lang
// (xml:lang) attribute of the current element.
func (ap *Parser) parseLanguage(p *xpp.XMLPullParser) string {
	return p.Attribute("lang")
}
// parseVersion determines the atom version of the document from its
// version attribute, falling back to the xmlns declaration.
func (ap *Parser) parseVersion(p *xpp.XMLPullParser) string {
	if ver := p.Attribute("version"); ver != "" {
		return ver
	}

	switch p.Attribute("xmlns") {
	case "http://purl.org/atom/ns#":
		return "0.3"
	case "http://www.w3.org/2005/Atom":
		return "1.0"
	}
	return ""
}
// stripWrappingDiv removes the <div> wrapper that atom requires
// around xhtml content, returning the div's inner HTML. The content
// is returned unchanged if it cannot be parsed or the body's
// children are not a single top-level div.
func (ap *Parser) stripWrappingDiv(content string) (result string) {
	result = content
	r := strings.NewReader(result)
	doc, err := goquery.NewDocumentFromReader(r)
	if err == nil {
		root := doc.Find("body").Children()
		// Only strip when the div is the sole child of the body.
		if root.Is("div") && root.Siblings().Size() == 0 {
			html, err := root.Unwrap().Html()
			if err == nil {
				result = html
			}
		}
	}
	return
}

48
vendor/github.com/mmcdole/gofeed/detector.go generated vendored Normal file
View file

@ -0,0 +1,48 @@
package gofeed
import (
"io"
"strings"
"github.com/mmcdole/gofeed/internal/shared"
xpp "github.com/mmcdole/goxpp"
)
// FeedType represents one of the possible feed
// types that we can detect.
type FeedType int

const (
	// FeedTypeUnknown represents a feed that could not have its
	// type determined.
	FeedTypeUnknown FeedType = iota
	// FeedTypeAtom represents an Atom feed
	FeedTypeAtom
	// FeedTypeRSS represents an RSS feed
	FeedTypeRSS
)
// DetectFeedType attempts to determine the type of feed
// by looking for specific xml elements unique to the
// various feed types.
func DetectFeedType(feed io.Reader) FeedType {
	p := xpp.NewXMLPullParser(feed, false, shared.NewReaderLabel)

	xmlBase := shared.XMLBase{}
	if _, err := xmlBase.FindRoot(p); err != nil {
		return FeedTypeUnknown
	}

	// The document element's name identifies the feed flavor.
	switch strings.ToLower(p.Name) {
	case "rdf", "rss":
		return FeedTypeRSS
	case "feed":
		return FeedTypeAtom
	default:
		return FeedTypeUnknown
	}
}

View file

@ -0,0 +1,194 @@
package shared
import (
"bytes"
"errors"
"fmt"
"regexp"
"strconv"
"strings"
xpp "github.com/mmcdole/goxpp"
)
var (
	// Patterns for the name/email combinations commonly found in
	// RSS author fields; see ParseNameAddress.
	emailNameRgx = regexp.MustCompile(`^([^@]+@[^\s]+)\s+\(([^@]+)\)$`)
	nameEmailRgx = regexp.MustCompile(`^([^@]+)\s+\(([^@]+@[^)]+)\)$`)
	nameOnlyRgx  = regexp.MustCompile(`^([^@()]+)$`)
	emailOnlyRgx = regexp.MustCompile(`^([^@()]+@[^@()]+)$`)

	// TruncatedEntity is returned by DecodeEntities when an XML
	// entity is not terminated with ';'.
	// NOTE(review): exported error values are conventionally named
	// ErrXxx; renaming these would break the public API, so they
	// are left as-is.
	TruncatedEntity = errors.New("truncated entity")
	// InvalidNumericReference is returned by DecodeEntities when a
	// numeric character reference cannot be parsed.
	InvalidNumericReference = errors.New("invalid numeric reference")
)

// CDATA section delimiters as they appear in raw XML text.
const CDATA_START = "<![CDATA["
const CDATA_END = "]]>"
// ParseText is a helper function for parsing the text
// from the current element of the XMLPullParser.
// This function can handle parsing naked XML text from
// an element.
func ParseText(p *xpp.XMLPullParser) (string, error) {
	var text struct {
		Type     string `xml:"type,attr"`
		InnerXML string `xml:",innerxml"`
	}

	if err := p.DecodeElement(&text); err != nil {
		return "", err
	}

	result := strings.TrimSpace(text.InnerXML)

	// CDATA content is extracted verbatim; plain text has its
	// entities decoded.
	if strings.Contains(result, CDATA_START) {
		return StripCDATA(result), nil
	}
	return DecodeEntities(result)
}
// StripCDATA removes CDATA tags from the string, keeping the raw
// text inside them verbatim. Content outside of CDATA sections is
// passed through DecodeEntities.
func StripCDATA(str string) string {
	buf := bytes.NewBuffer([]byte{})

	curr := 0

	for curr < len(str) {
		// Find the next CDATA section.
		start := indexAt(str, CDATA_START, curr)
		if start == -1 {
			// No more CDATA: decode the remainder as plain text.
			dec, _ := DecodeEntities(str[curr:])
			buf.Write([]byte(dec))
			return buf.String()
		}

		end := indexAt(str, CDATA_END, start)
		if end == -1 {
			// Unterminated CDATA: treat the remainder as plain text.
			dec, _ := DecodeEntities(str[curr:])
			buf.Write([]byte(dec))
			return buf.String()
		}

		// Decode and emit the plain text preceding the CDATA section
		// (previously this text was silently dropped), then emit the
		// CDATA contents verbatim.
		dec, _ := DecodeEntities(str[curr:start])
		buf.Write([]byte(dec))
		buf.Write([]byte(str[start+len(CDATA_START) : end]))

		// BUG FIX: indexAt returns an absolute index, so the old
		// advance of curr + end + len(CDATA_END) double-counted curr
		// and skipped all content after the second CDATA section.
		curr = end + len(CDATA_END)
	}

	return buf.String()
}
// DecodeEntities decodes escaped XML entities
// in a string and returns the unescaped string.
// It handles numeric character references (decimal and hex) and
// the five predefined XML entities; any other entity name, or a
// '&' with no terminating ';', yields an error.
func DecodeEntities(str string) (string, error) {
	data := []byte(str)
	buf := bytes.NewBuffer([]byte{})

	for len(data) > 0 {
		// Find the next entity
		idx := bytes.IndexByte(data, '&')
		if idx == -1 {
			buf.Write(data)
			break
		}

		// Write and skip everything before it
		buf.Write(data[:idx])
		data = data[idx+1:]

		if len(data) == 0 {
			return "", TruncatedEntity
		}

		// Find the end of the entity
		end := bytes.IndexByte(data, ';')
		if end == -1 {
			return "", TruncatedEntity
		}

		if data[0] == '#' {
			// Numerical character reference
			var str string
			base := 10

			if len(data) > 1 && data[1] == 'x' {
				// Hex form: &#xHH;
				str = string(data[2:end])
				base = 16
			} else {
				// Decimal form: &#DD;
				str = string(data[1:end])
			}

			// 32-bit limit keeps the value within the rune range.
			i, err := strconv.ParseUint(str, base, 32)
			if err != nil {
				return "", InvalidNumericReference
			}

			buf.WriteRune(rune(i))
		} else {
			// Predefined entity
			name := string(data[:end])

			var c byte
			switch name {
			case "lt":
				c = '<'
			case "gt":
				c = '>'
			case "quot":
				c = '"'
			case "apos":
				c = '\''
			case "amp":
				c = '&'
			default:
				return "", fmt.Errorf("unknown predefined "+
					"entity &%s;", name)
			}

			buf.WriteByte(c)
		}

		// Skip the entity
		data = data[end+1:]
	}

	return buf.String(), nil
}
// ParseNameAddress parses name/email strings commonly
// found in RSS feeds of the format "Example Name (example@site.com)"
// and other variations of this format.
func ParseNameAddress(nameAddressText string) (name string, address string) {
	if nameAddressText == "" {
		return
	}

	// Try the known formats in order of specificity; the first
	// matching pattern wins.
	switch {
	case emailNameRgx.MatchString(nameAddressText):
		m := emailNameRgx.FindStringSubmatch(nameAddressText)
		address = m[1]
		name = m[2]
	case nameEmailRgx.MatchString(nameAddressText):
		m := nameEmailRgx.FindStringSubmatch(nameAddressText)
		name = m[1]
		address = m[2]
	case nameOnlyRgx.MatchString(nameAddressText):
		name = nameOnlyRgx.FindStringSubmatch(nameAddressText)[1]
	case emailOnlyRgx.MatchString(nameAddressText):
		address = emailOnlyRgx.FindStringSubmatch(nameAddressText)[1]
	}
	return
}
// indexAt returns the absolute index of the first occurrence of
// substr in str at or after position start, or -1 if absent.
func indexAt(str, substr string, start int) int {
	if idx := strings.Index(str[start:], substr); idx > -1 {
		return idx + start
	}
	return -1
}

770
vendor/github.com/mmcdole/gofeed/rss/parser.go generated vendored Normal file
View file

@ -0,0 +1,770 @@
package rss
import (
"fmt"
"io"
"strings"
ext "github.com/mmcdole/gofeed/extensions"
"github.com/mmcdole/gofeed/internal/shared"
xpp "github.com/mmcdole/goxpp"
)
// Parser is a RSS Parser
type Parser struct {
	base *shared.XMLBase // tracks xml:base context while parsing
}
// Parse parses an xml feed into an rss.Feed
func (rp *Parser) Parse(feed io.Reader) (*Feed, error) {
	rp.base = &shared.XMLBase{}

	p := xpp.NewXMLPullParser(feed, false, shared.NewReaderLabel)
	if _, err := rp.base.FindRoot(p); err != nil {
		return nil, err
	}
	return rp.parseRoot(p)
}
// parseRoot parses the document element, which may be either <rss>
// (RSS 0.91/2.0) or <rdf> (RSS 0.9/1.0), into a Feed. RDF feeds may
// place items, textinput and image elements at the root rather than
// inside the channel; those are merged into the channel afterwards.
func (rp *Parser) parseRoot(p *xpp.XMLPullParser) (*Feed, error) {
	rssErr := p.Expect(xpp.StartTag, "rss")
	rdfErr := p.Expect(xpp.StartTag, "rdf")
	if rssErr != nil && rdfErr != nil {
		return nil, fmt.Errorf("%s or %s", rssErr.Error(), rdfErr.Error())
	}

	// Items found in feed root
	var channel *Feed
	var textinput *TextInput
	var image *Image
	items := []*Item{}

	ver := rp.parseVersion(p)

	for {
		tok, err := rp.base.NextTag(p)
		if err != nil {
			return nil, err
		}

		if tok == xpp.EndTag {
			break
		}

		if tok == xpp.StartTag {
			// Skip any extensions found in the feed root.
			// NOTE(review): p.Skip errors are discarded here and in
			// the default branch below — presumably intentional
			// best-effort parsing; confirm before changing.
			if shared.IsExtension(p) {
				p.Skip()
				continue
			}

			name := strings.ToLower(p.Name)

			if name == "channel" {
				channel, err = rp.parseChannel(p)
				if err != nil {
					return nil, err
				}
			} else if name == "item" {
				// RDF-style item at the feed root.
				item, err := rp.parseItem(p)
				if err != nil {
					return nil, err
				}
				items = append(items, item)
			} else if name == "textinput" {
				textinput, err = rp.parseTextInput(p)
				if err != nil {
					return nil, err
				}
			} else if name == "image" {
				image, err = rp.parseImage(p)
				if err != nil {
					return nil, err
				}
			} else {
				p.Skip()
			}
		}
	}

	rssErr = p.Expect(xpp.EndTag, "rss")
	rdfErr = p.Expect(xpp.EndTag, "rdf")
	if rssErr != nil && rdfErr != nil {
		return nil, fmt.Errorf("%s or %s", rssErr.Error(), rdfErr.Error())
	}

	// Fold root-level RDF elements into the channel.
	if channel == nil {
		channel = &Feed{}
		channel.Items = []*Item{}
	}

	if len(items) > 0 {
		channel.Items = append(channel.Items, items...)
	}

	if textinput != nil {
		channel.TextInput = textinput
	}

	if image != nil {
		channel.Image = image
	}

	channel.Version = ver
	return channel, nil
}
// parseChannel parses an rss <channel> element and its children
// into a Feed. The parser must be positioned on the channel start
// tag. Elements in foreign namespaces are collected as extensions,
// with itunes and dublin core extensions additionally promoted to
// typed fields.
func (rp *Parser) parseChannel(p *xpp.XMLPullParser) (rss *Feed, err error) {
	if err = p.Expect(xpp.StartTag, "channel"); err != nil {
		return nil, err
	}

	rss = &Feed{}
	rss.Items = []*Item{}

	extensions := ext.Extensions{}
	categories := []*Category{}

	for {
		tok, err := rp.base.NextTag(p)
		if err != nil {
			return nil, err
		}

		if tok == xpp.EndTag {
			break
		}

		if tok == xpp.StartTag {
			name := strings.ToLower(p.Name)

			if shared.IsExtension(p) {
				// NOTE: ext here shadows the ext package alias
				// within this branch.
				ext, err := shared.ParseExtension(extensions, p)
				if err != nil {
					return nil, err
				}
				extensions = ext
			} else if name == "title" {
				result, err := shared.ParseText(p)
				if err != nil {
					return nil, err
				}
				rss.Title = result
			} else if name == "description" {
				result, err := shared.ParseText(p)
				if err != nil {
					return nil, err
				}
				rss.Description = result
			} else if name == "link" {
				result, err := shared.ParseText(p)
				if err != nil {
					return nil, err
				}
				rss.Link = result
			} else if name == "language" {
				result, err := shared.ParseText(p)
				if err != nil {
					return nil, err
				}
				rss.Language = result
			} else if name == "copyright" {
				result, err := shared.ParseText(p)
				if err != nil {
					return nil, err
				}
				rss.Copyright = result
			} else if name == "managingeditor" {
				result, err := shared.ParseText(p)
				if err != nil {
					return nil, err
				}
				rss.ManagingEditor = result
			} else if name == "webmaster" {
				result, err := shared.ParseText(p)
				if err != nil {
					return nil, err
				}
				rss.WebMaster = result
			} else if name == "pubdate" {
				result, err := shared.ParseText(p)
				if err != nil {
					return nil, err
				}
				rss.PubDate = result
				// Best-effort date parse; failure leaves the parsed
				// field nil while keeping the raw string.
				date, err := shared.ParseDate(result)
				if err == nil {
					utcDate := date.UTC()
					rss.PubDateParsed = &utcDate
				}
			} else if name == "lastbuilddate" {
				result, err := shared.ParseText(p)
				if err != nil {
					return nil, err
				}
				rss.LastBuildDate = result
				date, err := shared.ParseDate(result)
				if err == nil {
					utcDate := date.UTC()
					rss.LastBuildDateParsed = &utcDate
				}
			} else if name == "generator" {
				result, err := shared.ParseText(p)
				if err != nil {
					return nil, err
				}
				rss.Generator = result
			} else if name == "docs" {
				result, err := shared.ParseText(p)
				if err != nil {
					return nil, err
				}
				rss.Docs = result
			} else if name == "ttl" {
				result, err := shared.ParseText(p)
				if err != nil {
					return nil, err
				}
				rss.TTL = result
			} else if name == "rating" {
				result, err := shared.ParseText(p)
				if err != nil {
					return nil, err
				}
				rss.Rating = result
			} else if name == "skiphours" {
				result, err := rp.parseSkipHours(p)
				if err != nil {
					return nil, err
				}
				rss.SkipHours = result
			} else if name == "skipdays" {
				result, err := rp.parseSkipDays(p)
				if err != nil {
					return nil, err
				}
				rss.SkipDays = result
			} else if name == "item" {
				result, err := rp.parseItem(p)
				if err != nil {
					return nil, err
				}
				rss.Items = append(rss.Items, result)
			} else if name == "cloud" {
				result, err := rp.parseCloud(p)
				if err != nil {
					return nil, err
				}
				rss.Cloud = result
			} else if name == "category" {
				result, err := rp.parseCategory(p)
				if err != nil {
					return nil, err
				}
				categories = append(categories, result)
			} else if name == "image" {
				result, err := rp.parseImage(p)
				if err != nil {
					return nil, err
				}
				rss.Image = result
			} else if name == "textinput" {
				result, err := rp.parseTextInput(p)
				if err != nil {
					return nil, err
				}
				rss.TextInput = result
			} else {
				// Skip element as it isn't an extension and not
				// part of the spec
				p.Skip()
			}
		}
	}

	if err = p.Expect(xpp.EndTag, "channel"); err != nil {
		return nil, err
	}

	if len(categories) > 0 {
		rss.Categories = categories
	}

	if len(extensions) > 0 {
		rss.Extensions = extensions

		// Promote well-known extension namespaces to typed fields.
		if itunes, ok := rss.Extensions["itunes"]; ok {
			rss.ITunesExt = ext.NewITunesFeedExtension(itunes)
		}

		if dc, ok := rss.Extensions["dc"]; ok {
			rss.DublinCoreExt = ext.NewDublinCoreExtension(dc)
		}
	}

	return rss, nil
}
// parseItem parses an rss <item> element and its children into an
// Item. The parser must be positioned on the item start tag.
// Foreign-namespace elements are collected as extensions, with
// itunes and dublin core promoted to typed fields afterwards.
func (rp *Parser) parseItem(p *xpp.XMLPullParser) (item *Item, err error) {
	if err = p.Expect(xpp.StartTag, "item"); err != nil {
		return nil, err
	}

	item = &Item{}
	extensions := ext.Extensions{}
	categories := []*Category{}

	for {
		tok, err := rp.base.NextTag(p)
		if err != nil {
			return nil, err
		}

		if tok == xpp.EndTag {
			break
		}

		if tok == xpp.StartTag {
			name := strings.ToLower(p.Name)

			if shared.IsExtension(p) {
				ext, err := shared.ParseExtension(extensions, p)
				if err != nil {
					return nil, err
				}
				// FIX: update the local accumulator (as parseChannel
				// does) instead of assigning item.Extensions directly;
				// the old form only populated the itunes/dc typed
				// fields below because ParseExtension happens to
				// mutate the passed-in map.
				extensions = ext
			} else if name == "title" {
				result, err := shared.ParseText(p)
				if err != nil {
					return nil, err
				}
				item.Title = result
			} else if name == "description" {
				result, err := shared.ParseText(p)
				if err != nil {
					return nil, err
				}
				item.Description = result
			} else if name == "encoded" {
				// Only honor <content:encoded>; other namespaces'
				// "encoded" elements are ignored.
				space := strings.TrimSpace(p.Space)
				if prefix, ok := p.Spaces[space]; ok && prefix == "content" {
					result, err := shared.ParseText(p)
					if err != nil {
						return nil, err
					}
					item.Content = result
				}
			} else if name == "link" {
				result, err := shared.ParseText(p)
				if err != nil {
					return nil, err
				}
				item.Link = result
			} else if name == "author" {
				result, err := shared.ParseText(p)
				if err != nil {
					return nil, err
				}
				item.Author = result
			} else if name == "comments" {
				result, err := shared.ParseText(p)
				if err != nil {
					return nil, err
				}
				item.Comments = result
			} else if name == "pubdate" {
				result, err := shared.ParseText(p)
				if err != nil {
					return nil, err
				}
				item.PubDate = result
				// Best-effort date parse; failure leaves PubDateParsed nil.
				date, err := shared.ParseDate(result)
				if err == nil {
					utcDate := date.UTC()
					item.PubDateParsed = &utcDate
				}
			} else if name == "source" {
				result, err := rp.parseSource(p)
				if err != nil {
					return nil, err
				}
				item.Source = result
			} else if name == "enclosure" {
				result, err := rp.parseEnclosure(p)
				if err != nil {
					return nil, err
				}
				item.Enclosure = result
			} else if name == "guid" {
				result, err := rp.parseGUID(p)
				if err != nil {
					return nil, err
				}
				item.GUID = result
			} else if name == "category" {
				result, err := rp.parseCategory(p)
				if err != nil {
					return nil, err
				}
				categories = append(categories, result)
			} else {
				// Skip any elements not part of the item spec
				p.Skip()
			}
		}
	}

	if len(categories) > 0 {
		item.Categories = categories
	}

	if len(extensions) > 0 {
		item.Extensions = extensions

		// Promote well-known extension namespaces to typed fields.
		if itunes, ok := item.Extensions["itunes"]; ok {
			item.ITunesExt = ext.NewITunesItemExtension(itunes)
		}

		if dc, ok := item.Extensions["dc"]; ok {
			item.DublinCoreExt = ext.NewDublinCoreExtension(dc)
		}
	}

	if err = p.Expect(xpp.EndTag, "item"); err != nil {
		return nil, err
	}

	return item, nil
}
// parseSource parses an rss <source> element into a Source,
// capturing the url attribute and the element text as the title.
func (rp *Parser) parseSource(p *xpp.XMLPullParser) (*Source, error) {
	if err := p.Expect(xpp.StartTag, "source"); err != nil {
		return nil, err
	}

	source := &Source{URL: p.Attribute("url")}

	title, err := shared.ParseText(p)
	if err != nil {
		// The partially-built source accompanies the error.
		return source, err
	}
	source.Title = title

	if err := p.Expect(xpp.EndTag, "source"); err != nil {
		return nil, err
	}
	return source, nil
}
// parseEnclosure parses an rss <enclosure> element, which carries
// its data entirely in attributes; any element text is discarded.
func (rp *Parser) parseEnclosure(p *xpp.XMLPullParser) (*Enclosure, error) {
	if err := p.Expect(xpp.StartTag, "enclosure"); err != nil {
		return nil, err
	}

	enclosure := &Enclosure{
		URL:    p.Attribute("url"),
		Length: p.Attribute("length"),
		Type:   p.Attribute("type"),
	}

	// Ignore any enclosure text
	if _, err := p.NextText(); err != nil {
		// The partially-built enclosure accompanies the error.
		return enclosure, err
	}

	if err := p.Expect(xpp.EndTag, "enclosure"); err != nil {
		return nil, err
	}
	return enclosure, nil
}
// parseImage parses an rss <image> element and its child elements
// into an Image.
func (rp *Parser) parseImage(p *xpp.XMLPullParser) (*Image, error) {
	if err := p.Expect(xpp.StartTag, "image"); err != nil {
		return nil, err
	}

	image := &Image{}

	for {
		tok, err := rp.base.NextTag(p)
		if err != nil {
			// The partially-built image accompanies the error.
			return image, err
		}
		if tok == xpp.EndTag {
			break
		}
		if tok != xpp.StartTag {
			continue
		}

		switch strings.ToLower(p.Name) {
		case "url":
			text, err := shared.ParseText(p)
			if err != nil {
				return nil, err
			}
			image.URL = text
		case "title":
			text, err := shared.ParseText(p)
			if err != nil {
				return nil, err
			}
			image.Title = text
		case "link":
			text, err := shared.ParseText(p)
			if err != nil {
				return nil, err
			}
			image.Link = text
		case "width":
			text, err := shared.ParseText(p)
			if err != nil {
				return nil, err
			}
			image.Width = text
		case "height":
			text, err := shared.ParseText(p)
			if err != nil {
				return nil, err
			}
			image.Height = text
		case "description":
			text, err := shared.ParseText(p)
			if err != nil {
				return nil, err
			}
			image.Description = text
		default:
			p.Skip()
		}
	}

	if err := p.Expect(xpp.EndTag, "image"); err != nil {
		return nil, err
	}
	return image, nil
}
// parseGUID parses an rss <guid> element into a GUID, capturing
// the isPermalink attribute and the element text.
func (rp *Parser) parseGUID(p *xpp.XMLPullParser) (*GUID, error) {
	if err := p.Expect(xpp.StartTag, "guid"); err != nil {
		return nil, err
	}

	guid := &GUID{IsPermalink: p.Attribute("isPermalink")}

	value, err := shared.ParseText(p)
	if err != nil {
		// Preserve the original bare-return behavior: the
		// partially-built guid accompanies the error.
		return guid, err
	}
	guid.Value = value

	if err := p.Expect(xpp.EndTag, "guid"); err != nil {
		return nil, err
	}
	return guid, nil
}
// parseCategory parses an rss <category> element into a Category,
// capturing the domain attribute and the element text.
func (rp *Parser) parseCategory(p *xpp.XMLPullParser) (*Category, error) {
	if err := p.Expect(xpp.StartTag, "category"); err != nil {
		return nil, err
	}

	category := &Category{Domain: p.Attribute("domain")}

	value, err := shared.ParseText(p)
	if err != nil {
		return nil, err
	}
	category.Value = value

	if err := p.Expect(xpp.EndTag, "category"); err != nil {
		return nil, err
	}
	return category, nil
}
// parseTextInput parses an rss <textinput> element and its child
// elements into a TextInput.
func (rp *Parser) parseTextInput(p *xpp.XMLPullParser) (*TextInput, error) {
	if err := p.Expect(xpp.StartTag, "textinput"); err != nil {
		return nil, err
	}

	ti := &TextInput{}

	for {
		tok, err := rp.base.NextTag(p)
		if err != nil {
			return nil, err
		}
		if tok == xpp.EndTag {
			break
		}
		if tok != xpp.StartTag {
			continue
		}

		switch strings.ToLower(p.Name) {
		case "title":
			text, err := shared.ParseText(p)
			if err != nil {
				return nil, err
			}
			ti.Title = text
		case "description":
			text, err := shared.ParseText(p)
			if err != nil {
				return nil, err
			}
			ti.Description = text
		case "name":
			text, err := shared.ParseText(p)
			if err != nil {
				return nil, err
			}
			ti.Name = text
		case "link":
			text, err := shared.ParseText(p)
			if err != nil {
				return nil, err
			}
			ti.Link = text
		default:
			p.Skip()
		}
	}

	if err := p.Expect(xpp.EndTag, "textinput"); err != nil {
		return nil, err
	}
	return ti, nil
}
// parseSkipHours parses an rss <skiphours> element, collecting the
// text of its <hour> children.
func (rp *Parser) parseSkipHours(p *xpp.XMLPullParser) ([]string, error) {
	if err := p.Expect(xpp.StartTag, "skiphours"); err != nil {
		return nil, err
	}

	hours := []string{}

	for {
		tok, err := rp.base.NextTag(p)
		if err != nil {
			return nil, err
		}
		if tok == xpp.EndTag {
			break
		}
		if tok != xpp.StartTag {
			continue
		}

		if strings.ToLower(p.Name) == "hour" {
			hour, err := shared.ParseText(p)
			if err != nil {
				return nil, err
			}
			hours = append(hours, hour)
		} else {
			p.Skip()
		}
	}

	if err := p.Expect(xpp.EndTag, "skiphours"); err != nil {
		return nil, err
	}
	return hours, nil
}
// parseSkipDays reads an RSS <skipDays> element, collecting the text
// of each child <day> element; other children are skipped.
func (rp *Parser) parseSkipDays(p *xpp.XMLPullParser) ([]string, error) {
	if err := p.Expect(xpp.StartTag, "skipdays"); err != nil {
		return nil, err
	}

	days := []string{}

	for {
		tok, err := rp.base.NextTag(p)
		if err != nil {
			return nil, err
		}
		if tok == xpp.EndTag {
			break
		}
		if tok != xpp.StartTag {
			continue
		}

		if strings.ToLower(p.Name) != "day" {
			p.Skip()
			continue
		}

		text, err := shared.ParseText(p)
		if err != nil {
			return nil, err
		}
		days = append(days, text)
	}

	if err := p.Expect(xpp.EndTag, "skipdays"); err != nil {
		return nil, err
	}
	return days, nil
}
// parseCloud reads an RSS <cloud> element, which carries all of its
// data in attributes.
//
// Fix: the original discarded the error returned by NextTag, so a
// malformed element body could leave the parser in an inconsistent
// position with only a confusing Expect failure; the error is now
// propagated to the caller.
func (rp *Parser) parseCloud(p *xpp.XMLPullParser) (*Cloud, error) {
	if err := p.Expect(xpp.StartTag, "cloud"); err != nil {
		return nil, err
	}

	cloud := &Cloud{
		Domain:            p.Attribute("domain"),
		Port:              p.Attribute("port"),
		Path:              p.Attribute("path"),
		RegisterProcedure: p.Attribute("registerProcedure"),
		Protocol:          p.Attribute("protocol"),
	}

	// Advance past the (empty) element body to its end tag.
	if _, err := rp.base.NextTag(p); err != nil {
		return nil, err
	}

	if err := p.Expect(xpp.EndTag, "cloud"); err != nil {
		return nil, err
	}
	return cloud, nil
}
// parseVersion determines the RSS version from the root element: the
// "version" attribute on an <rss> root, or the xmlns attribute on an
// RDF root for the Netscape 0.9 and RSS 1.0 vocabularies. Returns the
// empty string when the version cannot be determined.
func (rp *Parser) parseVersion(p *xpp.XMLPullParser) string {
	switch strings.ToLower(p.Name) {
	case "rss":
		return p.Attribute("version")
	case "rdf":
		switch p.Attribute("xmlns") {
		case "http://channel.netscape.com/rdf/simple/0.9/",
			"http://my.netscape.com/rdf/simple/0.9/":
			return "0.9"
		case "http://purl.org/rss/1.0/":
			return "1.0"
		}
	}
	return ""
}

686
vendor/github.com/mmcdole/gofeed/translator.go generated vendored Normal file
View file

@ -0,0 +1,686 @@
package gofeed
import (
"fmt"
"strings"
"time"
"github.com/mmcdole/gofeed/atom"
ext "github.com/mmcdole/gofeed/extensions"
"github.com/mmcdole/gofeed/internal/shared"
"github.com/mmcdole/gofeed/rss"
)
// Translator converts a particular feed (atom.Feed or rss.Feed)
// into the generic Feed struct
type Translator interface {
	// Translate converts the given concrete feed value into the generic
	// Feed. Implementations return an error when feed is not the
	// concrete type they handle.
	Translate(feed interface{}) (*Feed, error)
}
// DefaultRSSTranslator converts an rss.Feed struct
// into the generic Feed struct.
//
// This default implementation defines a set of
// mapping rules between rss.Feed -> Feed
// for each of the fields in Feed.
type DefaultRSSTranslator struct{}
// Translate converts an RSS feed into the universal
// feed type.
func (t *DefaultRSSTranslator) Translate(feed interface{}) (*Feed, error) {
rss, found := feed.(*rss.Feed)
if !found {
return nil, fmt.Errorf("Feed did not match expected type of *rss.Feed")
}
result := &Feed{}
result.Title = t.translateFeedTitle(rss)
result.Description = t.translateFeedDescription(rss)
result.Link = t.translateFeedLink(rss)
result.FeedLink = t.translateFeedFeedLink(rss)
result.Updated = t.translateFeedUpdated(rss)
result.UpdatedParsed = t.translateFeedUpdatedParsed(rss)
result.Published = t.translateFeedPublished(rss)
result.PublishedParsed = t.translateFeedPublishedParsed(rss)
result.Author = t.translateFeedAuthor(rss)
result.Language = t.translateFeedLanguage(rss)
result.Image = t.translateFeedImage(rss)
result.Copyright = t.translateFeedCopyright(rss)
result.Generator = t.translateFeedGenerator(rss)
result.Categories = t.translateFeedCategories(rss)
result.Items = t.translateFeedItems(rss)
result.ITunesExt = rss.ITunesExt
result.DublinCoreExt = rss.DublinCoreExt
result.Extensions = rss.Extensions
result.FeedVersion = rss.Version
result.FeedType = "rss"
return result, nil
}
func (t *DefaultRSSTranslator) translateFeedItem(rssItem *rss.Item) (item *Item) {
item = &Item{}
item.Title = t.translateItemTitle(rssItem)
item.Description = t.translateItemDescription(rssItem)
item.Content = t.translateItemContent(rssItem)
item.Link = t.translateItemLink(rssItem)
item.Published = t.translateItemPublished(rssItem)
item.PublishedParsed = t.translateItemPublishedParsed(rssItem)
item.Author = t.translateItemAuthor(rssItem)
item.GUID = t.translateItemGUID(rssItem)
item.Image = t.translateItemImage(rssItem)
item.Categories = t.translateItemCategories(rssItem)
item.Enclosures = t.translateItemEnclosures(rssItem)
item.DublinCoreExt = rssItem.DublinCoreExt
item.ITunesExt = rssItem.ITunesExt
item.Extensions = rssItem.Extensions
return
}
func (t *DefaultRSSTranslator) translateFeedTitle(rss *rss.Feed) (title string) {
if rss.Title != "" {
title = rss.Title
} else if rss.DublinCoreExt != nil && rss.DublinCoreExt.Title != nil {
title = t.firstEntry(rss.DublinCoreExt.Title)
}
return
}
func (t *DefaultRSSTranslator) translateFeedDescription(rss *rss.Feed) (desc string) {
return rss.Description
}
// translateFeedLink resolves the feed's website link, preferring the
// RSS <link> element.
//
// NOTE(review): the fallback copies the iTunes subtitle, which is a
// descriptive string rather than a URL — confirm this fallback is
// intentional before relying on Link containing a valid URL.
func (t *DefaultRSSTranslator) translateFeedLink(rss *rss.Feed) (link string) {
	if rss.Link != "" {
		link = rss.Link
	} else if rss.ITunesExt != nil && rss.ITunesExt.Subtitle != "" {
		link = rss.ITunesExt.Subtitle
	}
	return
}
// translateFeedFeedLink resolves the feed's own URL from an embedded
// Atom extension ("atom", "atom10" or "atom03"), taking the value of
// the last <atom:link rel="self"> element found.
//
// Fix: extension attributes are keyed by the XML attribute's local
// name, and the Atom attribute is lowercase "rel"; the previous lookup
// of "Rel" could not match, leaving the feed link empty.
func (t *DefaultRSSTranslator) translateFeedFeedLink(rss *rss.Feed) (link string) {
	atomExtensions := t.extensionsForKeys([]string{"atom", "atom10", "atom03"}, rss.Extensions)
	for _, ex := range atomExtensions {
		if links, ok := ex["link"]; ok {
			for _, l := range links {
				if l.Attrs["rel"] == "self" {
					link = l.Value
				}
			}
		}
	}
	return
}
func (t *DefaultRSSTranslator) translateFeedUpdated(rss *rss.Feed) (updated string) {
if rss.LastBuildDate != "" {
updated = rss.LastBuildDate
} else if rss.DublinCoreExt != nil && rss.DublinCoreExt.Date != nil {
updated = t.firstEntry(rss.DublinCoreExt.Date)
}
return
}
func (t *DefaultRSSTranslator) translateFeedUpdatedParsed(rss *rss.Feed) (updated *time.Time) {
if rss.LastBuildDateParsed != nil {
updated = rss.LastBuildDateParsed
} else if rss.DublinCoreExt != nil && rss.DublinCoreExt.Date != nil {
dateText := t.firstEntry(rss.DublinCoreExt.Date)
date, err := shared.ParseDate(dateText)
if err == nil {
updated = &date
}
}
return
}
func (t *DefaultRSSTranslator) translateFeedPublished(rss *rss.Feed) (published string) {
return rss.PubDate
}
func (t *DefaultRSSTranslator) translateFeedPublishedParsed(rss *rss.Feed) (published *time.Time) {
return rss.PubDateParsed
}
// translateFeedAuthor picks the feed author, preferring the RSS
// managingEditor, then webMaster, then the Dublin Core author and
// creator fields, and finally the iTunes author. Returns nil when no
// author information is present.
func (t *DefaultRSSTranslator) translateFeedAuthor(rss *rss.Feed) (author *Person) {
	person := func(raw string) *Person {
		name, address := shared.ParseNameAddress(raw)
		return &Person{Name: name, Email: address}
	}

	dc := rss.DublinCoreExt
	switch {
	case rss.ManagingEditor != "":
		author = person(rss.ManagingEditor)
	case rss.WebMaster != "":
		author = person(rss.WebMaster)
	case dc != nil && dc.Author != nil:
		author = person(t.firstEntry(dc.Author))
	case dc != nil && dc.Creator != nil:
		author = person(t.firstEntry(dc.Creator))
	case rss.ITunesExt != nil && rss.ITunesExt.Author != "":
		author = person(rss.ITunesExt.Author)
	}
	return
}
func (t *DefaultRSSTranslator) translateFeedLanguage(rss *rss.Feed) (language string) {
if rss.Language != "" {
language = rss.Language
} else if rss.DublinCoreExt != nil && rss.DublinCoreExt.Language != nil {
language = t.firstEntry(rss.DublinCoreExt.Language)
}
return
}
func (t *DefaultRSSTranslator) translateFeedImage(rss *rss.Feed) (image *Image) {
if rss.Image != nil {
image = &Image{}
image.Title = rss.Image.Title
image.URL = rss.Image.URL
} else if rss.ITunesExt != nil && rss.ITunesExt.Image != "" {
image = &Image{}
image.URL = rss.ITunesExt.Image
}
return
}
func (t *DefaultRSSTranslator) translateFeedCopyright(rss *rss.Feed) (rights string) {
if rss.Copyright != "" {
rights = rss.Copyright
} else if rss.DublinCoreExt != nil && rss.DublinCoreExt.Rights != nil {
rights = t.firstEntry(rss.DublinCoreExt.Rights)
}
return
}
func (t *DefaultRSSTranslator) translateFeedGenerator(rss *rss.Feed) (generator string) {
return rss.Generator
}
// translateFeedCategories merges category values from the RSS
// <category> elements, the iTunes keywords and categories (including
// subcategories), and the Dublin Core subjects. Returns nil when no
// category is found.
func (t *DefaultRSSTranslator) translateFeedCategories(rss *rss.Feed) (categories []string) {
	cats := []string{}

	for _, c := range rss.Categories {
		cats = append(cats, c.Value)
	}

	if itunes := rss.ITunesExt; itunes != nil {
		if itunes.Keywords != "" {
			cats = append(cats, strings.Split(itunes.Keywords, ",")...)
		}
		for _, c := range itunes.Categories {
			cats = append(cats, c.Text)
			if c.Subcategory != nil {
				cats = append(cats, c.Subcategory.Text)
			}
		}
	}

	if rss.DublinCoreExt != nil {
		cats = append(cats, rss.DublinCoreExt.Subject...)
	}

	if len(cats) == 0 {
		return nil
	}
	return cats
}
func (t *DefaultRSSTranslator) translateFeedItems(rss *rss.Feed) (items []*Item) {
items = []*Item{}
for _, i := range rss.Items {
items = append(items, t.translateFeedItem(i))
}
return
}
func (t *DefaultRSSTranslator) translateItemTitle(rssItem *rss.Item) (title string) {
if rssItem.Title != "" {
title = rssItem.Title
} else if rssItem.DublinCoreExt != nil && rssItem.DublinCoreExt.Title != nil {
title = t.firstEntry(rssItem.DublinCoreExt.Title)
}
return
}
func (t *DefaultRSSTranslator) translateItemDescription(rssItem *rss.Item) (desc string) {
if rssItem.Description != "" {
desc = rssItem.Description
} else if rssItem.DublinCoreExt != nil && rssItem.DublinCoreExt.Description != nil {
desc = t.firstEntry(rssItem.DublinCoreExt.Description)
}
return
}
func (t *DefaultRSSTranslator) translateItemContent(rssItem *rss.Item) (content string) {
return rssItem.Content
}
func (t *DefaultRSSTranslator) translateItemLink(rssItem *rss.Item) (link string) {
return rssItem.Link
}
func (t *DefaultRSSTranslator) translateItemUpdated(rssItem *rss.Item) (updated string) {
if rssItem.DublinCoreExt != nil && rssItem.DublinCoreExt.Date != nil {
updated = t.firstEntry(rssItem.DublinCoreExt.Date)
}
return updated
}
func (t *DefaultRSSTranslator) translateItemUpdatedParsed(rssItem *rss.Item) (updated *time.Time) {
if rssItem.DublinCoreExt != nil && rssItem.DublinCoreExt.Date != nil {
updatedText := t.firstEntry(rssItem.DublinCoreExt.Date)
updatedDate, err := shared.ParseDate(updatedText)
if err == nil {
updated = &updatedDate
}
}
return
}
func (t *DefaultRSSTranslator) translateItemPublished(rssItem *rss.Item) (pubDate string) {
if rssItem.PubDate != "" {
return rssItem.PubDate
} else if rssItem.DublinCoreExt != nil && rssItem.DublinCoreExt.Date != nil {
return t.firstEntry(rssItem.DublinCoreExt.Date)
}
return
}
func (t *DefaultRSSTranslator) translateItemPublishedParsed(rssItem *rss.Item) (pubDate *time.Time) {
if rssItem.PubDateParsed != nil {
return rssItem.PubDateParsed
} else if rssItem.DublinCoreExt != nil && rssItem.DublinCoreExt.Date != nil {
pubDateText := t.firstEntry(rssItem.DublinCoreExt.Date)
pubDateParsed, err := shared.ParseDate(pubDateText)
if err == nil {
pubDate = &pubDateParsed
}
}
return
}
func (t *DefaultRSSTranslator) translateItemAuthor(rssItem *rss.Item) (author *Person) {
if rssItem.Author != "" {
name, address := shared.ParseNameAddress(rssItem.Author)
author = &Person{}
author.Name = name
author.Email = address
} else if rssItem.DublinCoreExt != nil && rssItem.DublinCoreExt.Author != nil {
dcAuthor := t.firstEntry(rssItem.DublinCoreExt.Author)
name, address := shared.ParseNameAddress(dcAuthor)
author = &Person{}
author.Name = name
author.Email = address
} else if rssItem.DublinCoreExt != nil && rssItem.DublinCoreExt.Creator != nil {
dcCreator := t.firstEntry(rssItem.DublinCoreExt.Creator)
name, address := shared.ParseNameAddress(dcCreator)
author = &Person{}
author.Name = name
author.Email = address
} else if rssItem.ITunesExt != nil && rssItem.ITunesExt.Author != "" {
name, address := shared.ParseNameAddress(rssItem.ITunesExt.Author)
author = &Person{}
author.Name = name
author.Email = address
}
return
}
func (t *DefaultRSSTranslator) translateItemGUID(rssItem *rss.Item) (guid string) {
if rssItem.GUID != nil {
guid = rssItem.GUID.Value
}
return
}
func (t *DefaultRSSTranslator) translateItemImage(rssItem *rss.Item) (image *Image) {
if rssItem.ITunesExt != nil && rssItem.ITunesExt.Image != "" {
image = &Image{}
image.URL = rssItem.ITunesExt.Image
}
return
}
func (t *DefaultRSSTranslator) translateItemCategories(rssItem *rss.Item) (categories []string) {
cats := []string{}
if rssItem.Categories != nil {
for _, c := range rssItem.Categories {
cats = append(cats, c.Value)
}
}
if rssItem.ITunesExt != nil && rssItem.ITunesExt.Keywords != "" {
keywords := strings.Split(rssItem.ITunesExt.Keywords, ",")
for _, k := range keywords {
cats = append(cats, k)
}
}
if rssItem.DublinCoreExt != nil && rssItem.DublinCoreExt.Subject != nil {
for _, c := range rssItem.DublinCoreExt.Subject {
cats = append(cats, c)
}
}
if len(cats) > 0 {
categories = cats
}
return
}
func (t *DefaultRSSTranslator) translateItemEnclosures(rssItem *rss.Item) (enclosures []*Enclosure) {
if rssItem.Enclosure != nil {
e := &Enclosure{}
e.URL = rssItem.Enclosure.URL
e.Type = rssItem.Enclosure.Type
e.Length = rssItem.Enclosure.Length
enclosures = []*Enclosure{e}
}
return
}
func (t *DefaultRSSTranslator) extensionsForKeys(keys []string, extensions ext.Extensions) (matches []map[string][]ext.Extension) {
matches = []map[string][]ext.Extension{}
if extensions == nil {
return
}
for _, key := range keys {
if match, ok := extensions[key]; ok {
matches = append(matches, match)
}
}
return
}
// firstEntry returns the first element of entries, or the empty string
// when the slice is nil or empty.
//
// The original's separate nil check was redundant: len of a nil slice
// is already 0.
func (t *DefaultRSSTranslator) firstEntry(entries []string) string {
	if len(entries) == 0 {
		return ""
	}
	return entries[0]
}
// DefaultAtomTranslator converts an atom.Feed struct
// into the generic Feed struct.
//
// This default implementation defines a set of
// mapping rules between atom.Feed -> Feed
// for each of the fields in Feed.
type DefaultAtomTranslator struct{}
// Translate converts an Atom feed into the universal
// feed type.
func (t *DefaultAtomTranslator) Translate(feed interface{}) (*Feed, error) {
atom, found := feed.(*atom.Feed)
if !found {
return nil, fmt.Errorf("Feed did not match expected type of *atom.Feed")
}
result := &Feed{}
result.Title = t.translateFeedTitle(atom)
result.Description = t.translateFeedDescription(atom)
result.Link = t.translateFeedLink(atom)
result.FeedLink = t.translateFeedFeedLink(atom)
result.Updated = t.translateFeedUpdated(atom)
result.UpdatedParsed = t.translateFeedUpdatedParsed(atom)
result.Author = t.translateFeedAuthor(atom)
result.Language = t.translateFeedLanguage(atom)
result.Image = t.translateFeedImage(atom)
result.Copyright = t.translateFeedCopyright(atom)
result.Categories = t.translateFeedCategories(atom)
result.Generator = t.translateFeedGenerator(atom)
result.Items = t.translateFeedItems(atom)
result.Extensions = atom.Extensions
result.FeedVersion = atom.Version
result.FeedType = "atom"
return result, nil
}
func (t *DefaultAtomTranslator) translateFeedItem(entry *atom.Entry) (item *Item) {
item = &Item{}
item.Title = t.translateItemTitle(entry)
item.Description = t.translateItemDescription(entry)
item.Content = t.translateItemContent(entry)
item.Link = t.translateItemLink(entry)
item.Updated = t.translateItemUpdated(entry)
item.UpdatedParsed = t.translateItemUpdatedParsed(entry)
item.Published = t.translateItemPublished(entry)
item.PublishedParsed = t.translateItemPublishedParsed(entry)
item.Author = t.translateItemAuthor(entry)
item.GUID = t.translateItemGUID(entry)
item.Image = t.translateItemImage(entry)
item.Categories = t.translateItemCategories(entry)
item.Enclosures = t.translateItemEnclosures(entry)
item.Extensions = entry.Extensions
return
}
func (t *DefaultAtomTranslator) translateFeedTitle(atom *atom.Feed) (title string) {
return atom.Title
}
func (t *DefaultAtomTranslator) translateFeedDescription(atom *atom.Feed) (desc string) {
return atom.Subtitle
}
func (t *DefaultAtomTranslator) translateFeedLink(atom *atom.Feed) (link string) {
l := t.firstLinkWithType("alternate", atom.Links)
if l != nil {
link = l.Href
}
return
}
func (t *DefaultAtomTranslator) translateFeedFeedLink(atom *atom.Feed) (link string) {
feedLink := t.firstLinkWithType("self", atom.Links)
if feedLink != nil {
link = feedLink.Href
}
return
}
func (t *DefaultAtomTranslator) translateFeedUpdated(atom *atom.Feed) (updated string) {
return atom.Updated
}
func (t *DefaultAtomTranslator) translateFeedUpdatedParsed(atom *atom.Feed) (updated *time.Time) {
return atom.UpdatedParsed
}
func (t *DefaultAtomTranslator) translateFeedAuthor(atom *atom.Feed) (author *Person) {
a := t.firstPerson(atom.Authors)
if a != nil {
feedAuthor := Person{}
feedAuthor.Name = a.Name
feedAuthor.Email = a.Email
author = &feedAuthor
}
return
}
func (t *DefaultAtomTranslator) translateFeedLanguage(atom *atom.Feed) (language string) {
return atom.Language
}
func (t *DefaultAtomTranslator) translateFeedImage(atom *atom.Feed) (image *Image) {
if atom.Logo != "" {
feedImage := Image{}
feedImage.URL = atom.Logo
image = &feedImage
}
return
}
func (t *DefaultAtomTranslator) translateFeedCopyright(atom *atom.Feed) (rights string) {
return atom.Rights
}
// translateFeedGenerator renders the Atom <generator> element as a
// single space-separated string of the form "value vVERSION uri",
// omitting any missing parts. Returns the empty string when the feed
// has no generator.
func (t *DefaultAtomTranslator) translateFeedGenerator(atom *atom.Feed) (generator string) {
	g := atom.Generator
	if g == nil {
		return ""
	}

	parts := make([]string, 0, 3)
	if g.Value != "" {
		parts = append(parts, g.Value)
	}
	if g.Version != "" {
		parts = append(parts, "v"+g.Version)
	}
	if g.URI != "" {
		parts = append(parts, g.URI)
	}
	return strings.Join(parts, " ")
}
func (t *DefaultAtomTranslator) translateFeedCategories(atom *atom.Feed) (categories []string) {
if atom.Categories != nil {
categories = []string{}
for _, c := range atom.Categories {
categories = append(categories, c.Term)
}
}
return
}
func (t *DefaultAtomTranslator) translateFeedItems(atom *atom.Feed) (items []*Item) {
items = []*Item{}
for _, entry := range atom.Entries {
items = append(items, t.translateFeedItem(entry))
}
return
}
func (t *DefaultAtomTranslator) translateItemTitle(entry *atom.Entry) (title string) {
return entry.Title
}
func (t *DefaultAtomTranslator) translateItemDescription(entry *atom.Entry) (desc string) {
return entry.Summary
}
func (t *DefaultAtomTranslator) translateItemContent(entry *atom.Entry) (content string) {
if entry.Content != nil {
content = entry.Content.Value
}
return
}
func (t *DefaultAtomTranslator) translateItemLink(entry *atom.Entry) (link string) {
l := t.firstLinkWithType("alternate", entry.Links)
if l != nil {
link = l.Href
}
return
}
func (t *DefaultAtomTranslator) translateItemUpdated(entry *atom.Entry) (updated string) {
return entry.Updated
}
func (t *DefaultAtomTranslator) translateItemUpdatedParsed(entry *atom.Entry) (updated *time.Time) {
return entry.UpdatedParsed
}
func (t *DefaultAtomTranslator) translateItemPublished(entry *atom.Entry) (updated string) {
return entry.Published
}
func (t *DefaultAtomTranslator) translateItemPublishedParsed(entry *atom.Entry) (updated *time.Time) {
return entry.PublishedParsed
}
func (t *DefaultAtomTranslator) translateItemAuthor(entry *atom.Entry) (author *Person) {
a := t.firstPerson(entry.Authors)
if a != nil {
author = &Person{}
author.Name = a.Name
author.Email = a.Email
}
return
}
func (t *DefaultAtomTranslator) translateItemGUID(entry *atom.Entry) (guid string) {
return entry.ID
}
func (t *DefaultAtomTranslator) translateItemImage(entry *atom.Entry) (image *Image) {
return nil
}
func (t *DefaultAtomTranslator) translateItemCategories(entry *atom.Entry) (categories []string) {
if entry.Categories != nil {
categories = []string{}
for _, c := range entry.Categories {
categories = append(categories, c.Term)
}
}
return
}
func (t *DefaultAtomTranslator) translateItemEnclosures(entry *atom.Entry) (enclosures []*Enclosure) {
if entry.Links != nil {
enclosures = []*Enclosure{}
for _, e := range entry.Links {
if e.Rel == "enclosure" {
enclosure := &Enclosure{}
enclosure.URL = e.Href
enclosure.Length = e.Length
enclosure.Type = e.Type
enclosures = append(enclosures, enclosure)
}
}
if len(enclosures) == 0 {
enclosures = nil
}
}
return
}
// firstLinkWithType returns the first link whose Rel equals linkType,
// or nil when no such link exists.
//
// The original's explicit nil-slice guard was unnecessary: ranging
// over a nil slice performs zero iterations.
func (t *DefaultAtomTranslator) firstLinkWithType(linkType string, links []*atom.Link) *atom.Link {
	for _, link := range links {
		if link.Rel == linkType {
			return link
		}
	}
	return nil
}
// firstPerson returns the first person in the slice, or nil when the
// slice is nil or empty (a single len check covers both cases).
func (t *DefaultAtomTranslator) firstPerson(persons []*atom.Person) *atom.Person {
	if len(persons) == 0 {
		return nil
	}
	return persons[0]
}

305
vendor/github.com/olekukonko/tablewriter/README.md generated vendored Normal file
View file

@ -0,0 +1,305 @@
ASCII Table Writer
=========
[![Build Status](https://travis-ci.org/olekukonko/tablewriter.png?branch=master)](https://travis-ci.org/olekukonko/tablewriter)
[![Total views](https://img.shields.io/sourcegraph/rrc/github.com/olekukonko/tablewriter.svg)](https://sourcegraph.com/github.com/olekukonko/tablewriter)
[![Godoc](https://godoc.org/github.com/olekukonko/tablewriter?status.svg)](https://godoc.org/github.com/olekukonko/tablewriter)
Generate ASCII table on the fly ... Installation is as simple as
go get github.com/olekukonko/tablewriter
#### Features
- Automatic Padding
- Support Multiple Lines
- Supports Alignment
- Support Custom Separators
- Automatic Alignment of numbers & percentage
- Write directly to http , file etc via `io.Writer`
- Read directly from CSV file
- Optional row line via `SetRowLine`
- Normalise table header
- Make CSV Headers optional
- Enable or disable table border
- Set custom footer support
- Optional identical cells merging
- Set custom caption
- Optional reflowing of paragraphs in multi-line cells.
#### Example 1 - Basic
```go
data := [][]string{
[]string{"A", "The Good", "500"},
[]string{"B", "The Very very Bad Man", "288"},
[]string{"C", "The Ugly", "120"},
[]string{"D", "The Gopher", "800"},
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Name", "Sign", "Rating"})
for _, v := range data {
table.Append(v)
}
table.Render() // Send output
```
##### Output 1
```
+------+-----------------------+--------+
| NAME | SIGN | RATING |
+------+-----------------------+--------+
| A | The Good | 500 |
| B | The Very very Bad Man | 288 |
| C | The Ugly | 120 |
| D | The Gopher | 800 |
+------+-----------------------+--------+
```
#### Example 2 - Without Border / Footer / Bulk Append
```go
data := [][]string{
[]string{"1/1/2014", "Domain name", "2233", "$10.98"},
[]string{"1/1/2014", "January Hosting", "2233", "$54.95"},
[]string{"1/4/2014", "February Hosting", "2233", "$51.00"},
[]string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"},
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Date", "Description", "CV2", "Amount"})
table.SetFooter([]string{"", "", "Total", "$146.93"}) // Add Footer
table.SetBorder(false) // Set Border to false
table.AppendBulk(data) // Add Bulk Data
table.Render()
```
##### Output 2
```
DATE | DESCRIPTION | CV2 | AMOUNT
-----------+--------------------------+-------+----------
1/1/2014 | Domain name | 2233 | $10.98
1/1/2014 | January Hosting | 2233 | $54.95
1/4/2014 | February Hosting | 2233 | $51.00
1/4/2014 | February Extra Bandwidth | 2233 | $30.00
-----------+--------------------------+-------+----------
TOTAL | $146 93
--------+----------
```
#### Example 3 - CSV
```go
table, _ := tablewriter.NewCSV(os.Stdout, "testdata/test_info.csv", true)
table.SetAlignment(tablewriter.ALIGN_LEFT) // Set Alignment
table.Render()
```
##### Output 3
```
+----------+--------------+------+-----+---------+----------------+
| FIELD | TYPE | NULL | KEY | DEFAULT | EXTRA |
+----------+--------------+------+-----+---------+----------------+
| user_id | smallint(5) | NO | PRI | NULL | auto_increment |
| username | varchar(10) | NO | | NULL | |
| password | varchar(100) | NO | | NULL | |
+----------+--------------+------+-----+---------+----------------+
```
#### Example 4 - Custom Separator
```go
table, _ := tablewriter.NewCSV(os.Stdout, "testdata/test.csv", true)
table.SetRowLine(true) // Enable row line
// Change table lines
table.SetCenterSeparator("*")
table.SetColumnSeparator("╪")
table.SetRowSeparator("-")
table.SetAlignment(tablewriter.ALIGN_LEFT)
table.Render()
```
##### Output 4
```
*------------*-----------*---------*
╪ FIRST NAME ╪ LAST NAME ╪ SSN ╪
*------------*-----------*---------*
╪ John ╪ Barry ╪ 123456 ╪
*------------*-----------*---------*
╪ Kathy ╪ Smith ╪ 687987 ╪
*------------*-----------*---------*
╪ Bob ╪ McCornick ╪ 3979870 ╪
*------------*-----------*---------*
```
#### Example 5 - Markdown Format
```go
data := [][]string{
[]string{"1/1/2014", "Domain name", "2233", "$10.98"},
[]string{"1/1/2014", "January Hosting", "2233", "$54.95"},
[]string{"1/4/2014", "February Hosting", "2233", "$51.00"},
[]string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"},
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Date", "Description", "CV2", "Amount"})
table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
table.SetCenterSeparator("|")
table.AppendBulk(data) // Add Bulk Data
table.Render()
```
##### Output 5
```
| DATE | DESCRIPTION | CV2 | AMOUNT |
|----------|--------------------------|------|--------|
| 1/1/2014 | Domain name | 2233 | $10.98 |
| 1/1/2014 | January Hosting | 2233 | $54.95 |
| 1/4/2014 | February Hosting | 2233 | $51.00 |
| 1/4/2014 | February Extra Bandwidth | 2233 | $30.00 |
```
#### Example 6 - Identical cells merging
```go
data := [][]string{
[]string{"1/1/2014", "Domain name", "1234", "$10.98"},
[]string{"1/1/2014", "January Hosting", "2345", "$54.95"},
[]string{"1/4/2014", "February Hosting", "3456", "$51.00"},
[]string{"1/4/2014", "February Extra Bandwidth", "4567", "$30.00"},
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Date", "Description", "CV2", "Amount"})
table.SetFooter([]string{"", "", "Total", "$146.93"})
table.SetAutoMergeCells(true)
table.SetRowLine(true)
table.AppendBulk(data)
table.Render()
```
##### Output 6
```
+----------+--------------------------+-------+---------+
| DATE | DESCRIPTION | CV2 | AMOUNT |
+----------+--------------------------+-------+---------+
| 1/1/2014 | Domain name | 1234 | $10.98 |
+ +--------------------------+-------+---------+
| | January Hosting | 2345 | $54.95 |
+----------+--------------------------+-------+---------+
| 1/4/2014 | February Hosting | 3456 | $51.00 |
+ +--------------------------+-------+---------+
| | February Extra Bandwidth | 4567 | $30.00 |
+----------+--------------------------+-------+---------+
| TOTAL | $146 93 |
+----------+--------------------------+-------+---------+
```
#### Table with color
```go
data := [][]string{
[]string{"1/1/2014", "Domain name", "2233", "$10.98"},
[]string{"1/1/2014", "January Hosting", "2233", "$54.95"},
[]string{"1/4/2014", "February Hosting", "2233", "$51.00"},
[]string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"},
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Date", "Description", "CV2", "Amount"})
table.SetFooter([]string{"", "", "Total", "$146.93"}) // Add Footer
table.SetBorder(false) // Set Border to false
table.SetHeaderColor(tablewriter.Colors{tablewriter.Bold, tablewriter.BgGreenColor},
tablewriter.Colors{tablewriter.FgHiRedColor, tablewriter.Bold, tablewriter.BgBlackColor},
tablewriter.Colors{tablewriter.BgRedColor, tablewriter.FgWhiteColor},
tablewriter.Colors{tablewriter.BgCyanColor, tablewriter.FgWhiteColor})
table.SetColumnColor(tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor},
tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiRedColor},
tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor},
tablewriter.Colors{tablewriter.Bold, tablewriter.FgBlackColor})
table.SetFooterColor(tablewriter.Colors{}, tablewriter.Colors{},
tablewriter.Colors{tablewriter.Bold},
tablewriter.Colors{tablewriter.FgHiRedColor})
table.AppendBulk(data)
table.Render()
```
#### Table with color Output
![Table with Color](https://cloud.githubusercontent.com/assets/6460392/21101956/bbc7b356-c0a1-11e6-9f36-dba694746efc.png)
#### Example 6 - Set table caption
```go
data := [][]string{
[]string{"A", "The Good", "500"},
[]string{"B", "The Very very Bad Man", "288"},
[]string{"C", "The Ugly", "120"},
[]string{"D", "The Gopher", "800"},
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Name", "Sign", "Rating"})
table.SetCaption(true, "Movie ratings.")
for _, v := range data {
table.Append(v)
}
table.Render() // Send output
```
Note: Caption text will wrap with total width of rendered table.
##### Output 6
```
+------+-----------------------+--------+
| NAME | SIGN | RATING |
+------+-----------------------+--------+
| A | The Good | 500 |
| B | The Very very Bad Man | 288 |
| C | The Ugly | 120 |
| D | The Gopher | 800 |
+------+-----------------------+--------+
Movie ratings.
```
#### Render table into a string
Instead of rendering the table to `os.Stdout` you can also render it into a string. Go 1.10 introduced the `strings.Builder` type which implements the `io.Writer` interface and can therefore be used for this task. Example:
```go
package main
import (
"strings"
"fmt"
"github.com/olekukonko/tablewriter"
)
func main() {
tableString := &strings.Builder{}
table := tablewriter.NewWriter(tableString)
/*
* Code to fill the table
*/
table.Render()
fmt.Println(tableString.String())
}
```
#### TODO
- ~~Import Directly from CSV~~ - `done`
- ~~Support for `SetFooter`~~ - `done`
- ~~Support for `SetBorder`~~ - `done`
- ~~Support table with uneven rows~~ - `done`
- ~~Support custom alignment~~
- General Improvement & Optimisation
- `NewHTML` Parse table from HTML

876
vendor/github.com/olekukonko/tablewriter/table.go generated vendored Normal file
View file

@ -0,0 +1,876 @@
// Copyright 2014 Oleku Konko All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.
// This module is a Table Writer API for the Go Programming Language.
// The protocols were written in pure Go and works on windows and unix systems
// Create & Generate text based table
package tablewriter
import (
"bytes"
"fmt"
"io"
"regexp"
"strings"
)
const (
MAX_ROW_WIDTH = 30
)
const (
CENTER = "+"
ROW = "-"
COLUMN = "|"
SPACE = " "
NEWLINE = "\n"
)
const (
ALIGN_DEFAULT = iota
ALIGN_CENTER
ALIGN_RIGHT
ALIGN_LEFT
)
var (
	// decimal matches an optionally-signed number, with or without
	// thousands separators and a fractional part, e.g. "1,234.56".
	decimal = regexp.MustCompile(`^-?(?:\d{1,3}(?:,\d{3})*|\d+)(?:\.\d+)?$`)
	// percent matches an optionally-signed number followed by a percent
	// sign, e.g. "12%", "-3.5%".
	//
	// Fix: the previous pattern ended in `$%$`; in Go's default
	// (non-multiline) mode `$` only matches at end of text, so a `$`
	// before the `%` made the pattern unmatchable and percent values
	// were never detected.
	percent = regexp.MustCompile(`^-?\d+\.?\d*%$`)
)
type Border struct {
Left bool
Right bool
Top bool
Bottom bool
}
// Table holds the configuration and accumulated data for one rendered
// text table.
type Table struct {
	out         io.Writer // destination the rendered table is written to
	rows        [][]string
	lines       [][][]string // per-row cell lines; presumably rows split after wrapping — confirm against parseDimension
	cs          map[int]int  // column index -> width (see SetColMinWidth)
	rs          map[int]int  // row index -> height; assumed — not visible here
	headers     [][]string
	footers     [][]string
	caption     bool   // print captionText after the table (see SetCaption)
	captionText string // caption body; defaults to "Table caption."
	autoFmt     bool   // auto-format (normalize) header labels
	autoWrap    bool   // wrap long cell text across lines
	reflowText  bool   // reflow multi-line text while rewrapping
	mW          int    // default max column width (MAX_ROW_WIDTH)
	pCenter     string // separator glyph at line intersections (default "+")
	pRow        string // row separator glyph (default "-")
	pColumn     string // column separator glyph (default "|")
	tColumn     int
	tRow        int
	hAlign      int // header alignment (ALIGN_* constant)
	fAlign      int // footer alignment (ALIGN_* constant)
	align       int // cell alignment (ALIGN_* constant)
	newLine     string
	rowLine     bool // print a separator line after every row
	autoMergeCells bool // merge vertically identical cells when rendering
	hdrLine     bool
	borders     Border // which outer borders to draw
	colSize     int    // number of columns, set from SetHeader
	headerParams []string
	columnsParams []string
	footerParams []string
	columnsAlign []int // per-column alignment overrides (SetColumnAlignment)
}
// Start New Table
// Take io.Writer Directly
func NewWriter(writer io.Writer) *Table {
t := &Table{
out: writer,
rows: [][]string{},
lines: [][][]string{},
cs: make(map[int]int),
rs: make(map[int]int),
headers: [][]string{},
footers: [][]string{},
caption: false,
captionText: "Table caption.",
autoFmt: true,
autoWrap: true,
reflowText: true,
mW: MAX_ROW_WIDTH,
pCenter: CENTER,
pRow: ROW,
pColumn: COLUMN,
tColumn: -1,
tRow: -1,
hAlign: ALIGN_DEFAULT,
fAlign: ALIGN_DEFAULT,
align: ALIGN_DEFAULT,
newLine: NEWLINE,
rowLine: false,
hdrLine: true,
borders: Border{Left: true, Right: true, Bottom: true, Top: true},
colSize: -1,
headerParams: []string{},
columnsParams: []string{},
footerParams: []string{},
columnsAlign: []int{}}
return t
}
// Render table output
//
// Render writes the table to t.out: optional top border, heading,
// rows (merged-cell variant when autoMergeCells is set), optional
// bottom border, footer and caption.
func (t *Table) Render() {
	if t.borders.Top {
		t.printLine(true)
	}
	t.printHeading()
	if t.autoMergeCells {
		t.printRowsMergeCells()
	} else {
		t.printRows()
	}
	// NOTE(review): the bottom border is skipped when rowLine is set —
	// presumably each row already printed its own separator line.
	if !t.rowLine && t.borders.Bottom {
		t.printLine(true)
	}
	t.printFooter()
	if t.caption {
		t.printCaption()
	}
}
// Pseudo row indexes used as keys into t.rs for the header and footer
// heights; negative so they can never collide with a real row index.
const (
	headerRowIdx = -1
	footerRowIdx = -2
)
// SetHeader defines the header cells and records the column count.
// Each value is parsed into wrapped lines, which also sizes the columns.
func (t *Table) SetHeader(keys []string) {
	t.colSize = len(keys)
	for i, v := range keys {
		t.headers = append(t.headers, t.parseDimension(v, i, headerRowIdx))
	}
}
// SetFooter defines the footer cells. Each value is parsed into
// wrapped lines, which also sizes the columns.
func (t *Table) SetFooter(keys []string) {
	for i, v := range keys {
		t.footers = append(t.footers, t.parseDimension(v, i, footerRowIdx))
	}
}
// SetCaption enables or disables the caption; the optional single
// string argument replaces the caption text.
func (t *Table) SetCaption(caption bool, captionText ...string) {
	t.caption = caption
	if len(captionText) == 1 {
		t.captionText = captionText[0]
	}
}

// SetAutoFormatHeaders turns header auto-formatting (Title-casing) on/off.
// Default is on (true).
func (t *Table) SetAutoFormatHeaders(auto bool) {
	t.autoFmt = auto
}

// SetAutoWrapText turns automatic multiline text adjustment on/off.
// Default is on (true).
func (t *Table) SetAutoWrapText(auto bool) {
	t.autoWrap = auto
}

// SetReflowDuringAutoWrap turns automatic reflowing of multiline text
// when rewrapping on/off. Default is on (true).
func (t *Table) SetReflowDuringAutoWrap(auto bool) {
	t.reflowText = auto
}

// SetColWidth sets the default (maximum) column width used when wrapping.
func (t *Table) SetColWidth(width int) {
	t.mW = width
}

// SetColMinWidth sets the minimal width for a column.
func (t *Table) SetColMinWidth(column int, width int) {
	t.cs[column] = width
}

// SetColumnSeparator sets the column separator glyph (default "|").
func (t *Table) SetColumnSeparator(sep string) {
	t.pColumn = sep
}

// SetRowSeparator sets the row separator glyph (default "-").
func (t *Table) SetRowSeparator(sep string) {
	t.pRow = sep
}

// SetCenterSeparator sets the junction glyph (default "+").
func (t *Table) SetCenterSeparator(sep string) {
	t.pCenter = sep
}

// SetHeaderAlignment sets the alignment used for header cells.
func (t *Table) SetHeaderAlignment(hAlign int) {
	t.hAlign = hAlign
}

// SetFooterAlignment sets the alignment used for footer cells.
func (t *Table) SetFooterAlignment(fAlign int) {
	t.fAlign = fAlign
}

// SetAlignment sets the default alignment for body cells.
func (t *Table) SetAlignment(align int) {
	t.align = align
}
// SetColumnAlignment sets a per-column alignment; any unrecognized
// value falls back to ALIGN_DEFAULT.
func (t *Table) SetColumnAlignment(keys []int) {
	for _, v := range keys {
		switch v {
		case ALIGN_CENTER, ALIGN_LEFT, ALIGN_RIGHT:
			// valid as given
		default:
			v = ALIGN_DEFAULT
		}
		t.columnsAlign = append(t.columnsAlign, v)
	}
}
// SetNewLine sets the line terminator (default "\n").
func (t *Table) SetNewLine(nl string) {
	t.newLine = nl
}

// SetHeaderLine enables or disables the horizontal line drawn after
// the header.
func (t *Table) SetHeaderLine(line bool) {
	t.hdrLine = line
}

// SetRowLine enables or disables a horizontal line after each row of
// the table.
func (t *Table) SetRowLine(line bool) {
	t.rowLine = line
}

// SetAutoMergeCells enables or disables merging of cells that repeat
// the value of the cell directly above them.
func (t *Table) SetAutoMergeCells(auto bool) {
	t.autoMergeCells = auto
}

// SetBorder enables or disables all four outer borders at once.
func (t *Table) SetBorder(border bool) {
	t.SetBorders(Border{border, border, border, border})
}

// SetBorders sets each outer border individually.
func (t *Table) SetBorders(border Border) {
	t.borders = border
}
// Append adds a single row to the table. Each cell is wrapped and
// measured (updating column widths and this row's height) before
// being stored in t.lines.
func (t *Table) Append(row []string) {
	// NOTE(review): rowSize comes from the header count, not len(row);
	// confirm that widening colSize from headers here is intended.
	rowSize := len(t.headers)
	if rowSize > t.colSize {
		t.colSize = rowSize
	}
	n := len(t.lines)
	line := [][]string{}
	for i, v := range row {
		// Detect string width
		// Detect String height
		// Break strings into words
		out := t.parseDimension(v, i, n)
		// Append broken words
		line = append(line, out)
	}
	t.lines = append(t.lines, line)
}
// AppendBulk appends many rows at once, saving repeated Append loops
// at call sites.
func (t *Table) AppendBulk(rows [][]string) {
	for _, row := range rows {
		t.Append(row)
	}
}

// NumLines returns the number of appended rows.
func (t *Table) NumLines() int {
	return len(t.lines)
}

// ClearRows removes all appended rows (computed column widths are kept).
func (t *Table) ClearRows() {
	t.lines = [][][]string{}
}

// ClearFooter removes the footer cells.
func (t *Table) ClearFooter() {
	t.footers = [][]string{}
}
// center returns the junction glyph for column boundary i (-1 is the
// left edge). An edge whose border is disabled uses the row glyph
// instead of the junction glyph.
func (t *Table) center(i int) string {
	switch {
	case i == -1 && !t.borders.Left:
		return t.pRow
	case i == len(t.cs)-1 && !t.borders.Right:
		return t.pRow
	default:
		return t.pCenter
	}
}
// printLine draws a full horizontal separator: a junction glyph, then
// for each column a run of row glyphs as wide as the column plus one
// padding glyph on each side. nl appends the configured newline.
func (t *Table) printLine(nl bool) {
	fmt.Fprint(t.out, t.center(-1))
	for i := 0; i < len(t.cs); i++ {
		v := t.cs[i]
		fmt.Fprintf(t.out, "%s%s%s%s",
			t.pRow,
			strings.Repeat(string(t.pRow), v),
			t.pRow,
			t.center(i))
	}
	if nl {
		fmt.Fprint(t.out, t.newLine)
	}
}
// printLineOptionalCellSeparators prints a horizontal line like
// printLine, but lets the caller suppress the dashed run for
// individual cells (used so merged cells are not visually split).
// displayCellSeparator[i] reports whether column i gets a dashed run;
// columns beyond the slice default to showing one.
func (t *Table) printLineOptionalCellSeparators(nl bool, displayCellSeparator []bool) {
	fmt.Fprint(t.out, t.pCenter)
	for i := 0; i < len(t.cs); i++ {
		v := t.cs[i]
		// BUG FIX: the original guard was `i > len(displayCellSeparator)`,
		// which still evaluated displayCellSeparator[i] when
		// i == len(displayCellSeparator) and panicked on a short slice.
		// Use >= so out-of-range columns safely default to a separator.
		if i >= len(displayCellSeparator) || displayCellSeparator[i] {
			// Display the cell separator
			fmt.Fprintf(t.out, "%s%s%s%s",
				t.pRow,
				strings.Repeat(string(t.pRow), v),
				t.pRow,
				t.pCenter)
		} else {
			// Don't display the cell separator for this cell
			fmt.Fprintf(t.out, "%s%s",
				strings.Repeat(" ", v+2),
				t.pCenter)
		}
	}
	if nl {
		fmt.Fprint(t.out, t.newLine)
	}
}
// Return the PadRight function if align is left, PadLeft if align is right,
// and Pad by default
func pad(align int) func(string, string, int) string {
padFunc := Pad
switch align {
case ALIGN_LEFT:
padFunc = PadRight
case ALIGN_RIGHT:
padFunc = PadLeft
}
return padFunc
}
// printHeading renders the header rows (one output line per wrapped
// header line), optionally Title-casing the text and applying
// per-column ANSI formatting parameters, then draws the header line.
func (t *Table) printHeading() {
	// Check if headers is available
	if len(t.headers) < 1 {
		return
	}
	// Identify last column
	end := len(t.cs) - 1
	// Get pad function
	padFunc := pad(t.hAlign)
	// Checking for ANSI escape sequences for header
	is_esc_seq := false
	if len(t.headerParams) > 0 {
		is_esc_seq = true
	}
	// Maximum height.
	max := t.rs[headerRowIdx]
	// Print Heading
	for x := 0; x < max; x++ {
		// Check if border is set
		// Replace with space if not set
		fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE))
		for y := 0; y <= end; y++ {
			v := t.cs[y]
			h := ""
			if y < len(t.headers) && x < len(t.headers[y]) {
				h = t.headers[y][x]
			}
			if t.autoFmt {
				h = Title(h)
			}
			// NOTE(review): checks borders.Left for the *right* edge of
			// the last column; looks like it should be borders.Right —
			// confirm against upstream before changing.
			pad := ConditionString((y == end && !t.borders.Left), SPACE, t.pColumn)
			if is_esc_seq {
				fmt.Fprintf(t.out, " %s %s",
					format(padFunc(h, SPACE, v),
						t.headerParams[y]), pad)
			} else {
				fmt.Fprintf(t.out, " %s %s",
					padFunc(h, SPACE, v),
					pad)
			}
		}
		// Next line
		fmt.Fprint(t.out, t.newLine)
	}
	if t.hdrLine {
		t.printLine(true)
	}
}
// printFooter renders the footer rows and then a bottom separator
// line that skips the dashed run under empty footer cells. The
// erasePad bookkeeping blanks the trailing column separators of
// columns whose footer is empty.
func (t *Table) printFooter() {
	// Check if headers is available
	if len(t.footers) < 1 {
		return
	}
	// Only print line if border is not set
	if !t.borders.Bottom {
		t.printLine(true)
	}
	// Identify last column
	end := len(t.cs) - 1
	// Get pad function
	padFunc := pad(t.fAlign)
	// Checking for ANSI escape sequences for header
	is_esc_seq := false
	if len(t.footerParams) > 0 {
		is_esc_seq = true
	}
	// Maximum height.
	max := t.rs[footerRowIdx]
	// Print Footer
	erasePad := make([]bool, len(t.footers))
	for x := 0; x < max; x++ {
		// Check if border is set
		// Replace with space if not set
		fmt.Fprint(t.out, ConditionString(t.borders.Bottom, t.pColumn, SPACE))
		for y := 0; y <= end; y++ {
			v := t.cs[y]
			f := ""
			if y < len(t.footers) && x < len(t.footers[y]) {
				f = t.footers[y][x]
			}
			if t.autoFmt {
				f = Title(f)
			}
			// NOTE(review): checks borders.Top for the right edge of the
			// last column; looks inconsistent (printHeading uses Left) —
			// confirm against upstream.
			pad := ConditionString((y == end && !t.borders.Top), SPACE, t.pColumn)
			// Once a column's footer has produced an empty first line,
			// its separators are blanked for the remaining lines too.
			if erasePad[y] || (x == 0 && len(f) == 0) {
				pad = SPACE
				erasePad[y] = true
			}
			if is_esc_seq {
				fmt.Fprintf(t.out, " %s %s",
					format(padFunc(f, SPACE, v),
						t.footerParams[y]), pad)
			} else {
				fmt.Fprintf(t.out, " %s %s",
					padFunc(f, SPACE, v),
					pad)
			}
			//fmt.Fprintf(t.out, " %s %s",
			//	padFunc(f, SPACE, v),
			//	pad)
		}
		// Next line
		fmt.Fprint(t.out, t.newLine)
		//t.printLine(true)
	}
	// Bottom separator: draw dashes only under non-empty footer cells,
	// tracking junction glyph choice at each column boundary.
	hasPrinted := false
	for i := 0; i <= end; i++ {
		v := t.cs[i]
		pad := t.pRow
		center := t.pCenter
		length := len(t.footers[i][0])
		if length > 0 {
			hasPrinted = true
		}
		// Set center to be space if length is 0
		if length == 0 && !t.borders.Right {
			center = SPACE
		}
		// Print first junction
		if i == 0 {
			if length > 0 && !t.borders.Left {
				center = t.pRow
			}
			fmt.Fprint(t.out, center)
		}
		// Pad With space of length is 0
		if length == 0 {
			pad = SPACE
		}
		// Ignore left space as it has printed before
		if hasPrinted || t.borders.Left {
			pad = t.pRow
			center = t.pCenter
		}
		// Change Center end position
		if center != SPACE {
			if i == end && !t.borders.Right {
				center = t.pRow
			}
		}
		// Change Center start position
		if center == SPACE {
			if i < end && len(t.footers[i+1][0]) != 0 {
				if !t.borders.Left {
					center = t.pRow
				} else {
					center = t.pCenter
				}
			}
		}
		// Print the footer
		fmt.Fprintf(t.out, "%s%s%s%s",
			pad,
			strings.Repeat(string(pad), v),
			pad,
			center)
	}
	fmt.Fprint(t.out, t.newLine)
}
// printCaption wraps the caption text to the table width and writes
// each wrapped line on its own output line.
func (t Table) printCaption() {
	paragraph, _ := WrapString(t.captionText, t.getTableWidth())
	for _, line := range paragraph {
		fmt.Fprintln(t.out, line)
	}
}
// getTableWidth returns the rendered width of one table row: the sum
// of all column widths plus, per column, two padding spaces and one
// separator, plus the two outer borders.
func (t Table) getTableWidth() int {
	total := 0
	for _, w := range t.cs {
		total += w
	}
	return total + 3*t.colSize + 2
}
// printRows renders every appended row in order.
func (t Table) printRows() {
	for i := 0; i < len(t.lines); i++ {
		t.printRow(t.lines[i], i)
	}
}
// fillAlignment guarantees columnsAlign has at least num entries,
// rebuilding it with the table-wide default alignment when it is short.
func (t *Table) fillAlignment(num int) {
	if len(t.columnsAlign) >= num {
		return
	}
	t.columnsAlign = make([]int, num)
	for i := 0; i < num; i++ {
		t.columnsAlign[i] = t.align
	}
}
// printRow renders one logical row (possibly several output lines when
// cells wrapped), padding short cells to the row height and aligning
// each cell per columnsAlign. ALIGN_DEFAULT right-aligns cells that
// look numeric (decimal/percent regexes) and left-aligns the rest.
func (t *Table) printRow(columns [][]string, rowIdx int) {
	// Get Maximum Height
	max := t.rs[rowIdx]
	total := len(columns)
	// TODO Fix uneven col size
	// if total < t.colSize {
	//	for n := t.colSize - total; n < t.colSize ; n++ {
	//		columns = append(columns, []string{SPACE})
	//		t.cs[n] = t.mW
	//	}
	//}
	// Pad Each Height
	pads := []int{}
	// Checking for ANSI escape sequences for columns
	is_esc_seq := false
	if len(t.columnsParams) > 0 {
		is_esc_seq = true
	}
	t.fillAlignment(total)
	// Pad every column up to the row height with blank lines.
	for i, line := range columns {
		length := len(line)
		pad := max - length
		pads = append(pads, pad)
		for n := 0; n < pad; n++ {
			columns[i] = append(columns[i], " ")
		}
	}
	//fmt.Println(max, "\n")
	for x := 0; x < max; x++ {
		for y := 0; y < total; y++ {
			// Check if border is set
			fmt.Fprint(t.out, ConditionString((!t.borders.Left && y == 0), SPACE, t.pColumn))
			fmt.Fprintf(t.out, SPACE)
			str := columns[y][x]
			// Embedding escape sequence with column value
			if is_esc_seq {
				str = format(str, t.columnsParams[y])
			}
			// This would print alignment
			// Default alignment would use multiple configuration
			switch t.columnsAlign[y] {
			case ALIGN_CENTER: //
				fmt.Fprintf(t.out, "%s", Pad(str, SPACE, t.cs[y]))
			case ALIGN_RIGHT:
				fmt.Fprintf(t.out, "%s", PadLeft(str, SPACE, t.cs[y]))
			case ALIGN_LEFT:
				fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y]))
			default:
				if decimal.MatchString(strings.TrimSpace(str)) || percent.MatchString(strings.TrimSpace(str)) {
					fmt.Fprintf(t.out, "%s", PadLeft(str, SPACE, t.cs[y]))
				} else {
					fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y]))
					// TODO Custom alignment per column
					//if max == 1 || pads[y] > 0 {
					//	fmt.Fprintf(t.out, "%s", Pad(str, SPACE, t.cs[y]))
					//} else {
					//	fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y]))
					//}
				}
			}
			fmt.Fprintf(t.out, SPACE)
		}
		// Check if border is set
		// Replace with space if not set
		// NOTE(review): uses borders.Left for the right border glyph —
		// confirm against upstream.
		fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE))
		fmt.Fprint(t.out, t.newLine)
	}
	if t.rowLine {
		t.printLine(true)
	}
}
// printRowsMergeCells renders all rows, buffering each row first so
// the separator line drawn above it can suppress the dashed run over
// cells that were merged with the previous row.
func (t *Table) printRowsMergeCells() {
	var previousLine []string
	var displayCellBorder []bool
	var tmpWriter bytes.Buffer
	for i, lines := range t.lines {
		// We store the display of the current line in a tmp writer, as we need to know which border needs to be print above
		previousLine, displayCellBorder = t.printRowMergeCells(&tmpWriter, lines, i, previousLine)
		if i > 0 { //We don't need to print borders above first line
			if t.rowLine {
				t.printLineOptionalCellSeparators(true, displayCellBorder)
			}
		}
		tmpWriter.WriteTo(t.out)
	}
	//Print the end of the table
	if t.rowLine {
		t.printLine(true)
	}
}
// printRowMergeCells renders one row into writer (not t.out, so the
// caller can decide what separator to print above it first), blanking
// any cell identical to the one above. It returns the full text of
// each column for the next row's comparison, and per column whether
// the separator above this row should show its dashed run.
func (t *Table) printRowMergeCells(writer io.Writer, columns [][]string, rowIdx int, previousLine []string) ([]string, []bool) {
	// Get Maximum Height
	max := t.rs[rowIdx]
	total := len(columns)
	// Pad Each Height
	pads := []int{}
	// Checking for ANSI escape sequences for columns
	is_esc_seq := false
	if len(t.columnsParams) > 0 {
		is_esc_seq = true
	}
	// Pad every column up to the row height with blank lines.
	for i, line := range columns {
		length := len(line)
		pad := max - length
		pads = append(pads, pad)
		for n := 0; n < pad; n++ {
			columns[i] = append(columns[i], " ")
		}
	}
	var displayCellBorder []bool
	t.fillAlignment(total)
	for x := 0; x < max; x++ {
		for y := 0; y < total; y++ {
			// Check if border is set
			fmt.Fprint(writer, ConditionString((!t.borders.Left && y == 0), SPACE, t.pColumn))
			fmt.Fprintf(writer, SPACE)
			str := columns[y][x]
			// Embedding escape sequence with column value
			if is_esc_seq {
				str = format(str, t.columnsParams[y])
			}
			if t.autoMergeCells {
				//Store the full line to merge mutli-lines cells
				fullLine := strings.Join(columns[y], " ")
				if len(previousLine) > y && fullLine == previousLine[y] && fullLine != "" {
					// If this cell is identical to the one above but not empty, we don't display the border and keep the cell empty.
					displayCellBorder = append(displayCellBorder, false)
					str = ""
				} else {
					// First line or different content, keep the content and print the cell border
					displayCellBorder = append(displayCellBorder, true)
				}
			}
			// This would print alignment
			// Default alignment would use multiple configuration
			switch t.columnsAlign[y] {
			case ALIGN_CENTER: //
				fmt.Fprintf(writer, "%s", Pad(str, SPACE, t.cs[y]))
			case ALIGN_RIGHT:
				fmt.Fprintf(writer, "%s", PadLeft(str, SPACE, t.cs[y]))
			case ALIGN_LEFT:
				fmt.Fprintf(writer, "%s", PadRight(str, SPACE, t.cs[y]))
			default:
				if decimal.MatchString(strings.TrimSpace(str)) || percent.MatchString(strings.TrimSpace(str)) {
					fmt.Fprintf(writer, "%s", PadLeft(str, SPACE, t.cs[y]))
				} else {
					fmt.Fprintf(writer, "%s", PadRight(str, SPACE, t.cs[y]))
				}
			}
			fmt.Fprintf(writer, SPACE)
		}
		// Check if border is set
		// Replace with space if not set
		fmt.Fprint(writer, ConditionString(t.borders.Left, t.pColumn, SPACE))
		fmt.Fprint(writer, t.newLine)
	}
	//The new previous line is the current one
	previousLine = make([]string, total)
	for y := 0; y < total; y++ {
		previousLine[y] = strings.Join(columns[y], " ") //Store the full line for multi-lines cells
	}
	//Returns the newly added line and wether or not a border should be displayed above.
	return previousLine, displayCellBorder
}
// parseDimension splits str into display lines (wrapping to t.mW when
// autoWrap is on, optionally reflowing multiline text first), grows
// the recorded width of column colKey and the height of row rowKey,
// and returns the resulting lines.
func (t *Table) parseDimension(str string, colKey, rowKey int) []string {
	var (
		raw      []string
		maxWidth int
	)
	raw = getLines(str)
	maxWidth = 0
	for _, line := range raw {
		if w := DisplayWidth(line); w > maxWidth {
			maxWidth = w
		}
	}
	// If wrapping, ensure that all paragraphs in the cell fit in the
	// specified width.
	if t.autoWrap {
		// If there's a maximum allowed width for wrapping, use that.
		if maxWidth > t.mW {
			maxWidth = t.mW
		}
		// In the process of doing so, we need to recompute maxWidth. This
		// is because perhaps a word in the cell is longer than the
		// allowed maximum width in t.mW.
		newMaxWidth := maxWidth
		newRaw := make([]string, 0, len(raw))
		if t.reflowText {
			// Make a single paragraph of everything.
			raw = []string{strings.Join(raw, " ")}
		}
		for i, para := range raw {
			paraLines, _ := WrapString(para, maxWidth)
			for _, line := range paraLines {
				if w := DisplayWidth(line); w > newMaxWidth {
					newMaxWidth = w
				}
			}
			// Keep a blank line between paragraphs.
			if i > 0 {
				newRaw = append(newRaw, " ")
			}
			newRaw = append(newRaw, paraLines...)
		}
		raw = newRaw
		maxWidth = newMaxWidth
	}
	// Store the new known maximum width.
	v, ok := t.cs[colKey]
	if !ok || v < maxWidth || v == 0 {
		t.cs[colKey] = maxWidth
	}
	// Remember the number of lines for the row printer.
	h := len(raw)
	v, ok = t.rs[rowKey]
	if !ok || v < h || v == 0 {
		t.rs[rowKey] = h
	}
	//fmt.Printf("Raw %+v %d\n", raw, len(raw))
	return raw
}

93
vendor/github.com/olekukonko/tablewriter/util.go generated vendored Normal file
View file

@ -0,0 +1,93 @@
// Copyright 2014 Oleku Konko All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.
// This module is a Table Writer API for the Go Programming Language.
// The protocols were written in pure Go and works on windows and unix systems
package tablewriter
import (
"math"
"regexp"
"strings"
"github.com/mattn/go-runewidth"
)
// ansi matches ANSI escape sequences (colors etc.) so they can be
// stripped before measuring display width.
var ansi = regexp.MustCompile("\033\\[(?:[0-9]{1,3}(?:;[0-9]{1,3})*)?[m|K]")

// DisplayWidth returns the on-screen cell width of str, ignoring ANSI
// escape sequences (wide runes are counted by go-runewidth).
func DisplayWidth(str string) int {
	return runewidth.StringWidth(ansi.ReplaceAllLiteralString(str, ""))
}
// ConditionString returns valid when cond is true and inValid otherwise.
func ConditionString(cond bool, valid, inValid string) string {
	result := inValid
	if cond {
		result = valid
	}
	return result
}
// isNumOrSpace reports whether r is an ASCII digit or a space.
func isNumOrSpace(r rune) bool {
	switch {
	case r >= '0' && r <= '9':
		return true
	case r == ' ':
		return true
	}
	return false
}
// Title formats a header cell: underscores become spaces, dots become
// spaces unless they look like part of a number (e.g. "0.0"),
// surrounding whitespace is trimmed, and the result is upper-cased.
// A non-empty input never collapses to the empty string — a single
// space is kept so multi-line headers/footers preserve blank lines.
func Title(name string) string {
	origLen := len(name)
	rs := []rune(name)
	for i, r := range rs {
		switch r {
		case '_':
			rs[i] = ' '
		case '.':
			// ignore floating number 0.0
			if (i != 0 && !isNumOrSpace(rs[i-1])) || (i != len(rs)-1 && !isNumOrSpace(rs[i+1])) {
				rs[i] = ' '
			}
		}
	}
	name = string(rs)
	name = strings.TrimSpace(name)
	if len(name) == 0 && origLen > 0 {
		// Keep at least one character. This is important to preserve
		// empty lines in multi-line headers/footers.
		name = " "
	}
	return strings.ToUpper(name)
}
// Pad centers s within width display cells, splitting the padding
// across both sides (the right side gets the extra cell when the gap
// is odd). Strings already at or beyond width are returned unchanged.
func Pad(s, pad string, width int) string {
	gap := width - DisplayWidth(s)
	if gap > 0 {
		// BUG FIX (cosmetic): the old expression
		// int(math.Ceil(float64(gap / 2))) performed the integer
		// division first, so Ceil was a no-op and the value was really
		// floor(gap/2). Spell the floor out so the code says what it
		// does; output is unchanged.
		gapLeft := int(math.Floor(float64(gap) / 2))
		gapRight := gap - gapLeft
		return strings.Repeat(string(pad), gapLeft) + s + strings.Repeat(string(pad), gapRight)
	}
	return s
}
// PadRight left-aligns s by appending pad until the display width
// reaches width; strings already wide enough are returned unchanged.
func PadRight(s, pad string, width int) string {
	if gap := width - DisplayWidth(s); gap > 0 {
		return s + strings.Repeat(string(pad), gap)
	}
	return s
}
// PadLeft right-aligns s by prepending pad until the display width
// reaches width; strings already wide enough are returned unchanged.
func PadLeft(s, pad string, width int) string {
	if gap := width - DisplayWidth(s); gap > 0 {
		return strings.Repeat(string(pad), gap) + s
	}
	return s
}

2417
vendor/golang.org/x/net/html/parse.go generated vendored Normal file

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

601
vendor/golang.org/x/text/language/language.go generated vendored Normal file
View file

@ -0,0 +1,601 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run gen.go -output tables.go
package language
// TODO: Remove above NOTE after:
// - verifying that tables are dropped correctly (most notably matcher tables).
import (
"strings"
"golang.org/x/text/internal/language"
"golang.org/x/text/internal/language/compact"
)
// Tag represents a BCP 47 language tag. It is used to specify an instance of a
// specific language or locale. All language tag values are guaranteed to be
// well-formed.
type Tag compact.Tag

// makeTag wraps an internal language.Tag in the public compact form.
func makeTag(t language.Tag) (tag Tag) {
	return Tag(compact.Make(t))
}

// tag returns the internal representation of t.
func (t *Tag) tag() language.Tag {
	return (*compact.Tag)(t).Tag()
}

// isCompact reports whether t is stored in the compact (index) form.
func (t *Tag) isCompact() bool {
	return (*compact.Tag)(t).IsCompact()
}

// TODO: improve performance.
func (t *Tag) lang() language.Language { return t.tag().LangID }
func (t *Tag) region() language.Region { return t.tag().RegionID }
func (t *Tag) script() language.Script { return t.tag().ScriptID }
// Make is a convenience wrapper for Parse that omits the error.
// In case of an error, a sensible default is returned.
// It canonicalizes using the Default canonicalization type.
func Make(s string) Tag {
	return Default.Make(s)
}

// Make is a convenience wrapper for c.Parse that omits the error.
// In case of an error, a sensible default is returned.
func (c CanonType) Make(s string) Tag {
	t, _ := c.Parse(s)
	return t
}
// Raw returns the raw base language, script and region, without making an
// attempt to infer their values. (Contrast the Base, Script and Region
// methods, which infer likely subtags.)
func (t Tag) Raw() (b Base, s Script, r Region) {
	tt := t.tag()
	return Base{tt.LangID}, Script{tt.ScriptID}, Region{tt.RegionID}
}

// IsRoot returns true if t is equal to language "und".
func (t Tag) IsRoot() bool {
	return compact.Tag(t).IsRoot()
}
// CanonType can be used to enable or disable various types of canonicalization.
// The values are bit flags and may be combined with bitwise OR.
type CanonType int

const (
	// Replace deprecated base languages with their preferred replacements.
	DeprecatedBase CanonType = 1 << iota
	// Replace deprecated scripts with their preferred replacements.
	DeprecatedScript
	// Replace deprecated regions with their preferred replacements.
	DeprecatedRegion
	// Remove redundant scripts.
	SuppressScript
	// Normalize legacy encodings. This includes legacy languages defined in
	// CLDR as well as bibliographic codes defined in ISO-639.
	Legacy
	// Map the dominant language of a macro language group to the macro language
	// subtag. For example cmn -> zh.
	Macro
	// The CLDR flag should be used if full compatibility with CLDR is required.
	// There are a few cases where language.Tag may differ from CLDR. To follow all
	// of CLDR's suggestions, use All|CLDR.
	CLDR

	// Raw can be used to Compose or Parse without Canonicalization.
	Raw CanonType = 0

	// Replace all deprecated tags with their preferred replacements.
	Deprecated = DeprecatedBase | DeprecatedScript | DeprecatedRegion

	// All canonicalizations recommended by BCP 47.
	BCP47 = Deprecated | SuppressScript

	// All canonicalizations.
	All = BCP47 | Legacy | Macro

	// Default is the canonicalization used by Parse, Make and Compose. To
	// preserve as much information as possible, canonicalizations that remove
	// potentially valuable information are not included. The Matcher is
	// designed to recognize similar tags that would be the same if
	// they were canonicalized using All.
	Default = Deprecated | Legacy

	canonLang = DeprecatedBase | Legacy | Macro

	// TODO: LikelyScript, LikelyRegion: suppress similar to ICU.
)
// canonicalize returns the canonicalized equivalent of the tag and
// whether there was any change.
func canonicalize(c CanonType, t language.Tag) (language.Tag, bool) {
	if c == Raw {
		return t, false
	}
	changed := false
	// Drop the script when it is the language's default (suppress) script.
	if c&SuppressScript != 0 {
		if t.LangID.SuppressScript() == t.ScriptID {
			t.ScriptID = 0
			changed = true
		}
	}
	if c&canonLang != 0 {
		// Loop because a Deprecated replacement may itself need further
		// canonicalization (see the `continue` below).
		for {
			if l, aliasType := t.LangID.Canonicalize(); l != t.LangID {
				switch aliasType {
				case language.Legacy:
					if c&Legacy != 0 {
						if t.LangID == _sh && t.ScriptID == 0 {
							t.ScriptID = _Latn
						}
						t.LangID = l
						changed = true
					}
				case language.Macro:
					if c&Macro != 0 {
						// We deviate here from CLDR. The mapping "nb" -> "no"
						// qualifies as a typical Macro language mapping. However,
						// for legacy reasons, CLDR maps "no", the macro language
						// code for Norwegian, to the dominant variant "nb". This
						// change is currently under consideration for CLDR as well.
						// See https://unicode.org/cldr/trac/ticket/2698 and also
						// https://unicode.org/cldr/trac/ticket/1790 for some of the
						// practical implications. TODO: this check could be removed
						// if CLDR adopts this change.
						if c&CLDR == 0 || t.LangID != _nb {
							changed = true
							t.LangID = l
						}
					}
				case language.Deprecated:
					if c&DeprecatedBase != 0 {
						if t.LangID == _mo && t.RegionID == 0 {
							t.RegionID = _MD
						}
						t.LangID = l
						changed = true
						// Other canonicalization types may still apply.
						continue
					}
				}
			} else if c&Legacy != 0 && t.LangID == _no && c&CLDR != 0 {
				t.LangID = _nb
				changed = true
			}
			break
		}
	}
	// Replace the deprecated script code Qaai with Zinh.
	if c&DeprecatedScript != 0 {
		if t.ScriptID == _Qaai {
			changed = true
			t.ScriptID = _Zinh
		}
	}
	if c&DeprecatedRegion != 0 {
		if r := t.RegionID.Canonicalize(); r != t.RegionID {
			changed = true
			t.RegionID = r
		}
	}
	return t, changed
}
// Canonicalize returns the canonicalized equivalent of the tag.
func (c CanonType) Canonicalize(t Tag) (Tag, error) {
	// First try fast path.
	if t.isCompact() {
		if _, changed := canonicalize(c, compact.Tag(t).Tag()); !changed {
			return t, nil
		}
	}
	// It is unlikely that one will canonicalize a tag after matching. So do
	// a slow but simple approach here.
	if tag, changed := canonicalize(c, t.tag()); changed {
		tag.RemakeString()
		return makeTag(tag), nil
	}
	return t, nil
}
// Confidence indicates the level of certainty for a given return value.
// For example, Serbian may be written in Cyrillic or Latin script.
// The confidence level indicates whether a value was explicitly specified,
// whether it is typically the only possible value, or whether there is
// an ambiguity.
type Confidence int

const (
	No   Confidence = iota // full confidence that there was no match
	Low                    // most likely value picked out of a set of alternatives
	High                   // value is generally assumed to be the correct match
	Exact                  // exact match or explicitly specified value
)

// confName maps a Confidence value to its display name.
var confName = []string{"No", "Low", "High", "Exact"}

// String returns the display name of the Confidence level.
func (c Confidence) String() string {
	return confName[c]
}
// String returns the canonical string representation of the language tag.
func (t Tag) String() string {
	return t.tag().String()
}

// MarshalText implements encoding.TextMarshaler.
func (t Tag) MarshalText() (text []byte, err error) {
	return t.tag().MarshalText()
}

// UnmarshalText implements encoding.TextUnmarshaler.
// Note that t is overwritten with the parsed tag even when an error is
// returned.
func (t *Tag) UnmarshalText(text []byte) error {
	var tag language.Tag
	err := tag.UnmarshalText(text)
	*t = makeTag(tag)
	return err
}
// Base returns the base language of the language tag. If the base language is
// unspecified, an attempt will be made to infer it from the context.
// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
func (t Tag) Base() (Base, Confidence) {
	if b := t.lang(); b != 0 {
		return Base{b}, Exact
	}
	tt := t.tag()
	c := High
	// Without a script or a country region there is little context to
	// infer from, so lower the confidence.
	if tt.ScriptID == 0 && !tt.RegionID.IsCountry() {
		c = Low
	}
	if tag, err := tt.Maximize(); err == nil && tag.LangID != 0 {
		return Base{tag.LangID}, c
	}
	return Base{0}, No
}
// Script infers the script for the language tag. If it was not explicitly given, it will infer
// a most likely candidate.
// If more than one script is commonly used for a language, the most likely one
// is returned with a low confidence indication. For example, it returns (Cyrl, Low)
// for Serbian.
// If a script cannot be inferred (Zzzz, No) is returned. We do not use Zyyy (undetermined)
// as one would suspect from the IANA registry for BCP 47. In a Unicode context Zyyy marks
// common characters (like 1, 2, 3, '.', etc.) and is therefore more like multiple scripts.
// See https://www.unicode.org/reports/tr24/#Values for more details. Zzzz is also used for
// unknown value in CLDR. (Zzzz, Exact) is returned if Zzzz was explicitly specified.
// Note that an inferred script is never guaranteed to be the correct one. Latin is
// almost exclusively used for Afrikaans, but Arabic has been used for some texts
// in the past. Also, the script that is commonly used may change over time.
// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
func (t Tag) Script() (Script, Confidence) {
	if scr := t.script(); scr != 0 {
		return Script{scr}, Exact
	}
	tt := t.tag()
	sc, c := language.Script(_Zzzz), No
	if scr := tt.LangID.SuppressScript(); scr != 0 {
		// Note: it is not always the case that a language with a suppress
		// script value is only written in one script (e.g. kk, ms, pa).
		if tt.RegionID == 0 {
			return Script{scr}, High
		}
		sc, c = scr, High
	}
	if tag, err := tt.Maximize(); err == nil {
		if tag.ScriptID != sc {
			sc, c = tag.ScriptID, Low
		}
	} else {
		// Maximizing failed; retry after canonicalizing the tag.
		tt, _ = canonicalize(Deprecated|Macro, tt)
		if tag, err := tt.Maximize(); err == nil && tag.ScriptID != sc {
			sc, c = tag.ScriptID, Low
		}
	}
	return Script{sc}, c
}
// Region returns the region for the language tag. If it was not explicitly given, it will
// infer a most likely candidate from the context.
// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
func (t Tag) Region() (Region, Confidence) {
	if r := t.region(); r != 0 {
		return Region{r}, Exact
	}
	tt := t.tag()
	if tt, err := tt.Maximize(); err == nil {
		return Region{tt.RegionID}, Low // TODO: differentiate between high and low.
	}
	// NOTE(review): tt is shadowed inside the if above, so this
	// canonicalizes the original, unmaximized tag — appears intentional
	// (mirrors Script), but confirm.
	tt, _ = canonicalize(Deprecated|Macro, tt)
	if tag, err := tt.Maximize(); err == nil {
		return Region{tag.RegionID}, Low
	}
	return Region{_ZZ}, No // TODO: return world instead of undetermined?
}
// Variants returns the variants specified explicitly for this language tag,
// or nil if no variant was specified.
func (t Tag) Variants() []Variant {
	if !compact.Tag(t).MayHaveVariants() {
		return nil
	}
	v := []Variant{}
	// Walk the "-"-separated variant string token by token.
	x, str := "", t.tag().Variants()
	for str != "" {
		x, str = nextToken(str)
		v = append(v, Variant{x})
	}
	return v
}
// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a
// specific language are substituted with fields from the parent language.
// The parent for a language may change for newer versions of CLDR.
//
// Parent returns a tag for a less specific language that is mutually
// intelligible or Und if there is no such language. This may not be the same as
// simply stripping the last BCP 47 subtag. For instance, the parent of "zh-TW"
// is "zh-Hant", and the parent of "zh-Hant" is "und".
func (t Tag) Parent() Tag {
	return Tag(compact.Tag(t).Parent())
}
// nextToken returns the subtag that follows the leading "-" in s,
// and the unconsumed remainder (which keeps its own leading "-").
func nextToken(s string) (t, tail string) {
	rest := s[1:]
	if i := strings.Index(rest, "-"); i >= 0 {
		return rest[:i], rest[i:]
	}
	return rest, ""
}
// Extension is a single BCP 47 extension.
type Extension struct {
	s string
}

// String returns the string representation of the extension, including the
// type tag.
func (e Extension) String() string {
	return e.s
}

// ParseExtension parses s as an extension and returns it on success.
func ParseExtension(s string) (e Extension, err error) {
	ext, err := language.ParseExtension(s)
	return Extension{ext}, err
}

// Type returns the one-byte extension type of e. It returns 0 for the zero
// Extension.
func (e Extension) Type() byte {
	if e.s == "" {
		return 0
	}
	return e.s[0]
}

// Tokens returns the list of tokens of e.
func (e Extension) Tokens() []string {
	return strings.Split(e.s, "-")
}
// Extension returns the extension of type x for tag t. It will return
// false for ok if t does not have the requested extension. The returned
// extension will be invalid in this case.
func (t Tag) Extension(x byte) (ext Extension, ok bool) {
	// Fast path: compact tags known to carry no extensions.
	if !compact.Tag(t).MayHaveExtensions() {
		return Extension{}, false
	}
	e, ok := t.tag().Extension(x)
	return Extension{e}, ok
}

// Extensions returns all extensions of t.
func (t Tag) Extensions() []Extension {
	// Fast path: compact tags known to carry no extensions.
	if !compact.Tag(t).MayHaveExtensions() {
		return nil
	}
	e := []Extension{}
	for _, ext := range t.tag().Extensions() {
		e = append(e, Extension{ext})
	}
	return e
}
// TypeForKey returns the type associated with the given key, where key and type
// are of the allowed values defined for the Unicode locale extension ('u') in
// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
// TypeForKey will traverse the inheritance chain to get the correct value.
func (t Tag) TypeForKey(key string) string {
	if !compact.Tag(t).MayHaveExtensions() {
		// Short-circuit for compact tags without extensions —
		// presumably only the "rg" and "va" keys can still yield a
		// value there; confirm against the compact package.
		if key != "rg" && key != "va" {
			return ""
		}
	}
	return t.tag().TypeForKey(key)
}

// SetTypeForKey returns a new Tag with the key set to type, where key and type
// are of the allowed values defined for the Unicode locale extension ('u') in
// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
// An empty value removes an existing pair with the same key.
func (t Tag) SetTypeForKey(key, value string) (Tag, error) {
	tt, err := t.tag().SetTypeForKey(key, value)
	return makeTag(tt), err
}
// NumCompactTags is the number of compact tags. The maximum tag is
// NumCompactTags-1.
const NumCompactTags = compact.NumCompactTags

// CompactIndex returns an index, where 0 <= index < NumCompactTags, for tags
// for which data exists in the text repository. The index will change over
// time and should not be stored in persistent storage. If t does not match a
// compact index, exact will be false and the compact index will be returned
// for the first match after repeatedly taking the Parent of t.
func CompactIndex(t Tag) (index int, exact bool) {
	id, exact := compact.LanguageID(compact.Tag(t))
	return int(id), exact
}
// root is the zero language.Tag (the undetermined language; see IsRoot).
var root = language.Tag{}

// Base is an ISO 639 language code, used for encoding the base language
// of a language tag.
type Base struct {
	langID language.Language
}

// ParseBase parses a 2- or 3-letter ISO 639 code.
// It returns a ValueError if s is a well-formed but unknown language identifier
// or another error if another error occurred.
func ParseBase(s string) (Base, error) {
	l, err := language.ParseBase(s)
	return Base{l}, err
}

// String returns the BCP 47 representation of the base language.
func (b Base) String() string {
	return b.langID.String()
}

// ISO3 returns the ISO 639-3 language code.
func (b Base) ISO3() string {
	return b.langID.ISO3()
}

// IsPrivateUse reports whether this language code is reserved for private use.
func (b Base) IsPrivateUse() bool {
	return b.langID.IsPrivateUse()
}
// Script is a 4-letter ISO 15924 code for representing scripts.
// It is idiomatically represented in title case.
type Script struct {
scriptID language.Script
}
// ParseScript parses a 4-letter ISO 15924 code.
// It returns a ValueError if s is a well-formed but unknown script identifier
// or another error if another error occurred.
func ParseScript(s string) (Script, error) {
sc, err := language.ParseScript(s)
return Script{sc}, err
}
// String returns the script code in title case.
// It returns "Zzzz" for an unspecified script.
func (s Script) String() string {
return s.scriptID.String()
}
// IsPrivateUse reports whether this script code is reserved for private use.
func (s Script) IsPrivateUse() bool {
return s.scriptID.IsPrivateUse()
}
// Region is an ISO 3166-1 or UN M.49 code for representing countries and regions.
type Region struct {
regionID language.Region
}
// EncodeM49 returns the Region for the given UN M.49 code.
// It returns an error if r is not a valid code.
func EncodeM49(r int) (Region, error) {
rid, err := language.EncodeM49(r)
return Region{rid}, err
}
// ParseRegion parses a 2- or 3-letter ISO 3166-1 or a UN M.49 code.
// It returns a ValueError if s is a well-formed but unknown region identifier
// or another error if another error occurred.
func ParseRegion(s string) (Region, error) {
r, err := language.ParseRegion(s)
return Region{r}, err
}
// String returns the BCP 47 representation for the region.
// It returns "ZZ" for an unspecified region.
func (r Region) String() string {
return r.regionID.String()
}
// ISO3 returns the 3-letter ISO code of r.
// Note that not all regions have a 3-letter ISO code.
// In such cases this method returns "ZZZ".
func (r Region) ISO3() string {
return r.regionID.ISO3()
}
// M49 returns the UN M.49 encoding of r, or 0 if this encoding
// is not defined for r.
func (r Region) M49() int {
return r.regionID.M49()
}
// IsPrivateUse reports whether r has the ISO 3166 User-assigned status. This
// may include private-use tags that are assigned by CLDR and used in this
// implementation. So IsPrivateUse and IsCountry can be simultaneously true.
func (r Region) IsPrivateUse() bool {
return r.regionID.IsPrivateUse()
}
// IsCountry returns whether this region is a country or autonomous area. This
// includes non-standard definitions from CLDR.
func (r Region) IsCountry() bool {
return r.regionID.IsCountry()
}
// IsGroup returns whether this region defines a collection of regions. This
// includes non-standard definitions from CLDR.
func (r Region) IsGroup() bool {
return r.regionID.IsGroup()
}
// Contains returns whether Region c is contained by Region r. It returns true
// if c == r.
func (r Region) Contains(c Region) bool {
return r.regionID.Contains(c.regionID)
}
// TLD returns the country code top-level domain (ccTLD). UK is returned for GB.
// In all other cases it returns either the region itself or an error.
//
// This method may return an error for a region for which there exists a
// canonical form with a ccTLD. To get that ccTLD canonicalize r first. The
// region will already be canonicalized it was obtained from a Tag that was
// obtained using any of the default methods.
func (r Region) TLD() (Region, error) {
tld, err := r.regionID.TLD()
return Region{tld}, err
}
// Canonicalize returns the region or a possible replacement if the region is
// deprecated. It will not return a replacement for deprecated regions that
// are split into multiple regions.
func (r Region) Canonicalize() Region {
return Region{r.regionID.Canonicalize()}
}
// Variant represents a registered variant of a language as defined by BCP 47.
type Variant struct {
variant string
}
// ParseVariant parses and returns a Variant. An error is returned if s is not
// a valid variant.
func ParseVariant(s string) (Variant, error) {
v, err := language.ParseVariant(s)
return Variant{v.String()}, err
}
// String returns the string representation of the variant.
func (v Variant) String() string {
return v.variant
}

705
vendor/golang.org/x/text/transform/transform.go generated vendored Normal file
View file

@ -0,0 +1,705 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package transform provides reader and writer wrappers that transform the
// bytes passing through as well as various transformations. Example
// transformations provided by other packages include normalization and
// conversion between character sets.
package transform // import "golang.org/x/text/transform"
import (
"bytes"
"errors"
"io"
"unicode/utf8"
)
var (
	// ErrShortDst means that the destination buffer was too short to
	// receive all of the transformed bytes.
	ErrShortDst = errors.New("transform: short destination buffer")

	// ErrShortSrc means that the source buffer has insufficient data to
	// complete the transformation.
	ErrShortSrc = errors.New("transform: short source buffer")

	// ErrEndOfSpan means that the input and output (the transformed input)
	// are not identical.
	ErrEndOfSpan = errors.New("transform: input and output are not identical")

	// errInconsistentByteCount means that Transform returned success (nil
	// error) but also returned nSrc inconsistent with the src argument.
	errInconsistentByteCount = errors.New("transform: inconsistent byte count returned")

	// errShortInternal means that an internal buffer is not large enough
	// to make progress and the Transform operation must be aborted.
	errShortInternal = errors.New("transform: short internal buffer")
)

// Transformer transforms bytes.
type Transformer interface {
	// Transform writes to dst the transformed bytes read from src, and
	// returns the number of dst bytes written and src bytes read. The
	// atEOF argument tells whether src represents the last bytes of the
	// input.
	//
	// Callers should always process the nDst bytes produced and account
	// for the nSrc bytes consumed before considering the error err.
	//
	// A nil error means that all of the transformed bytes (whether freshly
	// transformed from src or left over from previous Transform calls)
	// were written to dst. A nil error can be returned regardless of
	// whether atEOF is true. If err is nil then nSrc must equal len(src);
	// the converse is not necessarily true.
	//
	// ErrShortDst means that dst was too short to receive all of the
	// transformed bytes. ErrShortSrc means that src had insufficient data
	// to complete the transformation. If both conditions apply, then
	// either error may be returned. Other than the error conditions listed
	// here, implementations are free to report other errors that arise.
	Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error)

	// Reset resets the state and allows a Transformer to be reused.
	Reset()
}

// SpanningTransformer extends the Transformer interface with a Span method
// that determines how much of the input already conforms to the Transformer.
type SpanningTransformer interface {
	Transformer

	// Span returns a position in src such that transforming src[:n] results in
	// identical output src[:n] for these bytes. It does not necessarily return
	// the largest such n. The atEOF argument tells whether src represents the
	// last bytes of the input.
	//
	// Callers should always account for the n bytes consumed before
	// considering the error err.
	//
	// A nil error means that all input bytes are known to be identical to the
	// output produced by the Transformer. A nil error can be returned
	// regardless of whether atEOF is true. If err is nil, then n must
	// equal len(src); the converse is not necessarily true.
	//
	// ErrEndOfSpan means that the Transformer output may differ from the
	// input after n bytes. Note that n may be len(src), meaning that the output
	// would contain additional bytes after otherwise identical output.
	// ErrShortSrc means that src had insufficient data to determine whether the
	// remaining bytes would change. Other than the error conditions listed
	// here, implementations are free to report other errors that arise.
	//
	// Calling Span can modify the Transformer state as a side effect. In
	// effect, it does the transformation just as calling Transform would, only
	// without copying to a destination buffer and only up to a point it can
	// determine the input and output bytes are the same. This is obviously more
	// limited than calling Transform, but can be more efficient in terms of
	// copying and allocating buffers. Calls to Span and Transform may be
	// interleaved.
	Span(src []byte, atEOF bool) (n int, err error)
}

// NopResetter can be embedded by implementations of Transformer to add a nop
// Reset method. Useful for stateless Transformers.
type NopResetter struct{}

// Reset implements the Reset method of the Transformer interface.
func (NopResetter) Reset() {}

// Reader wraps another io.Reader by transforming the bytes read.
type Reader struct {
	r   io.Reader   // underlying source of untransformed bytes
	t   Transformer // transformation applied to bytes read from r
	err error       // deferred error, reported once the transformation completes

	// dst[dst0:dst1] contains bytes that have been transformed by t but
	// not yet copied out via Read.
	dst        []byte
	dst0, dst1 int

	// src[src0:src1] contains bytes that have been read from r but not
	// yet transformed through t.
	src        []byte
	src0, src1 int

	// transformComplete is whether the transformation is complete,
	// regardless of whether or not it was successful.
	transformComplete bool
}

// defaultBufSize is the size, in bytes, of the internal dst and src buffers
// allocated by NewReader and NewWriter.
const defaultBufSize = 4096
// NewReader returns a new Reader that wraps r by transforming the bytes read
// via t. It calls Reset on t.
func NewReader(r io.Reader, t Transformer) *Reader {
	t.Reset()
	reader := &Reader{r: r, t: t}
	reader.dst = make([]byte, defaultBufSize)
	reader.src = make([]byte, defaultBufSize)
	return reader
}
// Read implements the io.Reader interface. Each call alternates between
// draining already-transformed bytes into p and refilling the internal
// buffers by reading from r.r and transforming through r.t.
func (r *Reader) Read(p []byte) (int, error) {
	n, err := 0, error(nil)
	for {
		// Copy out any transformed bytes and return the final error if we are done.
		if r.dst0 != r.dst1 {
			n = copy(p, r.dst[r.dst0:r.dst1])
			r.dst0 += n
			if r.dst0 == r.dst1 && r.transformComplete {
				return n, r.err
			}
			return n, nil
		} else if r.transformComplete {
			return 0, r.err
		}

		// Try to transform some source bytes, or to flush the transformer if we
		// are out of source bytes. We do this even if r.r.Read returned an error.
		// As the io.Reader documentation says, "process the n > 0 bytes returned
		// before considering the error".
		if r.src0 != r.src1 || r.err != nil {
			r.dst0 = 0
			// atEOF is true only once the underlying reader reported io.EOF.
			r.dst1, n, err = r.t.Transform(r.dst, r.src[r.src0:r.src1], r.err == io.EOF)
			r.src0 += n

			switch {
			case err == nil:
				// A nil Transform error requires all of src to be consumed.
				if r.src0 != r.src1 {
					r.err = errInconsistentByteCount
				}
				// The Transform call was successful; we are complete if we
				// cannot read more bytes into src.
				r.transformComplete = r.err != nil
				continue
			case err == ErrShortDst && (r.dst1 != 0 || n != 0):
				// Make room in dst by copying out, and try again.
				continue
			case err == ErrShortSrc && r.src1-r.src0 != len(r.src) && r.err == nil:
				// Read more bytes into src via the code below, and try again.
			default:
				r.transformComplete = true
				// The reader error (r.err) takes precedence over the
				// transformer error (err) unless r.err is nil or io.EOF.
				if r.err == nil || r.err == io.EOF {
					r.err = err
				}
				continue
			}
		}

		// Move any untransformed source bytes to the start of the buffer
		// and read more bytes.
		if r.src0 != 0 {
			r.src0, r.src1 = 0, copy(r.src, r.src[r.src0:r.src1])
		}
		n, r.err = r.r.Read(r.src[r.src1:])
		r.src1 += n
	}
}

// TODO: implement ReadByte (and ReadRune??).
// Writer wraps another io.Writer by transforming the bytes read.
// The user needs to call Close to flush unwritten bytes that may
// be buffered.
type Writer struct {
	w   io.Writer   // underlying destination for transformed bytes
	t   Transformer // transformation applied before writing to w
	dst []byte      // scratch buffer holding transformed output prior to writing

	// src[:n] contains bytes that have not yet passed through t.
	src []byte
	n   int
}
// NewWriter returns a new Writer that wraps w by transforming the bytes written
// via t. It calls Reset on t.
func NewWriter(w io.Writer, t Transformer) *Writer {
	t.Reset()
	writer := &Writer{w: w, t: t}
	writer.dst = make([]byte, defaultBufSize)
	writer.src = make([]byte, defaultBufSize)
	return writer
}
// Write implements the io.Writer interface. If there are not enough
// bytes available to complete a Transform, the bytes will be buffered
// for the next write. Call Close to convert the remaining bytes.
func (w *Writer) Write(data []byte) (n int, err error) {
	src := data
	if w.n > 0 {
		// Append bytes from data to the last remainder.
		// TODO: limit the amount copied on first try.
		n = copy(w.src[w.n:], data)
		w.n += n
		src = w.src[:w.n]
	}
	for {
		// Note: err here shadows the named return; the switch below decides
		// whether to keep looping or return it.
		nDst, nSrc, err := w.t.Transform(w.dst, src, false)
		if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
			return n, werr
		}
		src = src[nSrc:]
		if w.n == 0 {
			n += nSrc
		} else if len(src) <= n {
			// Enough bytes from w.src have been consumed. We make src point
			// to data instead to reduce the copying.
			w.n = 0
			n -= len(src)
			src = data[n:]
			if n < len(data) && (err == nil || err == ErrShortSrc) {
				continue
			}
		}
		switch err {
		case ErrShortDst:
			// This error is okay as long as we are making progress.
			if nDst > 0 || nSrc > 0 {
				continue
			}
		case ErrShortSrc:
			if len(src) < len(w.src) {
				// Buffer the remainder for the next Write (or Close).
				m := copy(w.src, src)
				// If w.n > 0, bytes from data were already copied to w.src and n
				// was already set to the number of bytes consumed.
				if w.n == 0 {
					n += m
				}
				w.n = m
				err = nil
			} else if nDst > 0 || nSrc > 0 {
				// Not enough buffer to store the remainder. Keep processing as
				// long as there is progress. Without this case, transforms that
				// require a lookahead larger than the buffer may result in an
				// error. This is not something one may expect to be common in
				// practice, but it may occur when buffers are set to small
				// sizes during testing.
				continue
			}
		case nil:
			// A nil Transform error must consume all buffered input.
			if w.n > 0 {
				err = errInconsistentByteCount
			}
		}
		return n, err
	}
}
// Close implements the io.Closer interface. It flushes all remaining
// buffered input through the Transformer (with atEOF set to true) and
// writes the result to the underlying writer.
func (w *Writer) Close() error {
	src := w.src[:w.n]
	for {
		nDst, nSrc, err := w.t.Transform(w.dst, src, true)
		if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
			return werr
		}
		if err != ErrShortDst {
			// Either success (nil) or a non-recoverable error: done flushing.
			return err
		}
		src = src[nSrc:]
	}
}
// nop is a SpanningTransformer that passes bytes through unchanged.
type nop struct{ NopResetter }

// Transform copies as much of src into dst as fits, reporting ErrShortDst
// when dst cannot hold all of src.
func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	copied := copy(dst, src)
	if copied == len(src) {
		return copied, copied, nil
	}
	return copied, copied, ErrShortDst
}

// Span reports that all of src is identical to the (unchanged) output.
func (nop) Span(src []byte, atEOF bool) (n int, err error) {
	return len(src), nil
}
// discard is a Transformer that consumes all input and produces no output.
type discard struct{ NopResetter }

// Transform implements Transformer by dropping all of src.
func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	return 0, len(src), nil
}

var (
	// Discard is a Transformer for which all Transform calls succeed
	// by consuming all bytes and writing nothing.
	Discard Transformer = discard{}

	// Nop is a SpanningTransformer that copies src to dst.
	Nop SpanningTransformer = nop{}
)

// chain is a sequence of links. A chain with N Transformers has N+1 links and
// N+1 buffers. Of those N+1 buffers, the first and last are the src and dst
// buffers given to chain.Transform and the middle N-1 buffers are intermediate
// buffers owned by the chain. The i'th link transforms bytes from the i'th
// buffer chain.link[i].b at read offset chain.link[i].p to the i+1'th buffer
// chain.link[i+1].b at write offset chain.link[i+1].n, for i in [0, N).
type chain struct {
	link []link
	err  error

	// errStart is the index at which the error occurred plus 1. Processing
	// resumes at this level at the next call to Transform. As long as
	// errStart > 0, chain will not consume any more source bytes.
	errStart int
}
// fatalError records an unrecoverable error at the given link index, keeping
// only the error that occurred furthest along the chain.
func (c *chain) fatalError(errIndex int, err error) {
	i := errIndex + 1
	if i <= c.errStart {
		return
	}
	c.errStart = i
	c.err = err
}
// link pairs one buffer in a chain with the Transformer that reads from it.
type link struct {
	t Transformer
	// b[p:n] holds the bytes to be transformed by t.
	b []byte
	p int
	n int
}

// src returns the bytes of this link's buffer still awaiting transformation.
func (l *link) src() []byte {
	return l.b[l.p:l.n]
}

// dst returns the unused tail of this link's buffer, available for the
// upstream Transformer to write into.
func (l *link) dst() []byte {
	return l.b[l.n:]
}
// Chain returns a Transformer that applies t in sequence.
func Chain(t ...Transformer) Transformer {
	if len(t) == 0 {
		return nop{}
	}
	c := &chain{link: make([]link, len(t)+1)}
	for i := range t {
		c.link[i].t = t[i]
	}
	// Allocate intermediate buffers. The first and last links use the
	// caller-supplied src and dst buffers, so only len(t)-1 are needed.
	buffers := make([][defaultBufSize]byte, len(t)-1)
	for i := range buffers {
		c.link[i+1].b = buffers[i][:]
	}
	return c
}
// Reset resets the state of Chain. It calls Reset on all the Transformers.
func (c *chain) Reset() {
	for i := range c.link {
		if t := c.link[i].t; t != nil {
			t.Reset()
		}
		c.link[i].p, c.link[i].n = 0, 0
	}
}
// TODO: make chain use Span (is going to be fun to implement!)

// Transform applies the transformers of c in sequence. See the chain type
// comment for the buffer layout between links.
func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	// Set up src and dst in the chain.
	srcL := &c.link[0]
	dstL := &c.link[len(c.link)-1]
	srcL.b, srcL.p, srcL.n = src, 0, len(src)
	dstL.b, dstL.n = dst, 0
	var lastFull, needProgress bool // for detecting progress

	// i is the index of the next Transformer to apply, for i in [low, high].
	// low is the lowest index for which c.link[low] may still produce bytes.
	// high is the highest index for which c.link[high] has a Transformer.
	// The error returned by Transform determines whether to increase or
	// decrease i. We try to completely fill a buffer before converting it.
	for low, i, high := c.errStart, c.errStart, len(c.link)-2; low <= i && i <= high; {
		in, out := &c.link[i], &c.link[i+1]
		nDst, nSrc, err0 := in.t.Transform(out.dst(), in.src(), atEOF && low == i)
		out.n += nDst
		in.p += nSrc
		// Recycle a fully drained internal buffer (never the caller's src at i == 0).
		if i > 0 && in.p == in.n {
			in.p, in.n = 0, 0
		}
		needProgress, lastFull = lastFull, false
		switch err0 {
		case ErrShortDst:
			// Process the destination buffer next. Return if we are already
			// at the high index.
			if i == high {
				return dstL.n, srcL.p, ErrShortDst
			}
			if out.n != 0 {
				i++
				// If the Transformer at the next index is not able to process any
				// source bytes there is nothing that can be done to make progress
				// and the bytes will remain unprocessed. lastFull is used to
				// detect this and break out of the loop with a fatal error.
				lastFull = true
				continue
			}
			// The destination buffer was too small, but is completely empty.
			// Return a fatal error as this transformation can never complete.
			c.fatalError(i, errShortInternal)
		case ErrShortSrc:
			if i == 0 {
				// Save ErrShortSrc in err. All other errors take precedence.
				err = ErrShortSrc
				break
			}
			// Source bytes were depleted before filling up the destination buffer.
			// Verify we made some progress, move the remaining bytes to the errStart
			// and try to get more source bytes.
			if needProgress && nSrc == 0 || in.n-in.p == len(in.b) {
				// There were not enough source bytes to proceed while the source
				// buffer cannot hold any more bytes. Return a fatal error as this
				// transformation can never complete.
				c.fatalError(i, errShortInternal)
				break
			}
			// in.b is an internal buffer and we can make progress.
			in.p, in.n = 0, copy(in.b, in.src())
			fallthrough
		case nil:
			// if i == low, we have depleted the bytes at index i or any lower levels.
			// In that case we increase low and i. In all other cases we decrease i to
			// fetch more bytes before proceeding to the next index.
			if i > low {
				i--
				continue
			}
		default:
			// A transformer-specific error: record it as fatal for this level.
			c.fatalError(i, err0)
		}
		// Exhausted level low or fatal error: increase low and continue
		// to process the bytes accepted so far.
		i++
		low = i
	}

	// If c.errStart > 0, this means we found a fatal error. We will clear
	// all upstream buffers. At this point, no more progress can be made
	// downstream, as Transform would have bailed while handling ErrShortDst.
	if c.errStart > 0 {
		for i := 1; i < c.errStart; i++ {
			c.link[i].p, c.link[i].n = 0, 0
		}
		err, c.errStart, c.err = c.err, 0, nil
	}
	return dstL.n, srcL.p, err
}
// RemoveFunc returns a Transformer that removes from the input all runes r
// for which f(r) is true.
//
// Deprecated: Use runes.Remove instead.
func RemoveFunc(f func(r rune) bool) Transformer {
	return removeF(f)
}

// removeF adapts a rune predicate into a Transformer that drops all runes
// for which the predicate returns true.
type removeF func(r rune) bool

// Reset implements the Transformer interface; removeF is stateless.
func (removeF) Reset() {}

// Transform implements the Transformer interface.
func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	for r, sz := rune(0), 0; len(src) > 0; src = src[sz:] {

		if r = rune(src[0]); r < utf8.RuneSelf {
			sz = 1
		} else {
			r, sz = utf8.DecodeRune(src)

			if sz == 1 {
				// Invalid rune.
				if !atEOF && !utf8.FullRune(src) {
					err = ErrShortSrc
					break
				}
				// We replace illegal bytes with RuneError. Not doing so might
				// otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
				// The resulting byte sequence may subsequently contain runes
				// for which t(r) is true that were passed unnoticed.
				if !t(r) {
					if nDst+3 > len(dst) {
						err = ErrShortDst
						break
					}
					// "\uFFFD" (utf8.RuneError) encodes to exactly 3 bytes.
					nDst += copy(dst[nDst:], "\uFFFD")
				}
				nSrc++
				continue
			}
		}

		if !t(r) {
			if nDst+sz > len(dst) {
				err = ErrShortDst
				break
			}
			nDst += copy(dst[nDst:], src[:sz])
		}
		nSrc += sz
	}
	return
}
// grow returns a new []byte that is longer than b, and copies the first n bytes
// of b to the start of the new slice. Growth policy: at least 64 bytes,
// doubling for buffers up to 256 bytes, and growing by 50% beyond that.
func grow(b []byte, n int) []byte {
	size := len(b)
	switch {
	case size <= 32:
		size = 64
	case size <= 256:
		size *= 2
	default:
		size += size >> 1
	}
	out := make([]byte, size)
	copy(out, b[:n])
	return out
}
// initialBufSize is the starting chunk size used by String before the
// buffers are grown via grow.
const initialBufSize = 128

// String returns a string with the result of converting s[:n] using t, where
// n <= len(s). If err == nil, n will be len(s). It calls Reset on t.
func String(t Transformer, s string) (result string, n int, err error) {
	t.Reset()
	if s == "" {
		// Fast path for the common case for empty input. Results in about a
		// 86% reduction of running time for BenchmarkStringLowerEmpty.
		if _, _, err := t.Transform(nil, nil, true); err == nil {
			return "", 0, nil
		}
	}

	// Allocate only once. Note that both dst and src escape when passed to
	// Transform.
	buf := [2 * initialBufSize]byte{}
	dst := buf[:initialBufSize:initialBufSize]
	src := buf[initialBufSize : 2*initialBufSize]

	// The input string s is transformed in multiple chunks (starting with a
	// chunk size of initialBufSize). nDst and nSrc are per-chunk (or
	// per-Transform-call) indexes, pDst and pSrc are overall indexes.
	nDst, nSrc := 0, 0
	pDst, pSrc := 0, 0

	// pPrefix is the length of a common prefix: the first pPrefix bytes of the
	// result will equal the first pPrefix bytes of s. It is not guaranteed to
	// be the largest such value, but if pPrefix, len(result) and len(s) are
	// all equal after the final transform (i.e. calling Transform with atEOF
	// being true returned nil error) then we don't need to allocate a new
	// result string.
	pPrefix := 0
	for {
		// Invariant: pDst == pPrefix && pSrc == pPrefix.

		n := copy(src, s[pSrc:])
		nDst, nSrc, err = t.Transform(dst, src[:n], pSrc+n == len(s))
		pDst += nDst
		pSrc += nSrc

		// TODO: let transformers implement an optional Spanner interface, akin
		// to norm's QuickSpan. This would even allow us to avoid any allocation.
		if !bytes.Equal(dst[:nDst], src[:nSrc]) {
			// Output started to differ from input: leave the fast path.
			break
		}
		pPrefix = pSrc
		if err == ErrShortDst {
			// A buffer can only be short if a transformer modifies its input.
			break
		} else if err == ErrShortSrc {
			if nSrc == 0 {
				// No progress was made.
				break
			}
			// Equal so far and !atEOF, so continue checking.
		} else if err != nil || pPrefix == len(s) {
			// Result is identical to the input: no new allocation needed.
			return string(s[:pPrefix]), pPrefix, err
		}
	}
	// Post-condition: pDst == pPrefix + nDst && pSrc == pPrefix + nSrc.

	// We have transformed the first pSrc bytes of the input s to become pDst
	// transformed bytes. Those transformed bytes are discontiguous: the first
	// pPrefix of them equal s[:pPrefix] and the last nDst of them equal
	// dst[:nDst]. We copy them around, into a new dst buffer if necessary, so
	// that they become one contiguous slice: dst[:pDst].
	if pPrefix != 0 {
		newDst := dst
		if pDst > len(newDst) {
			newDst = make([]byte, len(s)+nDst-nSrc)
		}
		copy(newDst[pPrefix:pDst], dst[:nDst])
		copy(newDst[:pPrefix], s[:pPrefix])
		dst = newDst
	}

	// Prevent duplicate Transform calls with atEOF being true at the end of
	// the input. Also return if we have an unrecoverable error.
	if (err == nil && pSrc == len(s)) ||
		(err != nil && err != ErrShortDst && err != ErrShortSrc) {
		return string(dst[:pDst]), pSrc, err
	}

	// Transform the remaining input, growing dst and src buffers as necessary.
	for {
		n := copy(src, s[pSrc:])
		nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s))
		pDst += nDst
		pSrc += nSrc

		// If we got ErrShortDst or ErrShortSrc, do not grow as long as we can
		// make progress. This may avoid excessive allocations.
		if err == ErrShortDst {
			if nDst == 0 {
				dst = grow(dst, pDst)
			}
		} else if err == ErrShortSrc {
			if nSrc == 0 {
				src = grow(src, 0)
			}
		} else if err != nil || pSrc == len(s) {
			return string(dst[:pDst]), pSrc, err
		}
	}
}

// Bytes returns a new byte slice with the result of converting b[:n] using t,
// where n <= len(b). If err == nil, n will be len(b). It calls Reset on t.
func Bytes(t Transformer, b []byte) (result []byte, n int, err error) {
	return doAppend(t, 0, make([]byte, len(b)), b)
}
// Append appends the result of converting src[:n] using t to dst, where
// n <= len(src). If err == nil, n will be len(src). It calls Reset on t.
func Append(t Transformer, dst, src []byte) (result []byte, n int, err error) {
	if len(dst) == cap(dst) {
		// No spare capacity: move dst into a larger buffer first. A total
		// size of 0 is fine; doAppend grows as needed.
		total := len(src) + len(dst)
		grown := make([]byte, total)
		dst = grown[:copy(grown, dst)]
	}
	return doAppend(t, len(dst), dst[:cap(dst)], src)
}
// doAppend transforms src through t, writing the output into dst starting at
// offset pDst and growing dst as needed. It returns the filled prefix of dst,
// the number of src bytes consumed, and any non-recoverable error.
func doAppend(t Transformer, pDst int, dst, src []byte) (result []byte, n int, err error) {
	t.Reset()
	pSrc := 0
	for {
		nDst, nSrc, err := t.Transform(dst[pDst:], src[pSrc:], true)
		pDst += nDst
		pSrc += nSrc
		if err != ErrShortDst {
			// Success (nil) or a non-recoverable error: done.
			return dst[:pDst], pSrc, err
		}

		// Grow the destination buffer, but do not grow as long as we can make
		// progress. This may avoid excessive allocations.
		if nDst == 0 {
			dst = grow(dst, pDst)
		}
	}
}

535
vendor/jaytaylor.com/html2text/html2text.go generated vendored Normal file
View file

@ -0,0 +1,535 @@
package html2text
import (
"bytes"
"io"
"regexp"
"strings"
"unicode"
"github.com/olekukonko/tablewriter"
"github.com/ssor/bom"
"golang.org/x/net/html"
"golang.org/x/net/html/atom"
)
// Options provide toggles and overrides to control specific rendering behaviors.
type Options struct {
	PrettyTables        bool                 // Turns on pretty ASCII rendering for table elements.
	PrettyTablesOptions *PrettyTablesOptions // Configures pretty ASCII rendering for table elements.
	OmitLinks           bool                 // Turns on omitting links.
}

// PrettyTablesOptions overrides tablewriter behaviors; each field mirrors the
// corresponding tablewriter setter applied in handleTableElement.
type PrettyTablesOptions struct {
	AutoFormatHeader     bool
	AutoWrapText         bool
	ReflowDuringAutoWrap bool
	ColWidth             int
	ColumnSeparator      string
	RowSeparator         string
	CenterSeparator      string
	HeaderAlignment      int
	FooterAlignment      int
	Alignment            int
	ColumnAlignment      []int
	NewLine              string
	HeaderLine           bool
	RowLine              bool
	AutoMergeCells       bool
	Borders              tablewriter.Border
}

// NewPrettyTablesOptions creates PrettyTablesOptions with default settings,
// mirroring tablewriter's own defaults.
func NewPrettyTablesOptions() *PrettyTablesOptions {
	return &PrettyTablesOptions{
		AutoFormatHeader:     true,
		AutoWrapText:         true,
		ReflowDuringAutoWrap: true,
		ColWidth:             tablewriter.MAX_ROW_WIDTH,
		ColumnSeparator:      tablewriter.COLUMN,
		RowSeparator:         tablewriter.ROW,
		CenterSeparator:      tablewriter.CENTER,
		HeaderAlignment:      tablewriter.ALIGN_DEFAULT,
		FooterAlignment:      tablewriter.ALIGN_DEFAULT,
		Alignment:            tablewriter.ALIGN_DEFAULT,
		ColumnAlignment:      []int{},
		NewLine:              tablewriter.NEWLINE,
		HeaderLine:           true,
		RowLine:              false,
		AutoMergeCells:       false,
		Borders:              tablewriter.Border{Left: true, Right: true, Bottom: true, Top: true},
	}
}
// FromHTMLNode renders text output from a pre-parsed HTML document.
func FromHTMLNode(doc *html.Node, o ...Options) (string, error) {
	ctx := textifyTraverseContext{buf: bytes.Buffer{}}
	if len(o) > 0 {
		ctx.options = o[0]
	}
	if err := ctx.traverse(doc); err != nil {
		return "", err
	}

	// Strip spaces following newlines, collapse blank-line runs, and trim.
	raw := strings.Replace(ctx.buf.String(), "\n ", "\n", -1)
	return strings.TrimSpace(newlineRe.ReplaceAllString(raw, "\n\n")), nil
}
// FromReader renders text output after parsing HTML for the specified
// io.Reader.
func FromReader(reader io.Reader, options ...Options) (string, error) {
	stripped, err := bom.NewReaderWithoutBom(reader)
	if err != nil {
		return "", err
	}
	root, err := html.Parse(stripped)
	if err != nil {
		return "", err
	}
	return FromHTMLNode(root, options...)
}
// FromString parses HTML from the input string, then renders the text form.
func FromString(input string, options ...Options) (string, error) {
	cleaned := bom.CleanBom([]byte(input))
	// FromReader already returns "" alongside any error, so its result can
	// be passed through directly.
	return FromReader(bytes.NewReader(cleaned), options...)
}
var (
	// spacingRe matches runs of whitespace to be collapsed.
	spacingRe = regexp.MustCompile(`[ \r\n\t]+`)

	// newlineRe matches runs of two or more newlines, collapsed to one blank line.
	newlineRe = regexp.MustCompile(`\n\n+`)
)

// textifyTraverseContext holds text-related context while rendering the HTML
// node tree to text.
type textifyTraverseContext struct {
	buf bytes.Buffer // accumulated text output

	prefix          string               // prepended to each emitted line (e.g. "> " inside blockquotes)
	tableCtx        tableTraverseContext // state for the table currently being collected
	options         Options              // rendering options supplied by the caller
	endsWithSpace   bool
	justClosedDiv   bool // true immediately after a div was rendered; suppresses doubled newlines
	blockquoteLevel int  // current blockquote nesting depth
	lineLength      int
	isPre           bool // true while inside a <pre> subtree
}

// tableTraverseContext holds table ASCII-form related context.
type tableTraverseContext struct {
	header     []string
	body       [][]string
	footer     []string
	tmpRow     int
	isInFooter bool
}

// init clears all accumulated table state so a new table can be collected.
func (tableCtx *tableTraverseContext) init() {
	tableCtx.body = [][]string{}
	tableCtx.header = []string{}
	tableCtx.footer = []string{}
	tableCtx.isInFooter = false
	tableCtx.tmpRow = 0
}
// handleElement renders a single HTML element node to its text form,
// dispatching on the element type and recursing into children as needed.
func (ctx *textifyTraverseContext) handleElement(node *html.Node) error {
	ctx.justClosedDiv = false

	switch node.DataAtom {
	case atom.Br:
		return ctx.emit("\n")

	case atom.H1, atom.H2, atom.H3:
		// Render the heading into a sub-context so its width can be measured
		// for the divider line.
		subCtx := textifyTraverseContext{}
		if err := subCtx.traverseChildren(node); err != nil {
			return err
		}

		str := subCtx.buf.String()
		dividerLen := 0
		for _, line := range strings.Split(str, "\n") {
			if lineLen := len([]rune(line)); lineLen-1 > dividerLen {
				dividerLen = lineLen - 1
			}
		}
		// H1 is underlined with '*', H2/H3 with '-'.
		var divider string
		if node.DataAtom == atom.H1 {
			divider = strings.Repeat("*", dividerLen)
		} else {
			divider = strings.Repeat("-", dividerLen)
		}

		// H3 gets a divider below only; H1/H2 are framed above and below.
		if node.DataAtom == atom.H3 {
			return ctx.emit("\n\n" + str + "\n" + divider + "\n\n")
		}
		return ctx.emit("\n\n" + divider + "\n" + str + "\n" + divider + "\n\n")

	case atom.Blockquote:
		// Deepen the "> " line prefix for the duration of the subtree.
		ctx.blockquoteLevel++
		ctx.prefix = strings.Repeat(">", ctx.blockquoteLevel) + " "
		if err := ctx.emit("\n"); err != nil {
			return err
		}
		if ctx.blockquoteLevel == 1 {
			if err := ctx.emit("\n"); err != nil {
				return err
			}
		}
		if err := ctx.traverseChildren(node); err != nil {
			return err
		}
		ctx.blockquoteLevel--
		ctx.prefix = strings.Repeat(">", ctx.blockquoteLevel)
		if ctx.blockquoteLevel > 0 {
			ctx.prefix += " "
		}
		return ctx.emit("\n\n")

	case atom.Div:
		if ctx.lineLength > 0 {
			if err := ctx.emit("\n"); err != nil {
				return err
			}
		}
		if err := ctx.traverseChildren(node); err != nil {
			return err
		}
		var err error
		// Avoid emitting an extra newline between consecutive closing divs.
		if !ctx.justClosedDiv {
			err = ctx.emit("\n")
		}
		ctx.justClosedDiv = true
		return err

	case atom.Li:
		if err := ctx.emit("* "); err != nil {
			return err
		}

		if err := ctx.traverseChildren(node); err != nil {
			return err
		}

		return ctx.emit("\n")

	case atom.B, atom.Strong:
		// Render bold content into a sub-context so it can be wrapped in '*'.
		subCtx := textifyTraverseContext{}
		subCtx.endsWithSpace = true
		if err := subCtx.traverseChildren(node); err != nil {
			return err
		}
		str := subCtx.buf.String()
		return ctx.emit("*" + str + "*")

	case atom.A:
		linkText := ""
		// For simple link element content with single text node only, peek at the link text.
		if node.FirstChild != nil && node.FirstChild.NextSibling == nil && node.FirstChild.Type == html.TextNode {
			linkText = node.FirstChild.Data
		}

		// If image is the only child, take its alt text as the link text.
		if img := node.FirstChild; img != nil && node.LastChild == img && img.DataAtom == atom.Img {
			if altText := getAttrVal(img, "alt"); altText != "" {
				if err := ctx.emit(altText); err != nil {
					return err
				}
			}
		} else if err := ctx.traverseChildren(node); err != nil {
			return err
		}

		hrefLink := ""
		if attrVal := getAttrVal(node, "href"); attrVal != "" {
			attrVal = ctx.normalizeHrefLink(attrVal)
			// Don't print link href if it matches link element content or if the link is empty.
			if !ctx.options.OmitLinks && attrVal != "" && linkText != attrVal {
				hrefLink = "( " + attrVal + " )"
			}
		}

		return ctx.emit(hrefLink)

	case atom.P, atom.Ul:
		return ctx.paragraphHandler(node)

	case atom.Table, atom.Tfoot, atom.Th, atom.Tr, atom.Td:
		if ctx.options.PrettyTables {
			return ctx.handleTableElement(node)
		} else if node.DataAtom == atom.Table {
			return ctx.paragraphHandler(node)
		}
		return ctx.traverseChildren(node)

	case atom.Pre:
		// Preserve whitespace verbatim within the <pre> subtree.
		ctx.isPre = true
		err := ctx.traverseChildren(node)
		ctx.isPre = false
		return err

	case atom.Style, atom.Script, atom.Head:
		// Ignore the subtree.
		return nil

	default:
		return ctx.traverseChildren(node)
	}
}
// paragraphHandler emits the node's children as a paragraph: the rendered
// content is preceded and followed by a blank line.
func (ctx *textifyTraverseContext) paragraphHandler(node *html.Node) error {
	if err := ctx.emit("\n\n"); err != nil {
		return err
	}
	err := ctx.traverseChildren(node)
	if err == nil {
		err = ctx.emit("\n\n")
	}
	return err
}
// handleTableElement renders <table>, <tfoot>, <tr>, <th> and <td> nodes,
// accumulating rows into ctx.tableCtx and rendering the final ASCII table
// with tablewriter. It is only to be invoked when options.PrettyTables is
// active.
func (ctx *textifyTraverseContext) handleTableElement(node *html.Node) error {
	if !ctx.options.PrettyTables {
		panic("handleTableElement invoked when PrettyTables not active")
	}
	switch node.DataAtom {
	case atom.Table:
		// Tables are surrounded by blank lines, like paragraphs.
		if err := ctx.emit("\n\n"); err != nil {
			return err
		}
		// Re-initialize all table context.
		ctx.tableCtx.init()
		// Browse children, enriching context with table data.
		if err := ctx.traverseChildren(node); err != nil {
			return err
		}
		// Render into a scratch buffer so the result goes through emit
		// (and thus line-length / prefix bookkeeping) as one string.
		buf := &bytes.Buffer{}
		table := tablewriter.NewWriter(buf)
		if ctx.options.PrettyTablesOptions != nil {
			// Copy every user-supplied tablewriter option onto the writer.
			options := ctx.options.PrettyTablesOptions
			table.SetAutoFormatHeaders(options.AutoFormatHeader)
			table.SetAutoWrapText(options.AutoWrapText)
			table.SetReflowDuringAutoWrap(options.ReflowDuringAutoWrap)
			table.SetColWidth(options.ColWidth)
			table.SetColumnSeparator(options.ColumnSeparator)
			table.SetRowSeparator(options.RowSeparator)
			table.SetCenterSeparator(options.CenterSeparator)
			table.SetHeaderAlignment(options.HeaderAlignment)
			table.SetFooterAlignment(options.FooterAlignment)
			table.SetAlignment(options.Alignment)
			table.SetColumnAlignment(options.ColumnAlignment)
			table.SetNewLine(options.NewLine)
			table.SetHeaderLine(options.HeaderLine)
			table.SetRowLine(options.RowLine)
			table.SetAutoMergeCells(options.AutoMergeCells)
			table.SetBorders(options.Borders)
		}
		table.SetHeader(ctx.tableCtx.header)
		table.SetFooter(ctx.tableCtx.footer)
		table.AppendBulk(ctx.tableCtx.body)
		// Render the table using ASCII.
		table.Render()
		if err := ctx.emit(buf.String()); err != nil {
			return err
		}
		return ctx.emit("\n\n")
	case atom.Tfoot:
		// Cells encountered while this flag is set are routed to the footer.
		ctx.tableCtx.isInFooter = true
		if err := ctx.traverseChildren(node); err != nil {
			return err
		}
		ctx.tableCtx.isInFooter = false
	case atom.Tr:
		// Start a new body row; tmpRow is advanced after the row's cells
		// have been collected.
		ctx.tableCtx.body = append(ctx.tableCtx.body, []string{})
		if err := ctx.traverseChildren(node); err != nil {
			return err
		}
		ctx.tableCtx.tmpRow++
	case atom.Th:
		res, err := ctx.renderEachChild(node)
		if err != nil {
			return err
		}
		ctx.tableCtx.header = append(ctx.tableCtx.header, res)
	case atom.Td:
		res, err := ctx.renderEachChild(node)
		if err != nil {
			return err
		}
		if ctx.tableCtx.isInFooter {
			ctx.tableCtx.footer = append(ctx.tableCtx.footer, res)
		} else {
			// NOTE(review): assumes every <td> is preceded by a <tr> so
			// tmpRow indexes an existing row — a bare <td> would panic.
			ctx.tableCtx.body[ctx.tableCtx.tmpRow] = append(ctx.tableCtx.body[ctx.tableCtx.tmpRow], res)
		}
	}
	return nil
}
// traverse dispatches a single DOM node: text nodes are emitted (with runs
// of whitespace collapsed to single spaces and trimmed, unless inside a
// <pre>), element nodes go through handleElement, and every other node kind
// simply has its children visited.
func (ctx *textifyTraverseContext) traverse(node *html.Node) error {
	switch node.Type {
	case html.TextNode:
		data := node.Data
		if !ctx.isPre {
			data = strings.TrimSpace(spacingRe.ReplaceAllString(data, " "))
		}
		return ctx.emit(data)
	case html.ElementNode:
		return ctx.handleElement(node)
	default:
		return ctx.traverseChildren(node)
	}
}
// traverseChildren visits each direct child of node in document order,
// stopping at the first error encountered.
func (ctx *textifyTraverseContext) traverseChildren(node *html.Node) error {
	child := node.FirstChild
	for child != nil {
		if err := ctx.traverse(child); err != nil {
			return err
		}
		child = child.NextSibling
	}
	return nil
}
// emit appends data to the output buffer. It inserts a separating space
// between adjacent non-whitespace output, tracks the current line length
// (in runes), and re-applies the blockquote prefix after every newline.
// Long lines are pre-split by breakLongLines, which only acts inside
// blockquotes.
func (ctx *textifyTraverseContext) emit(data string) error {
	if data == "" {
		return nil
	}
	var (
		lines = ctx.breakLongLines(data)
		err   error
	)
	for _, line := range lines {
		runes := []rune(line)
		startsWithSpace := unicode.IsSpace(runes[0])
		// Separate adjacent words unless one side already provides
		// whitespace. NOTE(review): the "." check inspects the whole data
		// string rather than the current line — presumably intentional, to
		// keep sentence-ending punctuation attached; confirm upstream.
		if !startsWithSpace && !ctx.endsWithSpace && !strings.HasPrefix(data, ".") {
			if err = ctx.buf.WriteByte(' '); err != nil {
				return err
			}
			ctx.lineLength++
		}
		ctx.endsWithSpace = unicode.IsSpace(runes[len(runes)-1])
		for _, c := range line {
			// WriteRune avoids the per-rune string allocation that
			// WriteString(string(c)) incurred.
			if _, err = ctx.buf.WriteRune(c); err != nil {
				return err
			}
			ctx.lineLength++
			if c == '\n' {
				ctx.lineLength = 0
				if ctx.prefix != "" {
					if _, err = ctx.buf.WriteString(ctx.prefix); err != nil {
						return err
					}
				}
			}
		}
	}
	return nil
}
// maxLineLen is the column at which breakLongLines wraps blockquote text.
const maxLineLen = 74
// breakLongLines splits data into chunks of at most maxLineLen runes each,
// preferring to break at whitespace. Wrapping only applies inside
// blockquotes; elsewhere data is returned unchanged as a single element.
// ctx.lineLength is accounted for so the first chunk fits on the
// partially-filled current line.
func (ctx *textifyTraverseContext) breakLongLines(data string) []string {
	// Only break lines when in blockquotes.
	if ctx.blockquoteLevel == 0 {
		return []string{data}
	}
	var (
		ret      = []string{}
		runes    = []rune(data)
		l        = len(runes)
		existing = ctx.lineLength
	)
	if existing >= maxLineLen {
		// The current output line is already full; start a fresh one.
		ret = append(ret, "\n")
		existing = 0
	}
	for l+existing > maxLineLen {
		// Scan backwards from the limit for a space to break at.
		i := maxLineLen - existing
		for i >= 0 && !unicode.IsSpace(runes[i]) {
			i--
		}
		if i == -1 {
			// No spaces, so go the other way.
			i = maxLineLen - existing
			for i < l && !unicode.IsSpace(runes[i]) {
				i++
			}
		}
		ret = append(ret, string(runes[:i])+"\n")
		// Skip the run of whitespace at the break point.
		for i < l && unicode.IsSpace(runes[i]) {
			i++
		}
		runes = runes[i:]
		l = len(runes)
		existing = 0
	}
	if len(runes) > 0 {
		ret = append(ret, string(runes))
	}
	return ret
}
// normalizeHrefLink cleans a href attribute value for display: surrounding
// whitespace is removed, as is a leading "mailto:" scheme.
func (ctx *textifyTraverseContext) normalizeHrefLink(link string) string {
	return strings.TrimPrefix(strings.TrimSpace(link), "mailto:")
}
// renderEachChild textifies every direct child of node independently and
// joins the resulting strings with single newlines.
func (ctx *textifyTraverseContext) renderEachChild(node *html.Node) (string, error) {
	var parts []string
	for c := node.FirstChild; c != nil; c = c.NextSibling {
		s, err := FromHTMLNode(c, ctx.options)
		if err != nil {
			return "", err
		}
		parts = append(parts, s)
	}
	return strings.Join(parts, "\n"), nil
}
// getAttrVal returns the value of the named attribute on node, or the
// empty string when no such attribute is present.
func getAttrVal(node *html.Node, attrName string) string {
	for i := range node.Attr {
		if node.Attr[i].Key == attrName {
			return node.Attr[i].Val
		}
	}
	return ""
}

211
vendor/vendor.json vendored Normal file
View file

@ -0,0 +1,211 @@
{
"comment": "",
"ignore": "test",
"package": [
{
"checksumSHA1": "6C3LYFDsp1HNrgzVxCcBlKCifC0=",
"path": "github.com/PuerkitoBio/goquery",
"revision": "3dcf72e6c17f694381a21592651ca1464ded0e10",
"revisionTime": "2019-01-09T23:07:04Z"
},
{
"checksumSHA1": "7DQlhUmi8waYHuk3kvrWiTLVygM=",
"path": "github.com/andybalholm/cascadia",
"revision": "680b6a57bda4f657485ad44bdea42342ead737bc",
"revisionTime": "2018-10-12T15:44:24Z"
},
{
"checksumSHA1": "5UWAX/5F5bqC7tKtDb2QFFRqwAs=",
"path": "github.com/chilts/sid",
"revision": "250d10e55bf450834d37cb13b7dae8816ada9b28",
"revisionTime": "2018-09-28T23:21:30Z"
},
{
"checksumSHA1": "qR7D38Zmn9TPUvGn64k+r0+Kq1c=",
"path": "github.com/mattn/go-runewidth",
"revision": "703b5e6b11ae25aeb2af9ebb5d5fdf8fa2575211",
"revisionTime": "2018-12-18T00:06:49Z"
},
{
"checksumSHA1": "F0EUIs4TuxSnrzbd/SfZdseDtmc=",
"path": "github.com/mattn/go-xmpp",
"revision": "6093f50721ed2204a87a81109ca5a466a5bec6c1",
"revisionTime": "2019-01-24T09:32:44Z"
},
{
"checksumSHA1": "0etQIvNU8zcr74rQKJLeH5O+MzU=",
"path": "github.com/mmcdole/gofeed",
"revision": "0e68beaf6fdf215bd1fe42a09f6de292c7032359",
"revisionTime": "2019-04-20T15:49:28Z"
},
{
"checksumSHA1": "+plUZFo1eUTODCmc1ptE2VUuWM4=",
"path": "github.com/mmcdole/gofeed/atom",
"revision": "0e68beaf6fdf215bd1fe42a09f6de292c7032359",
"revisionTime": "2019-04-20T15:49:28Z"
},
{
"checksumSHA1": "30XHSW5SZy4yL+ezmw0xyhGdM+Y=",
"path": "github.com/mmcdole/gofeed/extensions",
"revision": "0e68beaf6fdf215bd1fe42a09f6de292c7032359",
"revisionTime": "2019-04-20T15:49:28Z"
},
{
"checksumSHA1": "Aw0Zm44X0crwxj4KZTYqK2C3zT0=",
"path": "github.com/mmcdole/gofeed/internal/shared",
"revision": "0e68beaf6fdf215bd1fe42a09f6de292c7032359",
"revisionTime": "2019-04-20T15:49:28Z"
},
{
"checksumSHA1": "4Em5hO79HIPXmUYb9s/zh6CuQ0c=",
"path": "github.com/mmcdole/gofeed/rss",
"revision": "0e68beaf6fdf215bd1fe42a09f6de292c7032359",
"revisionTime": "2019-04-20T15:49:28Z"
},
{
"checksumSHA1": "dMr0U8HXo4PGD9qDPxvh6Iizzh0=",
"path": "github.com/mmcdole/goxpp",
"revision": "0068e33feabfc0086c7aeb58a9603f91c061c89f",
"revisionTime": "2018-10-12T15:49:47Z"
},
{
"checksumSHA1": "HZJ2dhzXoMi8n+iY80A9vsnyQUk=",
"path": "github.com/olekukonko/tablewriter",
"revision": "1c0837c15a0bac7871496dfce5dcdd308e0a330f",
"revisionTime": "2019-05-08T01:39:46Z"
},
{
"checksumSHA1": "qErubHtC7DAFBnEQkMTuKDtfFTU=",
"path": "github.com/ssor/bom",
"revision": "6386211fdfcf24c0bfbdaceafd02849ed9a8a509",
"revisionTime": "2017-07-18T12:35:48Z"
},
{
"checksumSHA1": "bONEZcbkYKiPyABrecOLzHomjPU=",
"path": "golang.org/x/net/html",
"revision": "f3200d17e092c607f615320ecaad13d87ad9a2b3",
"revisionTime": "2019-05-22T15:39:15Z"
},
{
"checksumSHA1": "xwhqe/igHQrY3IhqDwzo6j7qpm8=",
"path": "golang.org/x/net/html/atom",
"revision": "f3200d17e092c607f615320ecaad13d87ad9a2b3",
"revisionTime": "2019-05-22T15:39:15Z"
},
{
"checksumSHA1": "barUU39reQ7LdgYLA323hQ/UGy4=",
"path": "golang.org/x/net/html/charset",
"revision": "f3200d17e092c607f615320ecaad13d87ad9a2b3",
"revisionTime": "2019-05-22T15:39:15Z"
},
{
"checksumSHA1": "tqqo7DEeFCclb58XbN44WwdpWww=",
"path": "golang.org/x/text/encoding",
"revision": "342b2e1fbaa52c93f31447ad2c6abc048c63e475",
"revisionTime": "2018-12-15T17:52:45Z"
},
{
"checksumSHA1": "DSdlK4MKI/a3U8Zaee2XKBe01Fo=",
"path": "golang.org/x/text/encoding/charmap",
"revision": "342b2e1fbaa52c93f31447ad2c6abc048c63e475",
"revisionTime": "2018-12-15T17:52:45Z"
},
{
"checksumSHA1": "SbJkfe5G/5tji96Pa15/ePDOCtk=",
"path": "golang.org/x/text/encoding/htmlindex",
"revision": "342b2e1fbaa52c93f31447ad2c6abc048c63e475",
"revisionTime": "2018-12-15T17:52:45Z"
},
{
"checksumSHA1": "zeHyHebIZl1tGuwGllIhjfci+wI=",
"path": "golang.org/x/text/encoding/internal",
"revision": "342b2e1fbaa52c93f31447ad2c6abc048c63e475",
"revisionTime": "2018-12-15T17:52:45Z"
},
{
"checksumSHA1": "hT7VaIBlkm2YpKulgnjqXNdicGQ=",
"path": "golang.org/x/text/encoding/internal/identifier",
"revision": "342b2e1fbaa52c93f31447ad2c6abc048c63e475",
"revisionTime": "2018-12-15T17:52:45Z"
},
{
"checksumSHA1": "2YqVpmvjWGEBATyUphTP1MS34JE=",
"path": "golang.org/x/text/encoding/japanese",
"revision": "342b2e1fbaa52c93f31447ad2c6abc048c63e475",
"revisionTime": "2018-12-15T17:52:45Z"
},
{
"checksumSHA1": "+ErWCAdaMwO4PLtrk9D/Hh+7oQM=",
"path": "golang.org/x/text/encoding/korean",
"revision": "342b2e1fbaa52c93f31447ad2c6abc048c63e475",
"revisionTime": "2018-12-15T17:52:45Z"
},
{
"checksumSHA1": "mTuZi5urYwgDIO8+Gfql2pv8Vwg=",
"path": "golang.org/x/text/encoding/simplifiedchinese",
"revision": "342b2e1fbaa52c93f31447ad2c6abc048c63e475",
"revisionTime": "2018-12-15T17:52:45Z"
},
{
"checksumSHA1": "D+VI4j0Wjzr8SeupWdOB5KBdFOw=",
"path": "golang.org/x/text/encoding/traditionalchinese",
"revision": "342b2e1fbaa52c93f31447ad2c6abc048c63e475",
"revisionTime": "2018-12-15T17:52:45Z"
},
{
"checksumSHA1": "bAJTZJ3IGJdNmN/PSlRMRxWtxec=",
"path": "golang.org/x/text/encoding/unicode",
"revision": "342b2e1fbaa52c93f31447ad2c6abc048c63e475",
"revisionTime": "2018-12-15T17:52:45Z"
},
{
"checksumSHA1": "ybE4kAPmNPV/dvShuG86AmLbhdE=",
"path": "golang.org/x/text/internal/language",
"revision": "342b2e1fbaa52c93f31447ad2c6abc048c63e475",
"revisionTime": "2018-12-15T17:52:45Z"
},
{
"checksumSHA1": "VDwNSsZP6KShjTSwGUQUGJVrs1I=",
"path": "golang.org/x/text/internal/language/compact",
"revision": "342b2e1fbaa52c93f31447ad2c6abc048c63e475",
"revisionTime": "2018-12-15T17:52:45Z"
},
{
"checksumSHA1": "hyNCcTwMQnV6/MK8uUW9E5H0J0M=",
"path": "golang.org/x/text/internal/tag",
"revision": "342b2e1fbaa52c93f31447ad2c6abc048c63e475",
"revisionTime": "2018-12-15T17:52:45Z"
},
{
"checksumSHA1": "Qk7dljcrEK1BJkAEZguxAbG9dSo=",
"path": "golang.org/x/text/internal/utf8internal",
"revision": "342b2e1fbaa52c93f31447ad2c6abc048c63e475",
"revisionTime": "2018-12-15T17:52:45Z"
},
{
"checksumSHA1": "oYNlkS+0TimKOScUz3Hn9QWyz6w=",
"path": "golang.org/x/text/language",
"revision": "342b2e1fbaa52c93f31447ad2c6abc048c63e475",
"revisionTime": "2018-12-15T17:52:45Z"
},
{
"checksumSHA1": "IV4MN7KGBSocu/5NR3le3sxup4Y=",
"path": "golang.org/x/text/runes",
"revision": "342b2e1fbaa52c93f31447ad2c6abc048c63e475",
"revisionTime": "2018-12-15T17:52:45Z"
},
{
"checksumSHA1": "R9iBDY+aPnT+8pyRcqGjXq5QixA=",
"path": "golang.org/x/text/transform",
"revision": "342b2e1fbaa52c93f31447ad2c6abc048c63e475",
"revisionTime": "2018-12-15T17:52:45Z"
},
{
"checksumSHA1": "2/9hMw7Y4I42L/PTobKqVldWUAU=",
"path": "jaytaylor.com/html2text",
"revision": "01ec452cbe43774f989516272881441cae40c16b",
"revisionTime": "2019-03-26T19:55:09Z"
}
],
"rootPath": "salsa.debian.org/mdosch-guest/feed-to-muc"
}