2018-12-13 20:33:29 +01:00
parent 7bca21522a
commit d0d888c160
63 changed files with 7546 additions and 2 deletions

19
vendor/github.com/beevik/etree/.travis.yml generated vendored Normal file

@@ -0,0 +1,19 @@
language: go
sudo: false
go:
- 1.5.x
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
- tip
matrix:
allow_failures:
- go: tip
script:
- go vet ./...
- go test -v ./...

9
vendor/github.com/beevik/etree/CONTRIBUTORS generated vendored Normal file

@@ -0,0 +1,9 @@
Brett Vickers (beevik)
Felix Geisendörfer (felixge)
Kamil Kisiel (kisielk)
Graham King (grahamking)
Matt Smith (ma314smith)
Michal Jemala (michaljemala)
Nicolas Piganeau (npiganeau)
Chris Brown (ccbrown)
Earncef Sequeira (earncef)

24
vendor/github.com/beevik/etree/LICENSE generated vendored Normal file

@@ -0,0 +1,24 @@
Copyright 2015 Brett Vickers. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

203
vendor/github.com/beevik/etree/README.md generated vendored Normal file

@@ -0,0 +1,203 @@
[![Build Status](https://travis-ci.org/beevik/etree.svg?branch=master)](https://travis-ci.org/beevik/etree)
[![GoDoc](https://godoc.org/github.com/beevik/etree?status.svg)](https://godoc.org/github.com/beevik/etree)
etree
=====
The etree package is a lightweight, pure go package that expresses XML in
the form of an element tree. Its design was inspired by the Python
[ElementTree](http://docs.python.org/2/library/xml.etree.elementtree.html)
module. Some of the package's features include:
* Represents XML documents as trees of elements for easy traversal.
* Imports, serializes, modifies or creates XML documents from scratch.
* Writes and reads XML to/from files, byte slices, strings and io interfaces.
* Performs simple or complex searches with lightweight XPath-like query APIs.
* Auto-indents XML using spaces or tabs for better readability.
* Implemented in pure go; depends only on standard go libraries.
* Built on top of the go [encoding/xml](http://golang.org/pkg/encoding/xml)
package.
### Creating an XML document
The following example creates an XML document from scratch using the etree
package and outputs its indented contents to stdout.
```go
doc := etree.NewDocument()
doc.CreateProcInst("xml", `version="1.0" encoding="UTF-8"`)
doc.CreateProcInst("xml-stylesheet", `type="text/xsl" href="style.xsl"`)
people := doc.CreateElement("People")
people.CreateComment("These are all known people")
jon := people.CreateElement("Person")
jon.CreateAttr("name", "Jon")
sally := people.CreateElement("Person")
sally.CreateAttr("name", "Sally")
doc.Indent(2)
doc.WriteTo(os.Stdout)
```
Output:
```xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="style.xsl"?>
<People>
<!--These are all known people-->
<Person name="Jon"/>
<Person name="Sally"/>
</People>
```
### Reading an XML file
Suppose you have a file on disk called `bookstore.xml` containing the
following data:
```xml
<bookstore xmlns:p="urn:schemas-books-com:prices">
<book category="COOKING">
<title lang="en">Everyday Italian</title>
<author>Giada De Laurentiis</author>
<year>2005</year>
<p:price>30.00</p:price>
</book>
<book category="CHILDREN">
<title lang="en">Harry Potter</title>
<author>J K. Rowling</author>
<year>2005</year>
<p:price>29.99</p:price>
</book>
<book category="WEB">
<title lang="en">XQuery Kick Start</title>
<author>James McGovern</author>
<author>Per Bothner</author>
<author>Kurt Cagle</author>
<author>James Linn</author>
<author>Vaidyanathan Nagarajan</author>
<year>2003</year>
<p:price>49.99</p:price>
</book>
<book category="WEB">
<title lang="en">Learning XML</title>
<author>Erik T. Ray</author>
<year>2003</year>
<p:price>39.95</p:price>
</book>
</bookstore>
```
This code reads the file's contents into an etree document.
```go
doc := etree.NewDocument()
if err := doc.ReadFromFile("bookstore.xml"); err != nil {
panic(err)
}
```
You can also read XML from a string, a byte slice, or an `io.Reader`.
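For example, a minimal sketch (assuming `xmlData` holds the XML text as a string and `r` is an open `io.Reader`):

```go
doc := etree.NewDocument()

// From a string.
if err := doc.ReadFromString(xmlData); err != nil {
	panic(err)
}

// From a byte slice.
if err := doc.ReadFromBytes([]byte(xmlData)); err != nil {
	panic(err)
}

// From any io.Reader.
if _, err := doc.ReadFrom(r); err != nil {
	panic(err)
}
```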
### Processing elements and attributes
This example illustrates several ways to access elements and attributes using
etree selection queries.
```go
root := doc.SelectElement("bookstore")
fmt.Println("ROOT element:", root.Tag)
for _, book := range root.SelectElements("book") {
fmt.Println("CHILD element:", book.Tag)
if title := book.SelectElement("title"); title != nil {
lang := title.SelectAttrValue("lang", "unknown")
fmt.Printf(" TITLE: %s (%s)\n", title.Text(), lang)
}
for _, attr := range book.Attr {
fmt.Printf(" ATTR: %s=%s\n", attr.Key, attr.Value)
}
}
```
Output:
```
ROOT element: bookstore
CHILD element: book
TITLE: Everyday Italian (en)
ATTR: category=COOKING
CHILD element: book
TITLE: Harry Potter (en)
ATTR: category=CHILDREN
CHILD element: book
TITLE: XQuery Kick Start (en)
ATTR: category=WEB
CHILD element: book
TITLE: Learning XML (en)
ATTR: category=WEB
```
### Path queries
This example uses etree's path functions to select all book titles that fall
into the category of 'WEB'. The double-slash prefix in the path causes the
search for book elements to occur recursively; book elements may appear at any
level of the XML hierarchy.
```go
for _, t := range doc.FindElements("//book[@category='WEB']/title") {
fmt.Println("Title:", t.Text())
}
```
Output:
```
Title: XQuery Kick Start
Title: Learning XML
```
This example finds the first book element under the root bookstore element and
outputs the tag and text of each of its child elements.
```go
for _, e := range doc.FindElements("./bookstore/book[1]/*") {
fmt.Printf("%s: %s\n", e.Tag, e.Text())
}
```
Output:
```
title: Everyday Italian
author: Giada De Laurentiis
year: 2005
price: 30.00
```
This example finds all books with a price of 49.99 and outputs their titles.
```go
path := etree.MustCompilePath("./bookstore/book[p:price='49.99']/title")
for _, e := range doc.FindElementsPath(path) {
fmt.Println(e.Text())
}
```
Output:
```
XQuery Kick Start
```
Note that this example uses the FindElementsPath function, which takes as an
argument a pre-compiled path object. Use precompiled paths when you plan to
search with the same path more than once.
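As a sketch of this pattern, the path below is compiled once (for example at package scope) and reused for every search; the `printWebTitles` helper is hypothetical:

```go
var webTitlesPath = etree.MustCompilePath("//book[@category='WEB']/title")

func printWebTitles(doc *etree.Document) {
	for _, e := range doc.FindElementsPath(webTitlesPath) {
		fmt.Println("Title:", e.Text())
	}
}
```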
### Other features
These are just a few examples of the things the etree package can do. See the
[documentation](http://godoc.org/github.com/beevik/etree) for a complete
description of its capabilities.
### Contributing
This project accepts contributions. Just fork the repo and submit a pull
request!

27
vendor/github.com/beevik/etree/RELEASE_NOTES.md generated vendored Normal file

@@ -0,0 +1,27 @@
Release v1.0.1
==============
**Changes**
* Added support for absolute etree Path queries. An absolute path begins with
`/` or `//` and begins its search from the element's document root.
* Added [`GetPath`](https://godoc.org/github.com/beevik/etree#Element.GetPath)
and [`GetRelativePath`](https://godoc.org/github.com/beevik/etree#Element.GetRelativePath)
functions to the [`Element`](https://godoc.org/github.com/beevik/etree#Element)
type.
**Breaking changes**
* A path starting with `//` is now interpreted as an absolute path.
Previously, it was interpreted as a relative path starting from the element
whose
[`FindElement`](https://godoc.org/github.com/beevik/etree#Element.FindElement)
method was called. To remain compatible with this release, all paths
prefixed with `//` should be prefixed with `.//` when called from any
element other than the document's root.
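As a sketch of the migration (assuming `elem` is an element somewhere below the document root):

```go
// Before v1.0.1 this searched relative to elem; it is now an absolute search
// starting from the document root.
books := elem.FindElements("//book")

// To keep the old relative behavior, prefix the path with a dot.
books = elem.FindElements(".//book")
```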
Release v1.0.0
==============
Initial release.

1034
vendor/github.com/beevik/etree/etree.go generated vendored Normal file

File diff suppressed because it is too large

188
vendor/github.com/beevik/etree/helpers.go generated vendored Normal file

@@ -0,0 +1,188 @@
// Copyright 2015 Brett Vickers.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package etree
import (
"io"
"strings"
)
// A simple stack
type stack struct {
data []interface{}
}
func (s *stack) empty() bool {
return len(s.data) == 0
}
func (s *stack) push(value interface{}) {
s.data = append(s.data, value)
}
func (s *stack) pop() interface{} {
value := s.data[len(s.data)-1]
s.data[len(s.data)-1] = nil
s.data = s.data[:len(s.data)-1]
return value
}
func (s *stack) peek() interface{} {
return s.data[len(s.data)-1]
}
// A fifo is a simple first-in-first-out queue.
type fifo struct {
data []interface{}
head, tail int
}
func (f *fifo) add(value interface{}) {
if f.len()+1 >= len(f.data) {
f.grow()
}
f.data[f.tail] = value
if f.tail++; f.tail == len(f.data) {
f.tail = 0
}
}
func (f *fifo) remove() interface{} {
value := f.data[f.head]
f.data[f.head] = nil
if f.head++; f.head == len(f.data) {
f.head = 0
}
return value
}
func (f *fifo) len() int {
if f.tail >= f.head {
return f.tail - f.head
}
return len(f.data) - f.head + f.tail
}
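// grow doubles the capacity of the fifo's circular buffer (allocating a
// minimum of 4 slots) and moves the existing entries to the front of the
// new buffer.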
func (f *fifo) grow() {
c := len(f.data) * 2
if c == 0 {
c = 4
}
buf, count := make([]interface{}, c), f.len()
if f.tail >= f.head {
copy(buf[0:count], f.data[f.head:f.tail])
} else {
hindex := len(f.data) - f.head
copy(buf[0:hindex], f.data[f.head:])
copy(buf[hindex:count], f.data[:f.tail])
}
f.data, f.head, f.tail = buf, 0, count
}
// countReader implements a proxy reader that counts the number of
// bytes read from its encapsulated reader.
type countReader struct {
r io.Reader
bytes int64
}
func newCountReader(r io.Reader) *countReader {
return &countReader{r: r}
}
func (cr *countReader) Read(p []byte) (n int, err error) {
b, err := cr.r.Read(p)
cr.bytes += int64(b)
return b, err
}
// countWriter implements a proxy writer that counts the number of
// bytes written by its encapsulated writer.
type countWriter struct {
w io.Writer
bytes int64
}
func newCountWriter(w io.Writer) *countWriter {
return &countWriter{w: w}
}
func (cw *countWriter) Write(p []byte) (n int, err error) {
b, err := cw.w.Write(p)
cw.bytes += int64(b)
return b, err
}
// isWhitespace returns true if the string contains only
// whitespace characters.
func isWhitespace(s string) bool {
for i := 0; i < len(s); i++ {
if c := s[i]; c != ' ' && c != '\t' && c != '\n' && c != '\r' {
return false
}
}
return true
}
// spaceMatch returns true if namespace a is the empty string
// or if namespace a equals namespace b.
func spaceMatch(a, b string) bool {
switch {
case a == "":
return true
default:
return a == b
}
}
// spaceDecompose breaks a namespace:tag identifier at the ':'
// and returns the two parts.
func spaceDecompose(str string) (space, key string) {
colon := strings.IndexByte(str, ':')
if colon == -1 {
return "", str
}
return str[:colon], str[colon+1:]
}
// Strings used by crIndent
const (
crsp = "\n "
crtab = "\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t"
)
// crIndent returns a newline followed by n copies of the
// first non-newline character in the source string.
func crIndent(n int, source string) string {
switch {
case n < 0:
return source[:1]
case n < len(source):
return source[:n+1]
default:
return source + strings.Repeat(source[1:2], n-len(source)+1)
}
}
// nextIndex returns the index of the next occurrence of sep in s,
// starting from offset. It returns -1 if the sep string is not found.
func nextIndex(s, sep string, offset int) int {
switch i := strings.Index(s[offset:], sep); i {
case -1:
return -1
default:
return offset + i
}
}
// isInteger returns true if the string s contains an integer.
func isInteger(s string) bool {
for i := 0; i < len(s); i++ {
if (s[i] < '0' || s[i] > '9') && !(i == 0 && s[i] == '-') {
return false
}
}
return true
}

533
vendor/github.com/beevik/etree/path.go generated vendored Normal file

@@ -0,0 +1,533 @@
// Copyright 2015 Brett Vickers.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package etree
import (
"strconv"
"strings"
)
/*
A Path is an object that represents an optimized version of an XPath-like
search string. A path search string is a slash-separated series of "selectors"
allowing traversal through an XML hierarchy. Although etree path strings are
similar to XPath strings, they have a more limited set of selectors and
filtering options. The following selectors and filters are supported by etree
paths:
. Select the current element.
.. Select the parent of the current element.
* Select all child elements of the current element.
/ Select the root element when used at the start of a path.
// Select all descendants of the current element. If used at
the start of a path, select all descendants of the root.
tag Select all child elements with the given tag.
[#] Select the element of the given index (1-based,
negative starts from the end).
[@attrib] Select all elements with the given attribute.
[@attrib='val'] Select all elements with the given attribute set to val.
[tag] Select all elements with a child element named tag.
[tag='val'] Select all elements with a child element named tag
and text matching val.
[text()] Select all elements with non-empty text.
[text()='val'] Select all elements whose text matches val.
Examples:
Select the bookstore child element of the root element:
/bookstore
Beginning a search from the root element, select the title elements of all
descendant book elements having a 'category' attribute of 'WEB':
//book[@category='WEB']/title
Beginning a search from the current element, select the first descendant book
element with a title child containing the text 'Great Expectations':
.//book[title='Great Expectations'][1]
Beginning a search from the current element, select all children of book
elements with an attribute 'language' set to 'english':
./book/*[@language='english']
Beginning a search from the current element, select all children of book
elements containing the text 'special':
./book/*[text()='special']
Beginning a search from the current element, select all descendant book
elements whose title element has an attribute 'language' equal to 'french':
.//book/title[@language='french']/..
*/
type Path struct {
segments []segment
}
// ErrPath is returned by path functions when an invalid etree path is provided.
type ErrPath string
// Error returns the string describing a path error.
func (err ErrPath) Error() string {
return "etree: " + string(err)
}
// CompilePath creates an optimized version of an XPath-like string that
// can be used to query elements in an element tree.
func CompilePath(path string) (Path, error) {
var comp compiler
segments := comp.parsePath(path)
if comp.err != ErrPath("") {
return Path{nil}, comp.err
}
return Path{segments}, nil
}
// MustCompilePath creates an optimized version of an XPath-like string that
// can be used to query elements in an element tree. Panics if an error
// occurs. Use this function to create Paths when you know the path is
// valid (i.e., if it's hard-coded).
func MustCompilePath(path string) Path {
p, err := CompilePath(path)
if err != nil {
panic(err)
}
return p
}
// A segment is a portion of a path between "/" characters.
// It contains one selector and zero or more [filters].
type segment struct {
sel selector
filters []filter
}
func (seg *segment) apply(e *Element, p *pather) {
seg.sel.apply(e, p)
for _, f := range seg.filters {
f.apply(p)
}
}
// A selector selects XML elements for consideration by the
// path traversal.
type selector interface {
apply(e *Element, p *pather)
}
// A filter pares down a list of candidate XML elements based
// on a path filter in [brackets].
type filter interface {
apply(p *pather)
}
// A pather is a helper object that traverses an element tree using
// a Path object. It collects and deduplicates all elements matching
// the path query.
type pather struct {
queue fifo
results []*Element
inResults map[*Element]bool
candidates []*Element
scratch []*Element // used by filters
}
// A node represents an element and the remaining path segments that
// should be applied against it by the pather.
type node struct {
e *Element
segments []segment
}
func newPather() *pather {
return &pather{
results: make([]*Element, 0),
inResults: make(map[*Element]bool),
candidates: make([]*Element, 0),
scratch: make([]*Element, 0),
}
}
// traverse follows the path from the element e, collecting
// and then returning all elements that match the path's selectors
// and filters.
func (p *pather) traverse(e *Element, path Path) []*Element {
for p.queue.add(node{e, path.segments}); p.queue.len() > 0; {
p.eval(p.queue.remove().(node))
}
return p.results
}
// eval evaluates the current path node by applying the remaining
// path's selector rules against the node's element.
func (p *pather) eval(n node) {
p.candidates = p.candidates[0:0]
seg, remain := n.segments[0], n.segments[1:]
seg.apply(n.e, p)
if len(remain) == 0 {
for _, c := range p.candidates {
if in := p.inResults[c]; !in {
p.inResults[c] = true
p.results = append(p.results, c)
}
}
} else {
for _, c := range p.candidates {
p.queue.add(node{c, remain})
}
}
}
// A compiler generates a compiled path from a path string.
type compiler struct {
err ErrPath
}
// parsePath parses an XPath-like string describing a path
// through an element tree and returns a slice of segment
// descriptors.
func (c *compiler) parsePath(path string) []segment {
// If path ends with //, fix it
if strings.HasSuffix(path, "//") {
path = path + "*"
}
var segments []segment
// Check for an absolute path
if strings.HasPrefix(path, "/") {
segments = append(segments, segment{new(selectRoot), []filter{}})
path = path[1:]
}
// Split path into segments
for _, s := range splitPath(path) {
segments = append(segments, c.parseSegment(s))
if c.err != ErrPath("") {
break
}
}
return segments
}
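// splitPath splits a path string into segments on '/' characters that are
// not enclosed in single quotes.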
func splitPath(path string) []string {
pieces := make([]string, 0)
start := 0
inquote := false
for i := 0; i+1 <= len(path); i++ {
if path[i] == '\'' {
inquote = !inquote
} else if path[i] == '/' && !inquote {
pieces = append(pieces, path[start:i])
start = i + 1
}
}
return append(pieces, path[start:])
}
// parseSegment parses a path segment between / characters.
func (c *compiler) parseSegment(path string) segment {
pieces := strings.Split(path, "[")
seg := segment{
sel: c.parseSelector(pieces[0]),
filters: []filter{},
}
for i := 1; i < len(pieces); i++ {
fpath := pieces[i]
if fpath[len(fpath)-1] != ']' {
c.err = ErrPath("path has invalid filter [brackets].")
break
}
seg.filters = append(seg.filters, c.parseFilter(fpath[:len(fpath)-1]))
}
return seg
}
// parseSelector parses a selector at the start of a path segment.
func (c *compiler) parseSelector(path string) selector {
switch path {
case ".":
return new(selectSelf)
case "..":
return new(selectParent)
case "*":
return new(selectChildren)
case "":
return new(selectDescendants)
default:
return newSelectChildrenByTag(path)
}
}
// parseFilter parses a path filter contained within [brackets].
func (c *compiler) parseFilter(path string) filter {
if len(path) == 0 {
c.err = ErrPath("path contains an empty filter expression.")
return nil
}
// Filter contains [@attr='val'], [text()='val'], or [tag='val']?
eqindex := strings.Index(path, "='")
if eqindex >= 0 {
rindex := nextIndex(path, "'", eqindex+2)
if rindex != len(path)-1 {
c.err = ErrPath("path has mismatched filter quotes.")
return nil
}
switch {
case path[0] == '@':
return newFilterAttrVal(path[1:eqindex], path[eqindex+2:rindex])
case strings.HasPrefix(path, "text()"):
return newFilterTextVal(path[eqindex+2 : rindex])
default:
return newFilterChildText(path[:eqindex], path[eqindex+2:rindex])
}
}
// Filter contains [@attr], [N], [tag] or [text()]
switch {
case path[0] == '@':
return newFilterAttr(path[1:])
case path == "text()":
return newFilterText()
case isInteger(path):
pos, _ := strconv.Atoi(path)
switch {
case pos > 0:
return newFilterPos(pos - 1)
default:
return newFilterPos(pos)
}
default:
return newFilterChild(path)
}
}
// selectSelf selects the current element into the candidate list.
type selectSelf struct{}
func (s *selectSelf) apply(e *Element, p *pather) {
p.candidates = append(p.candidates, e)
}
// selectRoot selects the element's root node.
type selectRoot struct{}
func (s *selectRoot) apply(e *Element, p *pather) {
root := e
for root.parent != nil {
root = root.parent
}
p.candidates = append(p.candidates, root)
}
// selectParent selects the element's parent into the candidate list.
type selectParent struct{}
func (s *selectParent) apply(e *Element, p *pather) {
if e.parent != nil {
p.candidates = append(p.candidates, e.parent)
}
}
// selectChildren selects the element's child elements into the
// candidate list.
type selectChildren struct{}
func (s *selectChildren) apply(e *Element, p *pather) {
for _, c := range e.Child {
if c, ok := c.(*Element); ok {
p.candidates = append(p.candidates, c)
}
}
}
// selectDescendants selects all descendant child elements
// of the element into the candidate list.
type selectDescendants struct{}
func (s *selectDescendants) apply(e *Element, p *pather) {
var queue fifo
for queue.add(e); queue.len() > 0; {
e := queue.remove().(*Element)
p.candidates = append(p.candidates, e)
for _, c := range e.Child {
if c, ok := c.(*Element); ok {
queue.add(c)
}
}
}
}
// selectChildrenByTag selects into the candidate list all child
// elements of the element having the specified tag.
type selectChildrenByTag struct {
space, tag string
}
func newSelectChildrenByTag(path string) *selectChildrenByTag {
s, l := spaceDecompose(path)
return &selectChildrenByTag{s, l}
}
func (s *selectChildrenByTag) apply(e *Element, p *pather) {
for _, c := range e.Child {
if c, ok := c.(*Element); ok && spaceMatch(s.space, c.Space) && s.tag == c.Tag {
p.candidates = append(p.candidates, c)
}
}
}
// filterPos filters the candidate list, keeping only the
// candidate at the specified index.
type filterPos struct {
index int
}
func newFilterPos(pos int) *filterPos {
return &filterPos{pos}
}
func (f *filterPos) apply(p *pather) {
if f.index >= 0 {
if f.index < len(p.candidates) {
p.scratch = append(p.scratch, p.candidates[f.index])
}
} else {
if -f.index <= len(p.candidates) {
p.scratch = append(p.scratch, p.candidates[len(p.candidates)+f.index])
}
}
p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
// filterAttr filters the candidate list for elements having
// the specified attribute.
type filterAttr struct {
space, key string
}
func newFilterAttr(str string) *filterAttr {
s, l := spaceDecompose(str)
return &filterAttr{s, l}
}
func (f *filterAttr) apply(p *pather) {
for _, c := range p.candidates {
for _, a := range c.Attr {
if spaceMatch(f.space, a.Space) && f.key == a.Key {
p.scratch = append(p.scratch, c)
break
}
}
}
p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
// filterAttrVal filters the candidate list for elements having
// the specified attribute with the specified value.
type filterAttrVal struct {
space, key, val string
}
func newFilterAttrVal(str, value string) *filterAttrVal {
s, l := spaceDecompose(str)
return &filterAttrVal{s, l, value}
}
func (f *filterAttrVal) apply(p *pather) {
for _, c := range p.candidates {
for _, a := range c.Attr {
if spaceMatch(f.space, a.Space) && f.key == a.Key && f.val == a.Value {
p.scratch = append(p.scratch, c)
break
}
}
}
p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
// filterText filters the candidate list for elements having text.
type filterText struct{}
func newFilterText() *filterText {
return &filterText{}
}
func (f *filterText) apply(p *pather) {
for _, c := range p.candidates {
if c.Text() != "" {
p.scratch = append(p.scratch, c)
}
}
p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
// filterTextVal filters the candidate list for elements having
// text equal to the specified value.
type filterTextVal struct {
val string
}
func newFilterTextVal(value string) *filterTextVal {
return &filterTextVal{value}
}
func (f *filterTextVal) apply(p *pather) {
for _, c := range p.candidates {
if c.Text() == f.val {
p.scratch = append(p.scratch, c)
}
}
p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
// filterChild filters the candidate list for elements having
// a child element with the specified tag.
type filterChild struct {
space, tag string
}
func newFilterChild(str string) *filterChild {
s, l := spaceDecompose(str)
return &filterChild{s, l}
}
func (f *filterChild) apply(p *pather) {
for _, c := range p.candidates {
for _, cc := range c.Child {
if cc, ok := cc.(*Element); ok &&
spaceMatch(f.space, cc.Space) &&
f.tag == cc.Tag {
p.scratch = append(p.scratch, c)
}
}
}
p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}
// filterChildText filters the candidate list for elements having
// a child element with the specified tag and text.
type filterChildText struct {
space, tag, text string
}
func newFilterChildText(str, text string) *filterChildText {
s, l := spaceDecompose(str)
return &filterChildText{s, l, text}
}
func (f *filterChildText) apply(p *pather) {
for _, c := range p.candidates {
for _, cc := range c.Child {
if cc, ok := cc.(*Element); ok &&
spaceMatch(f.space, cc.Space) &&
f.tag == cc.Tag &&
f.text == cc.Text() {
p.scratch = append(p.scratch, c)
}
}
}
p.candidates, p.scratch = p.scratch, p.candidates[0:0]
}

5
vendor/github.com/crewjam/errset/.travis.yml generated vendored Normal file

@@ -0,0 +1,5 @@
language: go
go:
- 1.5
- 1.6

24
vendor/github.com/crewjam/errset/LICENSE generated vendored Normal file

@@ -0,0 +1,24 @@
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org>

21
vendor/github.com/crewjam/errset/README.md generated vendored Normal file

@@ -0,0 +1,21 @@
errset is a trivial golang package that implements a slice of errors.
[![Build Status](https://travis-ci.org/crewjam/errset.svg?branch=master)](https://travis-ci.org/crewjam/errset)
[![](https://godoc.org/github.com/crewjam/errset?status.png)](http://godoc.org/github.com/crewjam/errset)
The typical Go idiom is to return an `error` or a tuple of (thing, `error`) from functions. This works well when a function performs exactly one task, but
when a function does work that can partially fail, I found myself writing the same code over and over again. For example:
    // CommitBatch commits as many things as it can.
    func CommitBatch(things []Thing) error {
        errs := errset.ErrSet{}
        for _, thing := range things {
            err := Commit(thing)
            if err != nil {
                errs = append(errs, err)
            }
        }
        return errs.ReturnValue() // nil if there were no errors
    }

48
vendor/github.com/crewjam/errset/errset.go generated vendored Normal file

@@ -0,0 +1,48 @@
package errset
// ErrSet represents a list of errors.
//
// Example:
//
// errs := ErrSet{}
// for ... {
// if err := DoSomething(i); err != nil {
// errs = append(errs, err)
// }
// }
// return errs.ReturnValue()
//
type ErrSet []error
// ReturnValue returns the ErrSet object if at least one non-nil error is
// present, or nil if there are no errors.
func (es ErrSet) ReturnValue() error {
rv := ErrSet{}
for _, err := range es {
if err != nil {
rv = append(rv, err)
}
}
if len(rv) == 0 {
return nil
}
return rv
}
// Error implements the error interface. It returns each error in the list
// concatenated together with "; ".
func (es ErrSet) Error() string {
rv := ""
errCount := 0
for _, err := range es {
if err == nil {
continue
}
if errCount != 0 {
rv += "; "
}
rv += err.Error()
errCount++
}
return rv
}

4
vendor/github.com/kr/pretty/.gitignore generated vendored Normal file

@@ -0,0 +1,4 @@
[568].out
_go*
_test*
_obj

21
vendor/github.com/kr/pretty/License generated vendored Normal file

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright 2012 Keith Rarick
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

9
vendor/github.com/kr/pretty/Readme generated vendored Normal file

@@ -0,0 +1,9 @@
package pretty
import "github.com/kr/pretty"
Package pretty provides pretty-printing for Go values.
Documentation
http://godoc.org/github.com/kr/pretty

265
vendor/github.com/kr/pretty/diff.go generated vendored Normal file

@@ -0,0 +1,265 @@
package pretty
import (
"fmt"
"io"
"reflect"
)
type sbuf []string
func (p *sbuf) Printf(format string, a ...interface{}) {
s := fmt.Sprintf(format, a...)
*p = append(*p, s)
}
// Diff returns a slice where each element describes
// a difference between a and b.
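//
// A small sketch of the output format (T is a hypothetical type):
//
//	type T struct{ A int }
//	Diff(T{1}, T{2}) // => []string{"A: 1 != 2"}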
func Diff(a, b interface{}) (desc []string) {
Pdiff((*sbuf)(&desc), a, b)
return desc
}
// wprintfer calls Fprintf on w for each Printf call
// with a trailing newline.
type wprintfer struct{ w io.Writer }
func (p *wprintfer) Printf(format string, a ...interface{}) {
fmt.Fprintf(p.w, format+"\n", a...)
}
// Fdiff writes to w a description of the differences between a and b.
func Fdiff(w io.Writer, a, b interface{}) {
Pdiff(&wprintfer{w}, a, b)
}
type Printfer interface {
Printf(format string, a ...interface{})
}
// Pdiff prints to p a description of the differences between a and b.
// It calls Printf once for each difference, with no trailing newline.
// The standard library log.Logger is a Printfer.
func Pdiff(p Printfer, a, b interface{}) {
diffPrinter{w: p}.diff(reflect.ValueOf(a), reflect.ValueOf(b))
}
type Logfer interface {
Logf(format string, a ...interface{})
}
// logprintfer forwards each Printf call to the wrapped Logfer's Logf.
type logprintfer struct{ l Logfer }
func (p *logprintfer) Printf(format string, a ...interface{}) {
p.l.Logf(format, a...)
}
// Ldiff prints to l a description of the differences between a and b.
// It calls Logf once for each difference, with no trailing newline.
// The standard library testing.T and testing.B are Logfers.
func Ldiff(l Logfer, a, b interface{}) {
Pdiff(&logprintfer{l}, a, b)
}
type diffPrinter struct {
w Printfer
l string // label
}
func (w diffPrinter) printf(f string, a ...interface{}) {
var l string
if w.l != "" {
l = w.l + ": "
}
w.w.Printf(l+f, a...)
}
func (w diffPrinter) diff(av, bv reflect.Value) {
if !av.IsValid() && bv.IsValid() {
w.printf("nil != %# v", formatter{v: bv, quote: true})
return
}
if av.IsValid() && !bv.IsValid() {
w.printf("%# v != nil", formatter{v: av, quote: true})
return
}
if !av.IsValid() && !bv.IsValid() {
return
}
at := av.Type()
bt := bv.Type()
if at != bt {
w.printf("%v != %v", at, bt)
return
}
switch kind := at.Kind(); kind {
case reflect.Bool:
if a, b := av.Bool(), bv.Bool(); a != b {
w.printf("%v != %v", a, b)
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if a, b := av.Int(), bv.Int(); a != b {
w.printf("%d != %d", a, b)
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
if a, b := av.Uint(), bv.Uint(); a != b {
w.printf("%d != %d", a, b)
}
case reflect.Float32, reflect.Float64:
if a, b := av.Float(), bv.Float(); a != b {
w.printf("%v != %v", a, b)
}
case reflect.Complex64, reflect.Complex128:
if a, b := av.Complex(), bv.Complex(); a != b {
w.printf("%v != %v", a, b)
}
case reflect.Array:
n := av.Len()
for i := 0; i < n; i++ {
w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i))
}
case reflect.Chan, reflect.Func, reflect.UnsafePointer:
if a, b := av.Pointer(), bv.Pointer(); a != b {
w.printf("%#x != %#x", a, b)
}
case reflect.Interface:
w.diff(av.Elem(), bv.Elem())
case reflect.Map:
ak, both, bk := keyDiff(av.MapKeys(), bv.MapKeys())
for _, k := range ak {
w := w.relabel(fmt.Sprintf("[%#v]", k))
w.printf("%q != (missing)", av.MapIndex(k))
}
for _, k := range both {
w := w.relabel(fmt.Sprintf("[%#v]", k))
w.diff(av.MapIndex(k), bv.MapIndex(k))
}
for _, k := range bk {
w := w.relabel(fmt.Sprintf("[%#v]", k))
w.printf("(missing) != %q", bv.MapIndex(k))
}
case reflect.Ptr:
switch {
case av.IsNil() && !bv.IsNil():
w.printf("nil != %# v", formatter{v: bv, quote: true})
case !av.IsNil() && bv.IsNil():
w.printf("%# v != nil", formatter{v: av, quote: true})
case !av.IsNil() && !bv.IsNil():
w.diff(av.Elem(), bv.Elem())
}
case reflect.Slice:
lenA := av.Len()
lenB := bv.Len()
if lenA != lenB {
w.printf("%s[%d] != %s[%d]", av.Type(), lenA, bv.Type(), lenB)
break
}
for i := 0; i < lenA; i++ {
w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i))
}
case reflect.String:
if a, b := av.String(), bv.String(); a != b {
w.printf("%q != %q", a, b)
}
case reflect.Struct:
for i := 0; i < av.NumField(); i++ {
w.relabel(at.Field(i).Name).diff(av.Field(i), bv.Field(i))
}
default:
panic("unknown reflect Kind: " + kind.String())
}
}
func (d diffPrinter) relabel(name string) (d1 diffPrinter) {
d1 = d
if d.l != "" && name[0] != '[' {
d1.l += "."
}
d1.l += name
return d1
}
// keyEqual compares a and b for equality.
// Both a and b must be valid map keys.
func keyEqual(av, bv reflect.Value) bool {
if !av.IsValid() && !bv.IsValid() {
return true
}
if !av.IsValid() || !bv.IsValid() || av.Type() != bv.Type() {
return false
}
switch kind := av.Kind(); kind {
case reflect.Bool:
a, b := av.Bool(), bv.Bool()
return a == b
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
a, b := av.Int(), bv.Int()
return a == b
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
a, b := av.Uint(), bv.Uint()
return a == b
case reflect.Float32, reflect.Float64:
a, b := av.Float(), bv.Float()
return a == b
case reflect.Complex64, reflect.Complex128:
a, b := av.Complex(), bv.Complex()
return a == b
case reflect.Array:
for i := 0; i < av.Len(); i++ {
if !keyEqual(av.Index(i), bv.Index(i)) {
return false
}
}
return true
case reflect.Chan, reflect.UnsafePointer, reflect.Ptr:
a, b := av.Pointer(), bv.Pointer()
return a == b
case reflect.Interface:
return keyEqual(av.Elem(), bv.Elem())
case reflect.String:
a, b := av.String(), bv.String()
return a == b
case reflect.Struct:
for i := 0; i < av.NumField(); i++ {
if !keyEqual(av.Field(i), bv.Field(i)) {
return false
}
}
return true
default:
panic("invalid map key type " + av.Type().String())
}
}
func keyDiff(a, b []reflect.Value) (ak, both, bk []reflect.Value) {
for _, av := range a {
inBoth := false
for _, bv := range b {
if keyEqual(av, bv) {
inBoth = true
both = append(both, av)
break
}
}
if !inBoth {
ak = append(ak, av)
}
}
for _, bv := range b {
inBoth := false
for _, av := range a {
if keyEqual(av, bv) {
inBoth = true
break
}
}
if !inBoth {
bk = append(bk, bv)
}
}
return
}

328
vendor/github.com/kr/pretty/formatter.go generated vendored Normal file

@@ -0,0 +1,328 @@
package pretty
import (
"fmt"
"io"
"reflect"
"strconv"
"text/tabwriter"
"github.com/kr/text"
)
type formatter struct {
v reflect.Value
force bool
quote bool
}
// Formatter makes a wrapper, f, that will format x as go source with line
// breaks and tabs. Object f responds to the "%v" formatting verb when both the
// "#" and " " (space) flags are set, for example:
//
// fmt.Sprintf("%# v", Formatter(x))
//
// If one of these two flags is not set, or any other verb is used, f will
// format x according to the usual rules of package fmt.
// In particular, if x satisfies fmt.Formatter, then x.Format will be called.
func Formatter(x interface{}) (f fmt.Formatter) {
return formatter{v: reflect.ValueOf(x), quote: true}
}
func (fo formatter) String() string {
return fmt.Sprint(fo.v.Interface()) // unwrap it
}
func (fo formatter) passThrough(f fmt.State, c rune) {
s := "%"
for i := 0; i < 128; i++ {
if f.Flag(i) {
s += string(i)
}
}
if w, ok := f.Width(); ok {
s += fmt.Sprintf("%d", w)
}
if p, ok := f.Precision(); ok {
s += fmt.Sprintf(".%d", p)
}
s += string(c)
fmt.Fprintf(f, s, fo.v.Interface())
}
func (fo formatter) Format(f fmt.State, c rune) {
if fo.force || c == 'v' && f.Flag('#') && f.Flag(' ') {
w := tabwriter.NewWriter(f, 4, 4, 1, ' ', 0)
p := &printer{tw: w, Writer: w, visited: make(map[visit]int)}
p.printValue(fo.v, true, fo.quote)
w.Flush()
return
}
fo.passThrough(f, c)
}
type printer struct {
io.Writer
tw *tabwriter.Writer
visited map[visit]int
depth int
}
func (p *printer) indent() *printer {
q := *p
q.tw = tabwriter.NewWriter(p.Writer, 4, 4, 1, ' ', 0)
q.Writer = text.NewIndentWriter(q.tw, []byte{'\t'})
return &q
}
func (p *printer) printInline(v reflect.Value, x interface{}, showType bool) {
if showType {
io.WriteString(p, v.Type().String())
fmt.Fprintf(p, "(%#v)", x)
} else {
fmt.Fprintf(p, "%#v", x)
}
}
// printValue must keep track of already-printed pointer values to avoid
// infinite recursion.
type visit struct {
v uintptr
typ reflect.Type
}
func (p *printer) printValue(v reflect.Value, showType, quote bool) {
if p.depth > 10 {
io.WriteString(p, "!%v(DEPTH EXCEEDED)")
return
}
switch v.Kind() {
case reflect.Bool:
p.printInline(v, v.Bool(), showType)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
p.printInline(v, v.Int(), showType)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
p.printInline(v, v.Uint(), showType)
case reflect.Float32, reflect.Float64:
p.printInline(v, v.Float(), showType)
case reflect.Complex64, reflect.Complex128:
fmt.Fprintf(p, "%#v", v.Complex())
case reflect.String:
p.fmtString(v.String(), quote)
case reflect.Map:
t := v.Type()
if showType {
io.WriteString(p, t.String())
}
writeByte(p, '{')
if nonzero(v) {
expand := !canInline(v.Type())
pp := p
if expand {
writeByte(p, '\n')
pp = p.indent()
}
keys := v.MapKeys()
for i := 0; i < v.Len(); i++ {
showTypeInStruct := true
k := keys[i]
mv := v.MapIndex(k)
pp.printValue(k, false, true)
writeByte(pp, ':')
if expand {
writeByte(pp, '\t')
}
showTypeInStruct = t.Elem().Kind() == reflect.Interface
pp.printValue(mv, showTypeInStruct, true)
if expand {
io.WriteString(pp, ",\n")
} else if i < v.Len()-1 {
io.WriteString(pp, ", ")
}
}
if expand {
pp.tw.Flush()
}
}
writeByte(p, '}')
case reflect.Struct:
t := v.Type()
if v.CanAddr() {
addr := v.UnsafeAddr()
vis := visit{addr, t}
if vd, ok := p.visited[vis]; ok && vd < p.depth {
p.fmtString(t.String()+"{(CYCLIC REFERENCE)}", false)
break // don't print v again
}
p.visited[vis] = p.depth
}
if showType {
io.WriteString(p, t.String())
}
writeByte(p, '{')
if nonzero(v) {
expand := !canInline(v.Type())
pp := p
if expand {
writeByte(p, '\n')
pp = p.indent()
}
for i := 0; i < v.NumField(); i++ {
showTypeInStruct := true
if f := t.Field(i); f.Name != "" {
io.WriteString(pp, f.Name)
writeByte(pp, ':')
if expand {
writeByte(pp, '\t')
}
showTypeInStruct = labelType(f.Type)
}
pp.printValue(getField(v, i), showTypeInStruct, true)
if expand {
io.WriteString(pp, ",\n")
} else if i < v.NumField()-1 {
io.WriteString(pp, ", ")
}
}
if expand {
pp.tw.Flush()
}
}
writeByte(p, '}')
case reflect.Interface:
switch e := v.Elem(); {
case e.Kind() == reflect.Invalid:
io.WriteString(p, "nil")
case e.IsValid():
pp := *p
pp.depth++
pp.printValue(e, showType, true)
default:
io.WriteString(p, v.Type().String())
io.WriteString(p, "(nil)")
}
case reflect.Array, reflect.Slice:
t := v.Type()
if showType {
io.WriteString(p, t.String())
}
if v.Kind() == reflect.Slice && v.IsNil() && showType {
io.WriteString(p, "(nil)")
break
}
if v.Kind() == reflect.Slice && v.IsNil() {
io.WriteString(p, "nil")
break
}
writeByte(p, '{')
expand := !canInline(v.Type())
pp := p
if expand {
writeByte(p, '\n')
pp = p.indent()
}
for i := 0; i < v.Len(); i++ {
showTypeInSlice := t.Elem().Kind() == reflect.Interface
pp.printValue(v.Index(i), showTypeInSlice, true)
if expand {
io.WriteString(pp, ",\n")
} else if i < v.Len()-1 {
io.WriteString(pp, ", ")
}
}
if expand {
pp.tw.Flush()
}
writeByte(p, '}')
case reflect.Ptr:
e := v.Elem()
if !e.IsValid() {
writeByte(p, '(')
io.WriteString(p, v.Type().String())
io.WriteString(p, ")(nil)")
} else {
pp := *p
pp.depth++
writeByte(pp, '&')
pp.printValue(e, true, true)
}
case reflect.Chan:
x := v.Pointer()
if showType {
writeByte(p, '(')
io.WriteString(p, v.Type().String())
fmt.Fprintf(p, ")(%#v)", x)
} else {
fmt.Fprintf(p, "%#v", x)
}
case reflect.Func:
io.WriteString(p, v.Type().String())
io.WriteString(p, " {...}")
case reflect.UnsafePointer:
p.printInline(v, v.Pointer(), showType)
case reflect.Invalid:
io.WriteString(p, "nil")
}
}
func canInline(t reflect.Type) bool {
switch t.Kind() {
case reflect.Map:
return !canExpand(t.Elem())
case reflect.Struct:
for i := 0; i < t.NumField(); i++ {
if canExpand(t.Field(i).Type) {
return false
}
}
return true
case reflect.Interface:
return false
case reflect.Array, reflect.Slice:
return !canExpand(t.Elem())
case reflect.Ptr:
return false
case reflect.Chan, reflect.Func, reflect.UnsafePointer:
return false
}
return true
}
func canExpand(t reflect.Type) bool {
switch t.Kind() {
case reflect.Map, reflect.Struct,
reflect.Interface, reflect.Array, reflect.Slice,
reflect.Ptr:
return true
}
return false
}
func labelType(t reflect.Type) bool {
switch t.Kind() {
case reflect.Interface, reflect.Struct:
return true
}
return false
}
func (p *printer) fmtString(s string, quote bool) {
if quote {
s = strconv.Quote(s)
}
io.WriteString(p, s)
}
func writeByte(w io.Writer, b byte) {
w.Write([]byte{b})
}
func getField(v reflect.Value, i int) reflect.Value {
val := v.Field(i)
if val.Kind() == reflect.Interface && !val.IsNil() {
val = val.Elem()
}
return val
}

3
vendor/github.com/kr/pretty/go.mod generated vendored Normal file

@@ -0,0 +1,3 @@
module "github.com/kr/pretty"
require "github.com/kr/text" v0.1.0

108
vendor/github.com/kr/pretty/pretty.go generated vendored Normal file

@@ -0,0 +1,108 @@
// Package pretty provides pretty-printing for Go values. This is
// useful during debugging, to avoid wrapping long output lines in
// the terminal.
//
// It provides a function, Formatter, that can be used with any
// function that accepts a format string. It also provides
// convenience wrappers for functions in packages fmt and log.
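//
// A minimal usage sketch, where someValue stands for any Go value:
//
//	pretty.Printf("%# v\n", someValue)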
package pretty
import (
"fmt"
"io"
"log"
"reflect"
)
// Errorf is a convenience wrapper for fmt.Errorf.
//
// Calling Errorf(f, x, y) is equivalent to
// fmt.Errorf(f, Formatter(x), Formatter(y)).
func Errorf(format string, a ...interface{}) error {
return fmt.Errorf(format, wrap(a, false)...)
}
// Fprintf is a convenience wrapper for fmt.Fprintf.
//
// Calling Fprintf(w, f, x, y) is equivalent to
// fmt.Fprintf(w, f, Formatter(x), Formatter(y)).
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, error error) {
return fmt.Fprintf(w, format, wrap(a, false)...)
}
// Log is a convenience wrapper for log.Print.
//
// Calling Log(x, y) is equivalent to
// log.Print(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Log(a ...interface{}) {
log.Print(wrap(a, true)...)
}
// Logf is a convenience wrapper for log.Printf.
//
// Calling Logf(f, x, y) is equivalent to
// log.Printf(f, Formatter(x), Formatter(y)).
func Logf(format string, a ...interface{}) {
log.Printf(format, wrap(a, false)...)
}
// Logln is a convenience wrapper for log.Println.
//
// Calling Logln(x, y) is equivalent to
// log.Println(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Logln(a ...interface{}) {
log.Println(wrap(a, true)...)
}
// Print pretty-prints its operands and writes to standard output.
//
// Calling Print(x, y) is equivalent to
// fmt.Print(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Print(a ...interface{}) (n int, errno error) {
return fmt.Print(wrap(a, true)...)
}
// Printf is a convenience wrapper for fmt.Printf.
//
// Calling Printf(f, x, y) is equivalent to
// fmt.Printf(f, Formatter(x), Formatter(y)).
func Printf(format string, a ...interface{}) (n int, errno error) {
return fmt.Printf(format, wrap(a, false)...)
}
// Println pretty-prints its operands and writes to standard output.
//
// Calling Println(x, y) is equivalent to
// fmt.Println(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Println(a ...interface{}) (n int, errno error) {
return fmt.Println(wrap(a, true)...)
}
// Sprint is a convenience wrapper for fmt.Sprint.
//
// Calling Sprint(x, y) is equivalent to
// fmt.Sprint(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Sprint(a ...interface{}) string {
return fmt.Sprint(wrap(a, true)...)
}
// Sprintf is a convenience wrapper for fmt.Sprintf.
//
// Calling Sprintf(f, x, y) is equivalent to
// fmt.Sprintf(f, Formatter(x), Formatter(y)).
func Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, wrap(a, false)...)
}
func wrap(a []interface{}, force bool) []interface{} {
w := make([]interface{}, len(a))
for i, x := range a {
w[i] = formatter{v: reflect.ValueOf(x), force: force}
}
return w
}

41
vendor/github.com/kr/pretty/zero.go generated vendored Normal file

@@ -0,0 +1,41 @@
package pretty
import (
"reflect"
)
func nonzero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Bool:
return v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() != 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() != 0
case reflect.Float32, reflect.Float64:
return v.Float() != 0
case reflect.Complex64, reflect.Complex128:
return v.Complex() != complex(0, 0)
case reflect.String:
return v.String() != ""
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
if nonzero(getField(v, i)) {
return true
}
}
return false
case reflect.Array:
for i := 0; i < v.Len(); i++ {
if nonzero(v.Index(i)) {
return true
}
}
return false
case reflect.Map, reflect.Interface, reflect.Slice, reflect.Ptr, reflect.Chan, reflect.Func:
return !v.IsNil()
case reflect.UnsafePointer:
return v.Pointer() != 0
}
return true
}

19
vendor/github.com/kr/text/License generated vendored Normal file

@@ -0,0 +1,19 @@
Copyright 2012 Keith Rarick
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

3
vendor/github.com/kr/text/Readme generated vendored Normal file

@@ -0,0 +1,3 @@
This is a Go package for manipulating paragraphs of text.
See http://go.pkgdoc.org/github.com/kr/text for full documentation.

3
vendor/github.com/kr/text/doc.go generated vendored Normal file

@@ -0,0 +1,3 @@
// Package text provides rudimentary functions for manipulating text in
// paragraphs.
package text

3
vendor/github.com/kr/text/go.mod generated vendored Normal file

@@ -0,0 +1,3 @@
module "github.com/kr/text"
require "github.com/kr/pty" v1.1.1

74
vendor/github.com/kr/text/indent.go generated vendored Normal file

@@ -0,0 +1,74 @@
package text
import (
"io"
)
// Indent inserts prefix at the beginning of each non-empty line of s. The
// end-of-line marker is NL.
func Indent(s, prefix string) string {
return string(IndentBytes([]byte(s), []byte(prefix)))
}
// IndentBytes inserts prefix at the beginning of each non-empty line of b.
// The end-of-line marker is NL.
func IndentBytes(b, prefix []byte) []byte {
var res []byte
bol := true
for _, c := range b {
if bol && c != '\n' {
res = append(res, prefix...)
}
res = append(res, c)
bol = c == '\n'
}
return res
}
// indentWriter is a writer that indents each line of its input.
type indentWriter struct {
w io.Writer
bol bool
pre [][]byte
sel int
off int
}
// NewIndentWriter makes a new write filter that indents the input
// lines. Each line is prefixed in order with the corresponding
// element of pre. If there are more lines than elements, the last
// element of pre is repeated for each subsequent line.
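//
// A minimal sketch (os.Stdout is used only for illustration):
//
//	w := NewIndentWriter(os.Stdout, []byte("> "))
//	io.WriteString(w, "one\ntwo\n") // prints "> one" and "> two"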
func NewIndentWriter(w io.Writer, pre ...[]byte) io.Writer {
return &indentWriter{
w: w,
pre: pre,
bol: true,
}
}
// The only errors returned are from the underlying writer.
func (w *indentWriter) Write(p []byte) (n int, err error) {
for _, c := range p {
if w.bol {
var i int
i, err = w.w.Write(w.pre[w.sel][w.off:])
w.off += i
if err != nil {
return n, err
}
}
_, err = w.w.Write([]byte{c})
if err != nil {
return n, err
}
n++
w.bol = c == '\n'
if w.bol {
w.off = 0
if w.sel < len(w.pre)-1 {
w.sel++
}
}
}
return n, nil
}

86
vendor/github.com/kr/text/wrap.go generated vendored Normal file

@@ -0,0 +1,86 @@
package text
import (
"bytes"
"math"
)
var (
nl = []byte{'\n'}
sp = []byte{' '}
)
const defaultPenalty = 1e5
// Wrap wraps s into a paragraph of lines of length lim, with minimal
// raggedness.
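//
// For example (a sketch):
//
//	Wrap("the quick brown fox", 10) // => "the quick\nbrown fox"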
func Wrap(s string, lim int) string {
return string(WrapBytes([]byte(s), lim))
}
// WrapBytes wraps b into a paragraph of lines of length lim, with minimal
// raggedness.
func WrapBytes(b []byte, lim int) []byte {
words := bytes.Split(bytes.Replace(bytes.TrimSpace(b), nl, sp, -1), sp)
var lines [][]byte
for _, line := range WrapWords(words, 1, lim, defaultPenalty) {
lines = append(lines, bytes.Join(line, sp))
}
return bytes.Join(lines, nl)
}
// WrapWords is the low-level line-breaking algorithm, useful if you need more
// control over the details of the text wrapping process. For most uses, either
// Wrap or WrapBytes will be sufficient and more convenient.
//
// WrapWords splits a list of words into lines with minimal "raggedness",
// treating each byte as one unit, accounting for spc units between adjacent
// words on each line, and attempting to limit lines to lim units. Raggedness
// is the total error over all lines, where error is the square of the
// difference of the length of the line and lim. Too-long lines (which only
// happen when a single word is longer than lim units) have pen penalty units
// added to the error.
func WrapWords(words [][]byte, spc, lim, pen int) [][][]byte {
n := len(words)
length := make([][]int, n)
for i := 0; i < n; i++ {
length[i] = make([]int, n)
length[i][i] = len(words[i])
for j := i + 1; j < n; j++ {
length[i][j] = length[i][j-1] + spc + len(words[j])
}
}
nbrk := make([]int, n)
cost := make([]int, n)
for i := range cost {
cost[i] = math.MaxInt32
}
for i := n - 1; i >= 0; i-- {
if length[i][n-1] <= lim || i == n-1 {
cost[i] = 0
nbrk[i] = n
} else {
for j := i + 1; j < n; j++ {
d := lim - length[i][j-1]
c := d*d + cost[j]
if length[i][j-1] > lim {
c += pen // too-long lines get a worse penalty
}
if c < cost[i] {
cost[i] = c
nbrk[i] = j
}
}
}
}
var lines [][][]byte
i := 0
for i < n {
lines = append(lines, words[i:nbrk[i]])
i = nbrk[i]
}
return lines
}

24
vendor/github.com/ma314smith/signedxml/.gitignore generated vendored Normal file

@@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

16
vendor/github.com/ma314smith/signedxml/.travis.yml generated vendored Normal file
View File

@@ -0,0 +1,16 @@
language: go
sudo: false
go:
- 1.x
- 1.6
- 1.7.x
- master
before_install:
- go get -v github.com/golang/lint/golint
script:
- go vet ./...
- golint ./...
- go test -cover -v ./...

21
vendor/github.com/ma314smith/signedxml/LICENSE.md generated vendored Normal file

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Matt Smith
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

98
vendor/github.com/ma314smith/signedxml/README.md generated vendored Normal file

@@ -0,0 +1,98 @@
## signedxml
[![Build Status](https://travis-ci.org/ma314smith/signedxml.svg?branch=master)](https://travis-ci.org/ma314smith/signedxml)
[![GoDoc](https://godoc.org/github.com/ma314smith/signedxml?status.svg)](https://godoc.org/github.com/ma314smith/signedxml)
The signedxml package transforms and validates signed xml documents. The main use case is to support Single Sign On protocols like SAML and WS-Federation.
Other packages that provide similar functionality rely on C libraries, which makes them difficult to run across platforms without significant configuration. `signedxml` is written in pure go, and can be easily used on any platform.
### Install
`go get github.com/ma314smith/signedxml`
### Included Algorithms
- Hashes
- http://www.w3.org/2001/04/xmldsig-more#md5
- http://www.w3.org/2000/09/xmldsig#sha1
- http://www.w3.org/2001/04/xmldsig-more#sha224
- http://www.w3.org/2001/04/xmlenc#sha256
- http://www.w3.org/2001/04/xmldsig-more#sha384
- http://www.w3.org/2001/04/xmlenc#sha512
- http://www.w3.org/2001/04/xmlenc#ripemd160
- Signatures
- http://www.w3.org/2001/04/xmldsig-more#rsa-md2
- http://www.w3.org/2001/04/xmldsig-more#rsa-md5
- http://www.w3.org/2000/09/xmldsig#rsa-sha1
- http://www.w3.org/2001/04/xmldsig-more#rsa-sha256
- http://www.w3.org/2001/04/xmldsig-more#rsa-sha384
- http://www.w3.org/2001/04/xmldsig-more#rsa-sha512
- http://www.w3.org/2000/09/xmldsig#dsa-sha1
- http://www.w3.org/2000/09/xmldsig#dsa-sha256
- http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha1
- http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha256
- http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha384
- http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha512
- Canonicalization Methods/Transforms
- http://www.w3.org/2000/09/xmldsig#enveloped-signature
- http://www.w3.org/2001/10/xml-exc-c14n#
- http://www.w3.org/2001/10/xml-exc-c14n#WithComments
### Examples
#### Validating signed XML
If your signed xml contains the signature and certificate, then you can just pass in the xml and call `ValidateReferences()`.
```go
validator, err := signedxml.NewValidator(`<YourXMLString></YourXMLString>`)
xml, err := validator.ValidateReferences()
```
`ValidateReferences()` verifies the DigestValue and SignatureValue in the xml document, and returns the signed payload(s). If the error value is `nil`, then the signed xml is valid.
The x509.Certificate that was successfully used to validate the xml will be available by calling:
```go
validator.SigningCert()
```
You can then verify that you trust the certificate. You can optionally supply your trusted certificates ahead of time by assigning them to the `Certificates` property of the `Validator` object, which is an x509.Certificate array.
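For example, here is a minimal sketch of pinning trusted certificates before validation. It assumes imports of `crypto/x509`, `encoding/pem`, `fmt` and `io/ioutil`; the certificate file name is illustrative and error handling is omitted for brevity.
```go
pemBytes, _ := ioutil.ReadFile("trusted-cert.pem") // hypothetical file name
block, _ := pem.Decode(pemBytes)
cert, _ := x509.ParseCertificate(block.Bytes)

validator, _ := signedxml.NewValidator(`<YourXMLString></YourXMLString>`)
// Only the certificates supplied here will be tried during validation.
validator.Certificates = append(validator.Certificates, *cert)

if _, err := validator.ValidateReferences(); err != nil {
	fmt.Println("signature invalid:", err)
} else {
	fmt.Println("signed by:", validator.SigningCert().Subject.CommonName)
}
```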
#### Using an external Signature
If you need to specify an external Signature, you can use the `SetSignature()` function to assign it:
```go
validator.SetSignature(`<Signature></Signature>`)
```
#### Generating signed XML
It is expected that your XML contains the Signature element with all the parameters set (except DigestValue and SignatureValue).
```go
signer, err := signedxml.NewSigner(`<YourXMLString></YourXMLString>`)
signedXML, err := signer.Sign(`*rsa.PrivateKey object`)
```
`Sign()` will generate the DigestValue and SignatureValue, populate it in the XML, and return the signed XML string.
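As a minimal sketch, an `*rsa.PrivateKey` can be loaded from a PEM file and passed to `Sign()` like this. The key file name, the unencrypted PKCS#1 format, and the `xmlWithSignatureTemplate` variable are assumptions for illustration; imports of `crypto/x509`, `encoding/pem`, `fmt` and `io/ioutil` are assumed, and error handling is omitted for brevity.
```go
keyBytes, _ := ioutil.ReadFile("key.pem") // hypothetical, unencrypted PKCS#1 RSA key
block, _ := pem.Decode(keyBytes)
privateKey, _ := x509.ParsePKCS1PrivateKey(block.Bytes)

signer, _ := signedxml.NewSigner(xmlWithSignatureTemplate) // your XML containing the Signature template
signedXML, _ := signer.Sign(privateKey)
fmt.Println(signedXML)
```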
#### Implementing custom transforms
Additional Transform algorithms can be included by adding to the CanonicalizationAlgorithms map. This interface will need to be implemented:
```go
type CanonicalizationAlgorithm interface {
Process(inputXML string, transformXML string) (outputXML string, err error)
}
```
Simple Example:
```go
type NoChangeCanonicalization struct{}
func (n NoChangeCanonicalization) Process(inputXML string,
transformXML string) (outputXML string, err error) {
return inputXML, nil
}
signedxml.CanonicalizationAlgorithms["http://myTranform"] = NoChangeCanonicalization{}
```
See `envelopedsignature.go` and `exclusivecanonicalization.go` for examples of actual implementations.
### Contributions
Contributions are welcome. Just fork the repo and send a pull request.

39
vendor/github.com/ma314smith/signedxml/envelopedsignature.go generated vendored Normal file
View File

@@ -0,0 +1,39 @@
package signedxml
import (
"errors"
"github.com/beevik/etree"
)
// EnvelopedSignature implements the CanonicalizationAlgorithm
// interface and is used for processing the
// http://www.w3.org/2000/09/xmldsig#enveloped-signature transform
// algorithm
type EnvelopedSignature struct{}
// Process is called to transform the XML using the EnvelopedSignature
// algorithm
func (e EnvelopedSignature) Process(inputXML string,
transformXML string) (outputXML string, err error) {
doc := etree.NewDocument()
doc.ReadFromString(inputXML)
sig := doc.FindElement(".//Signature")
if sig == nil {
return "", errors.New("signedxml: unable to find Signature node")
}
sigParent := sig.Parent()
elem := sigParent.RemoveChild(sig)
if elem == nil {
return "", errors.New("signedxml: unable to remove Signature element")
}
docString, err := doc.WriteToString()
if err != nil {
return "", err
}
//logger.Println(docString)
return docString, nil
}

302
vendor/github.com/ma314smith/signedxml/exclusivecanonicalization.go generated vendored Normal file
View File

@@ -0,0 +1,302 @@
package signedxml
import (
"sort"
"strings"
"github.com/beevik/etree"
)
// the attribute and attributes structs are used to implement the sort.Interface
type attribute struct {
prefix, uri, key, value string
}
type attributes []attribute
func (a attributes) Len() int {
return len(a)
}
// Less is part of the sort.Interface, and is used to order attributes by their
// namespace URIs and then by their keys.
func (a attributes) Less(i, j int) bool {
if a[i].uri == "" && a[j].uri != "" {
return true
}
if a[j].uri == "" && a[i].uri != "" {
return false
}
iQual := a[i].uri + a[i].key
jQual := a[j].uri + a[j].key
return iQual < jQual
}
func (a attributes) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
// ExclusiveCanonicalization implements the CanonicalizationAlgorithm
// interface and is used for processing the
// http://www.w3.org/2001/10/xml-exc-c14n# and
// http://www.w3.org/2001/10/xml-exc-c14n#WithComments transform
// algorithms
type ExclusiveCanonicalization struct {
WithComments bool
inclusiveNamespacePrefixList []string
namespaces map[string]string
}
// Process is called to transform the XML using the ExclusiveCanonicalization
// algorithm
func (e ExclusiveCanonicalization) Process(inputXML string,
transformXML string) (outputXML string, err error) {
e.namespaces = make(map[string]string)
doc := etree.NewDocument()
doc.WriteSettings.CanonicalEndTags = true
doc.WriteSettings.CanonicalText = true
doc.WriteSettings.CanonicalAttrVal = true
err = doc.ReadFromString(inputXML)
if err != nil {
return "", err
}
e.loadPrefixList(transformXML)
e.processDocLevelNodes(doc)
e.processRecursive(doc.Root(), nil, "")
outputXML, err = doc.WriteToString()
return outputXML, err
}
func (e *ExclusiveCanonicalization) loadPrefixList(transformXML string) {
if transformXML != "" {
tDoc := etree.NewDocument()
tDoc.ReadFromString(transformXML)
inclNSNode := tDoc.Root().SelectElement("InclusiveNamespaces")
if inclNSNode != nil {
prefixList := inclNSNode.SelectAttrValue("PrefixList", "")
if prefixList != "" {
e.inclusiveNamespacePrefixList = strings.Split(prefixList, " ")
}
}
}
}
// process nodes outside of the root element
func (e ExclusiveCanonicalization) processDocLevelNodes(doc *etree.Document) {
// keep track of the previous node action to manage line returns in CharData
previousNodeRemoved := false
for i := 0; i < len(doc.Child); i++ {
c := doc.Child[i]
switch c := c.(type) {
case *etree.Comment:
if e.WithComments {
previousNodeRemoved = false
} else {
removeTokenFromDocument(c, doc)
i--
previousNodeRemoved = true
}
case *etree.CharData:
if isWhitespace(c.Data) {
if previousNodeRemoved {
removeTokenFromDocument(c, doc)
i--
previousNodeRemoved = true
} else {
c.Data = "\n"
}
}
case *etree.Directive:
removeTokenFromDocument(c, doc)
i--
previousNodeRemoved = true
case *etree.ProcInst:
// remove declaration, but leave other PI's
if c.Target == "xml" {
removeTokenFromDocument(c, doc)
i--
previousNodeRemoved = true
} else {
previousNodeRemoved = false
}
default:
previousNodeRemoved = false
}
}
// if the last line is CharData whitespace, then remove it
if c, ok := doc.Child[len(doc.Child)-1].(*etree.CharData); ok {
if isWhitespace(c.Data) {
removeTokenFromDocument(c, doc)
}
}
}
func (e ExclusiveCanonicalization) processRecursive(node *etree.Element,
prefixesInScope []string, defaultNS string) {
newDefaultNS, newPrefixesInScope :=
e.renderAttributes(node, prefixesInScope, defaultNS)
for _, child := range node.Child {
switch child := child.(type) {
case *etree.Comment:
if !e.WithComments {
removeTokenFromElement(etree.Token(child), node)
}
case *etree.Element:
e.processRecursive(child, newPrefixesInScope, newDefaultNS)
}
}
}
func (e ExclusiveCanonicalization) renderAttributes(node *etree.Element,
prefixesInScope []string, defaultNS string) (newDefaultNS string,
newPrefixesInScope []string) {
currentNS := node.SelectAttrValue("xmlns", defaultNS)
elementAttributes := []etree.Attr{}
nsListToRender := make(map[string]string)
attrListToRender := attributes{}
// load map for prefix -> uri lookup
for _, attr := range node.Attr {
if attr.Space == "xmlns" {
e.namespaces[attr.Key] = attr.Value
}
}
// handle the namespace of the node itself
if node.Space != "" {
if !contains(prefixesInScope, node.Space) {
nsListToRender["xmlns:"+node.Space] = e.namespaces[node.Space]
prefixesInScope = append(prefixesInScope, node.Space)
}
} else if defaultNS != currentNS {
newDefaultNS = currentNS
elementAttributes = append(elementAttributes,
etree.Attr{Key: "xmlns", Value: currentNS})
}
for _, attr := range node.Attr {
// include the namespaces if they are in the inclusiveNamespacePrefixList
if attr.Space == "xmlns" {
if !contains(prefixesInScope, attr.Key) &&
contains(e.inclusiveNamespacePrefixList, attr.Key) {
nsListToRender["xmlns:"+attr.Key] = attr.Value
prefixesInScope = append(prefixesInScope, attr.Key)
}
}
// include namespaces for qualified attributes
if attr.Space != "" &&
attr.Space != "xmlns" &&
!contains(prefixesInScope, attr.Space) {
nsListToRender["xmlns:"+attr.Space] = e.namespaces[attr.Space]
prefixesInScope = append(prefixesInScope, attr.Space)
}
// include all non-namespace attributes
if attr.Space != "xmlns" && attr.Key != "xmlns" {
attrListToRender = append(attrListToRender,
attribute{
prefix: attr.Space,
uri: e.namespaces[attr.Space],
key: attr.Key,
value: attr.Value,
})
}
}
// sort and add the namespace attributes first
sortedNSList := getSortedNamespaces(nsListToRender)
elementAttributes = append(elementAttributes, sortedNSList...)
// then sort and add the non-namespace attributes
sortedAttributes := getSortedAttributes(attrListToRender)
elementAttributes = append(elementAttributes, sortedAttributes...)
// replace the node's attributes with the sorted copy
node.Attr = elementAttributes
return currentNS, prefixesInScope
}
func contains(slice []string, value string) bool {
for _, s := range slice {
if s == value {
return true
}
}
return false
}
// getSortedNamespaces sorts the namespace attributes by their prefix
func getSortedNamespaces(list map[string]string) []etree.Attr {
var keys []string
for k := range list {
keys = append(keys, k)
}
sort.Strings(keys)
elem := etree.Element{}
for _, k := range keys {
elem.CreateAttr(k, list[k])
}
return elem.Attr
}
// getSortedAttributes sorts attributes by their namespace URIs
func getSortedAttributes(list attributes) []etree.Attr {
sort.Sort(list)
attrs := make([]etree.Attr, len(list))
for i, a := range list {
attrs[i] = etree.Attr{
Space: a.prefix,
Key: a.key,
Value: a.value,
}
}
return attrs
}
func removeTokenFromElement(token etree.Token, e *etree.Element) *etree.Token {
for i, t := range e.Child {
if t == token {
e.Child = append(e.Child[0:i], e.Child[i+1:]...)
return &t
}
}
return nil
}
func removeTokenFromDocument(token etree.Token, d *etree.Document) *etree.Token {
for i, t := range d.Child {
if t == token {
d.Child = append(d.Child[0:i], d.Child[i+1:]...)
return &t
}
}
return nil
}
// isWhitespace returns true if the byte slice contains only
// whitespace characters.
func isWhitespace(s string) bool {
for i := 0; i < len(s); i++ {
if c := s[i]; c != ' ' && c != '\t' && c != '\n' && c != '\r' {
return false
}
}
return true
}

319
vendor/github.com/ma314smith/signedxml/signedxml.go generated vendored Normal file
View File

@@ -0,0 +1,319 @@
// Package signedxml transforms and validates signedxml documents
package signedxml
import (
"crypto"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"errors"
"fmt"
"log"
"os"
"strings"
"github.com/beevik/etree"
)
var logger = log.New(os.Stdout, "DEBUG-SIGNEDXML: ", log.Ldate|log.Ltime|log.Lshortfile)
func init() {
hashAlgorithms = map[string]crypto.Hash{
"http://www.w3.org/2001/04/xmldsig-more#md5": crypto.MD5,
"http://www.w3.org/2000/09/xmldsig#sha1": crypto.SHA1,
"http://www.w3.org/2001/04/xmldsig-more#sha224": crypto.SHA224,
"http://www.w3.org/2001/04/xmlenc#sha256": crypto.SHA256,
"http://www.w3.org/2001/04/xmldsig-more#sha384": crypto.SHA384,
"http://www.w3.org/2001/04/xmlenc#sha512": crypto.SHA512,
"http://www.w3.org/2001/04/xmlenc#ripemd160": crypto.RIPEMD160,
}
signatureAlgorithms = map[string]x509.SignatureAlgorithm{
"http://www.w3.org/2001/04/xmldsig-more#rsa-md2": x509.MD2WithRSA,
"http://www.w3.org/2001/04/xmldsig-more#rsa-md5": x509.MD5WithRSA,
"http://www.w3.org/2000/09/xmldsig#rsa-sha1": x509.SHA1WithRSA,
"http://www.w3.org/2001/04/xmldsig-more#rsa-sha256": x509.SHA256WithRSA,
"http://www.w3.org/2001/04/xmldsig-more#rsa-sha384": x509.SHA384WithRSA,
"http://www.w3.org/2001/04/xmldsig-more#rsa-sha512": x509.SHA512WithRSA,
"http://www.w3.org/2000/09/xmldsig#dsa-sha1": x509.DSAWithSHA1,
"http://www.w3.org/2000/09/xmldsig#dsa-sha256": x509.DSAWithSHA256,
"http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha1": x509.ECDSAWithSHA1,
"http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha256": x509.ECDSAWithSHA256,
"http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha384": x509.ECDSAWithSHA384,
"http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha512": x509.ECDSAWithSHA512,
}
CanonicalizationAlgorithms = map[string]CanonicalizationAlgorithm{
"http://www.w3.org/2000/09/xmldsig#enveloped-signature": EnvelopedSignature{},
"http://www.w3.org/2001/10/xml-exc-c14n#": ExclusiveCanonicalization{},
"http://www.w3.org/2001/10/xml-exc-c14n#WithComments": ExclusiveCanonicalization{WithComments: true},
}
}
// CanonicalizationAlgorithm defines an interface for processing an XML
// document into a standard format.
//
// If any child elements are in the Transform node, the entire transform node
// will be passed to the Process method through the transformXML parameter as an
// XML string. This is necessary for transforms that need additional processing
// data, like XPath (http://www.w3.org/TR/xmldsig-core/#sec-XPath). If there are
// no child elements in Transform (or CanonicalizationMethod), then an empty
// string will be passed through.
type CanonicalizationAlgorithm interface {
Process(inputXML string, transformXML string) (outputXML string, err error)
}
// CanonicalizationAlgorithms maps the CanonicalizationMethod or
// Transform Algorithm URIs to a type that implements the
// CanonicalizationAlgorithm interface.
//
// Implementations are provided for the following transforms:
// http://www.w3.org/2001/10/xml-exc-c14n# (ExclusiveCanonicalization)
// http://www.w3.org/2001/10/xml-exc-c14n#WithComments (ExclusiveCanonicalizationWithComments)
// http://www.w3.org/2000/09/xmldsig#enveloped-signature (EnvelopedSignature)
//
// Custom implementations can be added to the map
var CanonicalizationAlgorithms map[string]CanonicalizationAlgorithm
var hashAlgorithms map[string]crypto.Hash
var signatureAlgorithms map[string]x509.SignatureAlgorithm
// signatureData provides options for verifying a signed XML document
type signatureData struct {
xml *etree.Document
signature *etree.Element
signedInfo *etree.Element
sigValue string
sigAlgorithm x509.SignatureAlgorithm
canonAlgorithm CanonicalizationAlgorithm
}
// SetSignature can be used to assign an external signature for the XML doc
// that Validator will verify
func (s *signatureData) SetSignature(sig string) error {
doc := etree.NewDocument()
err := doc.ReadFromString(sig)
s.signature = doc.Root()
return err
}
func (s *signatureData) parseEnvelopedSignature() error {
sig := s.xml.FindElement(".//Signature")
if sig != nil {
s.signature = sig
} else {
return errors.New("signedxml: Unable to find a unique signature element " +
"in the xml document. The signature must either be enveloped in the " +
"xml doc or externally assigned to Validator.SetSignature")
}
return nil
}
func (s *signatureData) parseSignedInfo() error {
s.signedInfo = nil
s.signedInfo = s.signature.SelectElement("SignedInfo")
if s.signedInfo == nil {
return errors.New("signedxml: unable to find SignedInfo element")
}
// move the Signature level namespace down to SignedInfo so that the signature
// value will match up
if s.signedInfo.Space != "" {
attr := s.signature.SelectAttr(s.signedInfo.Space)
if attr != nil {
s.signedInfo.Attr = []etree.Attr{*attr}
}
} else {
attr := s.signature.SelectAttr("xmlns")
if attr != nil {
s.signedInfo.Attr = []etree.Attr{*attr}
}
}
// Copy the xmlns: declaration for SignedInfo's namespace prefix from the root
// element onto SignedInfo if SignedInfo does not already declare it
root := s.xml.Root()
if root != nil {
sigNS := root.SelectAttr("xmlns:" + s.signedInfo.Space)
if sigNS != nil {
if s.signedInfo.SelectAttr("xmlns:"+s.signedInfo.Space) == nil {
s.signedInfo.CreateAttr("xmlns:"+s.signedInfo.Space, sigNS.Value)
}
}
}
return nil
}
func (s *signatureData) parseSigValue() error {
s.sigValue = ""
sigValueElement := s.signature.SelectElement("SignatureValue")
if sigValueElement != nil {
s.sigValue = sigValueElement.Text()
return nil
}
return errors.New("signedxml: unable to find SignatureValue")
}
func (s *signatureData) parseSigAlgorithm() error {
s.sigAlgorithm = x509.UnknownSignatureAlgorithm
sigMethod := s.signedInfo.SelectElement("SignatureMethod")
var sigAlgoURI string
if sigMethod == nil {
return errors.New("signedxml: Unable to find SignatureMethod element")
}
sigAlgoURI = sigMethod.SelectAttrValue("Algorithm", "")
sigAlgo, ok := signatureAlgorithms[sigAlgoURI]
if ok {
s.sigAlgorithm = sigAlgo
return nil
}
return errors.New("signedxml: Unable to find Algorithm in SignatureMethod element")
}
func (s *signatureData) parseCanonAlgorithm() error {
s.canonAlgorithm = nil
canonMethod := s.signedInfo.SelectElement("CanonicalizationMethod")
var canonAlgoURI string
if canonMethod == nil {
return errors.New("signedxml: Unable to find CanonicalizationMethod element")
}
canonAlgoURI = canonMethod.SelectAttrValue("Algorithm", "")
canonAlgo, ok := CanonicalizationAlgorithms[canonAlgoURI]
if ok {
s.canonAlgorithm = canonAlgo
return nil
}
return errors.New("signedxml: Unable to find Algorithm in " +
"CanonicalizationMethod element")
}
func getReferencedXML(reference *etree.Element, inputDoc *etree.Document) (outputDoc *etree.Document, err error) {
uri := reference.SelectAttrValue("URI", "")
uri = strings.Replace(uri, "#", "", 1)
// populate doc with the referenced xml from the Reference URI
if uri == "" {
outputDoc = inputDoc
} else {
path := fmt.Sprintf(".//[@ID='%s']", uri)
e := inputDoc.FindElement(path)
if e != nil {
outputDoc = etree.NewDocument()
outputDoc.SetRoot(e.Copy())
} else {
// SAML v1.1 Assertions use AssertionID
path := fmt.Sprintf(".//[@AssertionID='%s']", uri)
e := inputDoc.FindElement(path)
if e != nil {
outputDoc = etree.NewDocument()
outputDoc.SetRoot(e.Copy())
}
}
}
if outputDoc == nil {
return nil, errors.New("signedxml: unable to find refereced xml")
}
return outputDoc, nil
}
func getCertFromPEMString(pemString string) (*x509.Certificate, error) {
pubkey := fmt.Sprintf("-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----",
pemString)
pemBlock, _ := pem.Decode([]byte(pubkey))
if pemBlock == nil {
return &x509.Certificate{}, errors.New("Could not parse Public Key PEM")
}
if pemBlock.Type != "PUBLIC KEY" {
return &x509.Certificate{}, errors.New("Found wrong key type")
}
cert, err := x509.ParseCertificate(pemBlock.Bytes)
return cert, err
}
func processTransform(transform *etree.Element,
docIn *etree.Document) (docOut *etree.Document, err error) {
transformAlgoURI := transform.SelectAttrValue("Algorithm", "")
if transformAlgoURI == "" {
return nil, errors.New("signedxml: unable to find Algorithm in Transform")
}
transformAlgo, ok := CanonicalizationAlgorithms[transformAlgoURI]
if !ok {
return nil, fmt.Errorf("signedxml: unable to find matching transform"+
"algorithm for %s in CanonicalizationAlgorithms", transformAlgoURI)
}
var transformContent string
if transform.ChildElements() != nil {
tDoc := etree.NewDocument()
tDoc.SetRoot(transform.Copy())
transformContent, err = tDoc.WriteToString()
if err != nil {
return nil, err
}
}
docString, err := docIn.WriteToString()
if err != nil {
return nil, err
}
docString, err = transformAlgo.Process(docString, transformContent)
if err != nil {
return nil, err
}
docOut = etree.NewDocument()
docOut.ReadFromString(docString)
return docOut, nil
}
func calculateHash(reference *etree.Element, doc *etree.Document) (string, error) {
digestMethodElement := reference.SelectElement("DigestMethod")
if digestMethodElement == nil {
return "", errors.New("signedxml: unable to find DigestMethod")
}
digestMethodURI := digestMethodElement.SelectAttrValue("Algorithm", "")
if digestMethodURI == "" {
return "", errors.New("signedxml: unable to find Algorithm in DigestMethod")
}
digestAlgo, ok := hashAlgorithms[digestMethodURI]
if !ok {
return "", fmt.Errorf("signedxml: unable to find matching hash"+
"algorithm for %s in hashAlgorithms", digestMethodURI)
}
doc.WriteSettings.CanonicalEndTags = true
doc.WriteSettings.CanonicalText = true
doc.WriteSettings.CanonicalAttrVal = true
h := digestAlgo.New()
docBytes, err := doc.WriteToBytes()
if err != nil {
return "", err
}
//ioutil.WriteFile("C:/Temp/SignedXML/Suspect.xml", docBytes, 0644)
//s, _ := doc.WriteToString()
//logger.Println(s)
h.Write(docBytes)
d := h.Sum(nil)
calculatedValue := base64.StdEncoding.EncodeToString(d)
return calculatedValue, nil
}

171
vendor/github.com/ma314smith/signedxml/signer.go generated vendored Normal file
View File

@@ -0,0 +1,171 @@
package signedxml
import (
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"errors"
"github.com/beevik/etree"
)
var signingAlgorithms map[x509.SignatureAlgorithm]cryptoHash
func init() {
signingAlgorithms = map[x509.SignatureAlgorithm]cryptoHash{
// MD2 not supported
// x509.MD2WithRSA: cryptoHash{algorithm: "rsa", hash: crypto.MD2},
x509.MD5WithRSA: cryptoHash{algorithm: "rsa", hash: crypto.MD5},
x509.SHA1WithRSA: cryptoHash{algorithm: "rsa", hash: crypto.SHA1},
x509.SHA256WithRSA: cryptoHash{algorithm: "rsa", hash: crypto.SHA256},
x509.SHA384WithRSA: cryptoHash{algorithm: "rsa", hash: crypto.SHA384},
x509.SHA512WithRSA: cryptoHash{algorithm: "rsa", hash: crypto.SHA512},
// DSA not supported
// x509.DSAWithSHA1: cryptoHash{algorithm: "dsa", hash: crypto.SHA1},
// x509.DSAWithSHA256:cryptoHash{algorithm: "dsa", hash: crypto.SHA256},
// Golang ECDSA support is lacking, can't seem to load private keys
// x509.ECDSAWithSHA1: cryptoHash{algorithm: "ecdsa", hash: crypto.SHA1},
// x509.ECDSAWithSHA256: cryptoHash{algorithm: "ecdsa", hash: crypto.SHA256},
// x509.ECDSAWithSHA384: cryptoHash{algorithm: "ecdsa", hash: crypto.SHA384},
// x509.ECDSAWithSHA512: cryptoHash{algorithm: "ecdsa", hash: crypto.SHA512},
}
}
type cryptoHash struct {
algorithm string
hash crypto.Hash
}
// Signer provides options for signing an XML document
type Signer struct {
signatureData
privateKey interface{}
}
// NewSigner returns a *Signer for the XML provided
func NewSigner(xml string) (*Signer, error) {
doc := etree.NewDocument()
err := doc.ReadFromString(xml)
if err != nil {
return nil, err
}
s := &Signer{signatureData: signatureData{xml: doc}}
return s, nil
}
// Sign populates the XML digest and signature based on the parameters present and privateKey given
func (s *Signer) Sign(privateKey interface{}) (string, error) {
s.privateKey = privateKey
if s.signature == nil {
if err := s.parseEnvelopedSignature(); err != nil {
return "", err
}
}
if err := s.parseSignedInfo(); err != nil {
return "", err
}
if err := s.parseSigAlgorithm(); err != nil {
return "", err
}
if err := s.parseCanonAlgorithm(); err != nil {
return "", err
}
if err := s.setDigest(); err != nil {
return "", err
}
if err := s.setSignature(); err != nil {
return "", err
}
xml, err := s.xml.WriteToString()
if err != nil {
return "", err
}
return xml, nil
}
func (s *Signer) setDigest() (err error) {
references := s.signedInfo.FindElements("./Reference")
for _, ref := range references {
doc := s.xml.Copy()
transforms := ref.SelectElement("Transforms")
for _, transform := range transforms.SelectElements("Transform") {
doc, err = processTransform(transform, doc)
if err != nil {
return err
}
}
doc, err := getReferencedXML(ref, doc)
if err != nil {
return err
}
calculatedValue, err := calculateHash(ref, doc)
if err != nil {
return err
}
digestValueElement := ref.SelectElement("DigestValue")
if digestValueElement == nil {
return errors.New("signedxml: unable to find DigestValue")
}
digestValueElement.SetText(calculatedValue)
}
return nil
}
func (s *Signer) setSignature() error {
doc := etree.NewDocument()
doc.SetRoot(s.signedInfo.Copy())
signedInfo, err := doc.WriteToString()
if err != nil {
return err
}
canonSignedInfo, err := s.canonAlgorithm.Process(signedInfo, "")
if err != nil {
return err
}
var hashed, signature []byte
//var h1, h2 *big.Int
signingAlgorithm, ok := signingAlgorithms[s.sigAlgorithm]
if !ok {
return errors.New("signedxml: unsupported algorithm")
}
hasher := signingAlgorithm.hash.New()
hasher.Write([]byte(canonSignedInfo))
hashed = hasher.Sum(nil)
switch signingAlgorithm.algorithm {
case "rsa":
signature, err = rsa.SignPKCS1v15(rand.Reader, s.privateKey.(*rsa.PrivateKey), signingAlgorithm.hash, hashed)
/*
case "dsa":
h1, h2, err = dsa.Sign(rand.Reader, s.privateKey.(*dsa.PrivateKey), hashed)
case "ecdsa":
h1, h2, err = ecdsa.Sign(rand.Reader, s.privateKey.(*ecdsa.PrivateKey), hashed)
*/
}
if err != nil {
return err
}
// DSA and ECDSA have not been validated
/*
if signature == nil && h1 != nil && h2 != nil {
signature = append(h1.Bytes(), h2.Bytes()...)
}
*/
b64 := base64.StdEncoding.EncodeToString(signature)
sigValueElement := s.signature.SelectElement("SignatureValue")
sigValueElement.SetText(b64)
return nil
}

200
vendor/github.com/ma314smith/signedxml/validator.go generated vendored Normal file
View File

@@ -0,0 +1,200 @@
package signedxml
import (
"crypto/x509"
"encoding/base64"
"errors"
"fmt"
"log"
"github.com/beevik/etree"
)
// Validator provides options for verifying a signed XML document
type Validator struct {
Certificates []x509.Certificate
signingCert x509.Certificate
signatureData
}
// NewValidator returns a *Validator for the XML provided
func NewValidator(xml string) (*Validator, error) {
doc := etree.NewDocument()
err := doc.ReadFromString(xml)
if err != nil {
return nil, err
}
v := &Validator{signatureData: signatureData{xml: doc}}
return v, nil
}
// SetXML is used to assign the XML document that the Validator will verify
func (v *Validator) SetXML(xml string) error {
doc := etree.NewDocument()
err := doc.ReadFromString(xml)
v.xml = doc
return err
}
// SigningCert returns the certificate, if any, that was used to successfully
// validate the signature of the XML document. This will be a zero value
// x509.Certificate before Validator.Validate is successfully called.
func (v *Validator) SigningCert() x509.Certificate {
return v.signingCert
}
// Validate validates the Reference digest values, and the signature value
// over the SignedInfo.
//
// Deprecated: Use ValidateReferences instead
func (v *Validator) Validate() error {
_, err := v.ValidateReferences()
return err
}
// ValidateReferences validates the Reference digest values, and the signature value
// over the SignedInfo.
//
// If the signature is enveloped in the XML, then it will be used.
// Otherwise, an external signature should be assigned using
// Validator.SetSignature.
//
// The references returned by this method can be used to verify what was signed.
func (v *Validator) ValidateReferences() ([]string, error) {
if err := v.loadValuesFromXML(); err != nil {
return nil, err
}
referenced, err := v.validateReferences()
if err != nil {
return nil, err
}
var ref []string
for _, doc := range referenced {
docStr, err := doc.WriteToString()
if err != nil {
return nil, err
}
ref = append(ref, docStr)
}
err = v.validateSignature()
return ref, err
}
func (v *Validator) loadValuesFromXML() error {
if v.signature == nil {
if err := v.parseEnvelopedSignature(); err != nil {
return err
}
}
if err := v.parseSignedInfo(); err != nil {
return err
}
if err := v.parseSigValue(); err != nil {
return err
}
if err := v.parseSigAlgorithm(); err != nil {
return err
}
if err := v.parseCanonAlgorithm(); err != nil {
return err
}
if err := v.loadCertificates(); err != nil {
return err
}
return nil
}
func (v *Validator) validateReferences() (referenced []*etree.Document, err error) {
references := v.signedInfo.FindElements("./Reference")
for _, ref := range references {
doc := v.xml.Copy()
transforms := ref.SelectElement("Transforms")
for _, transform := range transforms.SelectElements("Transform") {
doc, err = processTransform(transform, doc)
if err != nil {
return nil, err
}
}
doc, err = getReferencedXML(ref, doc)
if err != nil {
return nil, err
}
referenced = append(referenced, doc)
digestValueElement := ref.SelectElement("DigestValue")
if digestValueElement == nil {
return nil, errors.New("signedxml: unable to find DigestValue")
}
digestValue := digestValueElement.Text()
calculatedValue, err := calculateHash(ref, doc)
if err != nil {
return nil, err
}
if calculatedValue != digestValue {
return nil, fmt.Errorf("signedxml: Calculated digest does not match the"+
" expected digestvalue of %s", digestValue)
}
}
return referenced, nil
}
func (v *Validator) validateSignature() error {
doc := etree.NewDocument()
doc.SetRoot(v.signedInfo.Copy())
signedInfo, err := doc.WriteToString()
if err != nil {
return err
}
canonSignedInfo, err := v.canonAlgorithm.Process(signedInfo, "")
if err != nil {
return err
}
b64, err := base64.StdEncoding.DecodeString(v.sigValue)
if err != nil {
return err
}
sig := []byte(b64)
v.signingCert = x509.Certificate{}
for _, cert := range v.Certificates {
err := cert.CheckSignature(v.sigAlgorithm, []byte(canonSignedInfo), sig)
if err == nil {
v.signingCert = cert
return nil
}
}
return errors.New("signedxml: Calculated signature does not match the " +
"SignatureValue provided")
}
func (v *Validator) loadCertificates() error {
// If v.Certificates is already populated, then the client has already set it
// to the desired cert. Otherwise, let's pull the public keys from the XML
if len(v.Certificates) < 1 {
keydata := v.xml.FindElements(".//X509Certificate")
for _, key := range keydata {
cert, err := getCertFromPEMString(key.Text())
if err != nil {
log.Printf("signedxml: Unable to load certificate: (%s). "+
"Looking for another cert.", err)
} else {
v.Certificates = append(v.Certificates, *cert)
}
}
}
if len(v.Certificates) < 1 {
return errors.New("signedxml: a certificate is required, but was not found")
}
return nil
}

24
vendor/github.com/pkg/errors/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

11
vendor/github.com/pkg/errors/.travis.yml generated vendored Normal file
View File

@@ -0,0 +1,11 @@
language: go
go_import_path: github.com/pkg/errors
go:
- 1.4.3
- 1.5.4
- 1.6.2
- 1.7.1
- tip
script:
- go test -v ./...

23
vendor/github.com/pkg/errors/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,23 @@
Copyright (c) 2015, Dave Cheney <dave@cheney.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

52
vendor/github.com/pkg/errors/README.md generated vendored Normal file
View File

@@ -0,0 +1,52 @@
# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors)
Package errors provides simple error handling primitives.
`go get github.com/pkg/errors`
The traditional error handling idiom in Go is roughly akin to
```go
if err != nil {
return err
}
```
which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
## Adding context to an error
The errors.Wrap function returns a new error that adds context to the original error. For example
```go
_, err := ioutil.ReadAll(r)
if err != nil {
return errors.Wrap(err, "read failed")
}
```
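Because `Wrap` also records a stack trace at the call site, formatting the wrapped error with `%+v` prints the message chain together with that trace. A small illustrative extension of the example above:
```go
_, err := ioutil.ReadAll(r)
if err != nil {
	err = errors.Wrap(err, "read failed")
	fmt.Printf("%+v\n", err) // message chain followed by the recorded stack trace
}
```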
## Retrieving the cause of an error
Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
```go
type causer interface {
Cause() error
}
```
`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
```go
switch err := errors.Cause(err).(type) {
case *MyError:
// handle specifically
default:
// unknown error
}
```
[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
## Contributing
We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.
Before proposing a change, please discuss your change by raising an issue.
## Licence
BSD-2-Clause

32
vendor/github.com/pkg/errors/appveyor.yml generated vendored Normal file
View File

@@ -0,0 +1,32 @@
version: build-{build}.{branch}
clone_folder: C:\gopath\src\github.com\pkg\errors
shallow_clone: true # for startup speed
environment:
GOPATH: C:\gopath
platform:
- x64
# http://www.appveyor.com/docs/installed-software
install:
# some helpful output for debugging builds
- go version
- go env
# pre-installed MinGW at C:\MinGW is 32bit only
# but MSYS2 at C:\msys64 has mingw64
- set PATH=C:\msys64\mingw64\bin;%PATH%
- gcc --version
- g++ --version
build_script:
- go install -v ./...
test_script:
- set PATH=C:\gopath\bin;%PATH%
- go test -v ./...
#artifacts:
# - path: '%GOPATH%\bin\*.exe'
deploy: off

269
vendor/github.com/pkg/errors/errors.go generated vendored Normal file
View File

@@ -0,0 +1,269 @@
// Package errors provides simple error handling primitives.
//
// The traditional error handling idiom in Go is roughly akin to
//
// if err != nil {
// return err
// }
//
// which applied recursively up the call stack results in error reports
// without context or debugging information. The errors package allows
// programmers to add context to the failure path in their code in a way
// that does not destroy the original value of the error.
//
// Adding context to an error
//
// The errors.Wrap function returns a new error that adds context to the
// original error by recording a stack trace at the point Wrap is called,
// and the supplied message. For example
//
// _, err := ioutil.ReadAll(r)
// if err != nil {
// return errors.Wrap(err, "read failed")
// }
//
// If additional control is required the errors.WithStack and errors.WithMessage
// functions destructure errors.Wrap into its component operations of annotating
// an error with a stack trace and a message, respectively.
//
// Retrieving the cause of an error
//
// Using errors.Wrap constructs a stack of errors, adding context to the
// preceding error. Depending on the nature of the error it may be necessary
// to reverse the operation of errors.Wrap to retrieve the original error
// for inspection. Any error value which implements this interface
//
// type causer interface {
// Cause() error
// }
//
// can be inspected by errors.Cause. errors.Cause will recursively retrieve
// the topmost error which does not implement causer, which is assumed to be
// the original cause. For example:
//
// switch err := errors.Cause(err).(type) {
// case *MyError:
// // handle specifically
// default:
// // unknown error
// }
//
// causer interface is not exported by this package, but is considered a part
// of stable public API.
//
// Formatted printing of errors
//
// All error values returned from this package implement fmt.Formatter and can
// be formatted by the fmt package. The following verbs are supported
//
// %s print the error. If the error has a Cause it will be
// printed recursively
// %v see %s
// %+v extended format. Each Frame of the error's StackTrace will
// be printed in detail.
//
// Retrieving the stack trace of an error or wrapper
//
// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
// invoked. This information can be retrieved with the following interface.
//
// type stackTracer interface {
// StackTrace() errors.StackTrace
// }
//
// Where errors.StackTrace is defined as
//
// type StackTrace []Frame
//
// The Frame type represents a call site in the stack trace. Frame supports
// the fmt.Formatter interface that can be used for printing information about
// the stack trace of this error. For example:
//
// if err, ok := err.(stackTracer); ok {
// for _, f := range err.StackTrace() {
// fmt.Printf("%+s:%d", f)
// }
// }
//
// stackTracer interface is not exported by this package, but is considered a part
// of stable public API.
//
// See the documentation for Frame.Format for more details.
package errors
import (
"fmt"
"io"
)
// New returns an error with the supplied message.
// New also records the stack trace at the point it was called.
func New(message string) error {
return &fundamental{
msg: message,
stack: callers(),
}
}
// Errorf formats according to a format specifier and returns the string
// as a value that satisfies error.
// Errorf also records the stack trace at the point it was called.
func Errorf(format string, args ...interface{}) error {
return &fundamental{
msg: fmt.Sprintf(format, args...),
stack: callers(),
}
}
// fundamental is an error that has a message and a stack, but no caller.
type fundamental struct {
msg string
*stack
}
func (f *fundamental) Error() string { return f.msg }
func (f *fundamental) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
io.WriteString(s, f.msg)
f.stack.Format(s, verb)
return
}
fallthrough
case 's':
io.WriteString(s, f.msg)
case 'q':
fmt.Fprintf(s, "%q", f.msg)
}
}
// WithStack annotates err with a stack trace at the point WithStack was called.
// If err is nil, WithStack returns nil.
func WithStack(err error) error {
if err == nil {
return nil
}
return &withStack{
err,
callers(),
}
}
type withStack struct {
error
*stack
}
func (w *withStack) Cause() error { return w.error }
func (w *withStack) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
fmt.Fprintf(s, "%+v", w.Cause())
w.stack.Format(s, verb)
return
}
fallthrough
case 's':
io.WriteString(s, w.Error())
case 'q':
fmt.Fprintf(s, "%q", w.Error())
}
}
// Wrap returns an error annotating err with a stack trace
// at the point Wrap is called, and the supplied message.
// If err is nil, Wrap returns nil.
func Wrap(err error, message string) error {
if err == nil {
return nil
}
err = &withMessage{
cause: err,
msg: message,
}
return &withStack{
err,
callers(),
}
}
// Wrapf returns an error annotating err with a stack trace
// at the point Wrapf is called, and the format specifier.
// If err is nil, Wrapf returns nil.
func Wrapf(err error, format string, args ...interface{}) error {
if err == nil {
return nil
}
err = &withMessage{
cause: err,
msg: fmt.Sprintf(format, args...),
}
return &withStack{
err,
callers(),
}
}
// WithMessage annotates err with a new message.
// If err is nil, WithMessage returns nil.
func WithMessage(err error, message string) error {
if err == nil {
return nil
}
return &withMessage{
cause: err,
msg: message,
}
}
type withMessage struct {
cause error
msg string
}
func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
func (w *withMessage) Cause() error { return w.cause }
func (w *withMessage) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
fmt.Fprintf(s, "%+v\n", w.Cause())
io.WriteString(s, w.msg)
return
}
fallthrough
case 's', 'q':
io.WriteString(s, w.Error())
}
}
// Cause returns the underlying cause of the error, if possible.
// An error value has a cause if it implements the following
// interface:
//
// type causer interface {
// Cause() error
// }
//
// If the error does not implement Cause, the original error will
// be returned. If the error is nil, nil will be returned without further
// investigation.
func Cause(err error) error {
type causer interface {
Cause() error
}
for err != nil {
cause, ok := err.(causer)
if !ok {
break
}
err = cause.Cause()
}
return err
}

178
vendor/github.com/pkg/errors/stack.go generated vendored Normal file
View File

@@ -0,0 +1,178 @@
package errors
import (
"fmt"
"io"
"path"
"runtime"
"strings"
)
// Frame represents a program counter inside a stack frame.
type Frame uintptr
// pc returns the program counter for this frame;
// multiple frames may have the same PC value.
func (f Frame) pc() uintptr { return uintptr(f) - 1 }
// file returns the full path to the file that contains the
// function for this Frame's pc.
func (f Frame) file() string {
fn := runtime.FuncForPC(f.pc())
if fn == nil {
return "unknown"
}
file, _ := fn.FileLine(f.pc())
return file
}
// line returns the line number of source code of the
// function for this Frame's pc.
func (f Frame) line() int {
fn := runtime.FuncForPC(f.pc())
if fn == nil {
return 0
}
_, line := fn.FileLine(f.pc())
return line
}
// Format formats the frame according to the fmt.Formatter interface.
//
// %s source file
// %d source line
// %n function name
// %v equivalent to %s:%d
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
// %+s path of source file relative to the compile time GOPATH
// %+v equivalent to %+s:%d
func (f Frame) Format(s fmt.State, verb rune) {
switch verb {
case 's':
switch {
case s.Flag('+'):
pc := f.pc()
fn := runtime.FuncForPC(pc)
if fn == nil {
io.WriteString(s, "unknown")
} else {
file, _ := fn.FileLine(pc)
fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
}
default:
io.WriteString(s, path.Base(f.file()))
}
case 'd':
fmt.Fprintf(s, "%d", f.line())
case 'n':
name := runtime.FuncForPC(f.pc()).Name()
io.WriteString(s, funcname(name))
case 'v':
f.Format(s, 's')
io.WriteString(s, ":")
f.Format(s, 'd')
}
}
// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame
func (st StackTrace) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
switch {
case s.Flag('+'):
for _, f := range st {
fmt.Fprintf(s, "\n%+v", f)
}
case s.Flag('#'):
fmt.Fprintf(s, "%#v", []Frame(st))
default:
fmt.Fprintf(s, "%v", []Frame(st))
}
case 's':
fmt.Fprintf(s, "%s", []Frame(st))
}
}
// stack represents a stack of program counters.
type stack []uintptr
func (s *stack) Format(st fmt.State, verb rune) {
switch verb {
case 'v':
switch {
case st.Flag('+'):
for _, pc := range *s {
f := Frame(pc)
fmt.Fprintf(st, "\n%+v", f)
}
}
}
}
func (s *stack) StackTrace() StackTrace {
f := make([]Frame, len(*s))
for i := 0; i < len(f); i++ {
f[i] = Frame((*s)[i])
}
return f
}
func callers() *stack {
const depth = 32
var pcs [depth]uintptr
n := runtime.Callers(3, pcs[:])
var st stack = pcs[0:n]
return &st
}
// funcname removes the path prefix component of a function's name reported by func.Name().
func funcname(name string) string {
i := strings.LastIndex(name, "/")
name = name[i+1:]
i = strings.Index(name, ".")
return name[i+1:]
}
func trimGOPATH(name, file string) string {
// Here we want to get the source file path relative to the compile time
// GOPATH. As of Go 1.6.x there is no direct way to know the compiled
// GOPATH at runtime, but we can infer the number of path segments in the
// GOPATH. We note that fn.Name() returns the function name qualified by
// the import path, which does not include the GOPATH. Thus we can trim
// segments from the beginning of the file path until the number of path
// separators remaining is one more than the number of path separators in
// the function name. For example, given:
//
// GOPATH /home/user
// file /home/user/src/pkg/sub/file.go
// fn.Name() pkg/sub.Type.Method
//
// We want to produce:
//
// pkg/sub/file.go
//
// From this we can easily see that fn.Name() has one less path separator
// than our desired output. We count separators from the end of the file
// path until it finds two more than in the function name and then move
// one character forward to preserve the initial path segment without a
// leading separator.
const sep = "/"
goal := strings.Count(name, sep) + 2
i := len(file)
for n := 0; n < goal; n++ {
i = strings.LastIndex(file[:i], sep)
if i == -1 {
// not enough separators found, set i so that the slice expression
// below leaves file unmodified
i = -len(sep)
break
}
}
// get back to 0 or trim the leading separator
file = file[i+len(sep):]
return file
}