Diffstat (limited to 'vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav')
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/file.go  796
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/file_go1.6.go  17
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/file_go1.7.go  16
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/file_test.go  1184
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/if.go  173
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/if_test.go  322
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/README  11
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go  56
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/example_test.go  151
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/marshal.go  1223
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go  1939
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/read.go  692
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/read_test.go  744
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go  371
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/xml.go  1998
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/xml_test.go  752
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/litmus_test_server.go  94
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/lock.go  445
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/lock_test.go  731
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/prop.go  418
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/prop_test.go  613
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/webdav.go  702
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/webdav_test.go  344
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/xml.go  519
-rw-r--r--  vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/xml_test.go  906
25 files changed, 15217 insertions, 0 deletions
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/file.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/file.go
new file mode 100644
index 000000000..748118dd3
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/file.go
@@ -0,0 +1,796 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+ "encoding/xml"
+ "io"
+ "net/http"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+// slashClean is equivalent to but slightly more efficient than
+// path.Clean("/" + name).
+func slashClean(name string) string {
+ if name == "" || name[0] != '/' {
+ name = "/" + name
+ }
+ return path.Clean(name)
+}
+
+// A FileSystem implements access to a collection of named files. The elements
+// in a file path are separated by slash ('/', U+002F) characters, regardless
+// of host operating system convention.
+//
+// Each method has the same semantics as the os package's function of the same
+// name.
+//
+// Note that the os.Rename documentation says that "OS-specific restrictions
+// might apply". In particular, whether renaming a file or directory so that it
+// overwrites another existing file or directory is an error is OS-dependent.
+type FileSystem interface {
+ Mkdir(ctx context.Context, name string, perm os.FileMode) error
+ OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error)
+ RemoveAll(ctx context.Context, name string) error
+ Rename(ctx context.Context, oldName, newName string) error
+ Stat(ctx context.Context, name string) (os.FileInfo, error)
+}
+
+// A File is returned by a FileSystem's OpenFile method and can be served by a
+// Handler.
+//
+// A File may optionally implement the DeadPropsHolder interface, if it can
+// load and save dead properties.
+type File interface {
+ http.File
+ io.Writer
+}
+
+// A Dir implements FileSystem using the native file system restricted to a
+// specific directory tree.
+//
+// While the FileSystem.OpenFile method takes '/'-separated paths, a Dir's
+// string value is a filename on the native file system, not a URL, so it is
+// separated by filepath.Separator, which isn't necessarily '/'.
+//
+// An empty Dir is treated as ".".
+type Dir string
+
+func (d Dir) resolve(name string) string {
+ // This implementation is based on Dir.Open's code in the standard net/http package.
+ if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
+ strings.Contains(name, "\x00") {
+ return ""
+ }
+ dir := string(d)
+ if dir == "" {
+ dir = "."
+ }
+ return filepath.Join(dir, filepath.FromSlash(slashClean(name)))
+}
+
+func (d Dir) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
+ if name = d.resolve(name); name == "" {
+ return os.ErrNotExist
+ }
+ return os.Mkdir(name, perm)
+}
+
+func (d Dir) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) {
+ if name = d.resolve(name); name == "" {
+ return nil, os.ErrNotExist
+ }
+ f, err := os.OpenFile(name, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+func (d Dir) RemoveAll(ctx context.Context, name string) error {
+ if name = d.resolve(name); name == "" {
+ return os.ErrNotExist
+ }
+ if name == filepath.Clean(string(d)) {
+ // Prohibit removing the virtual root directory.
+ return os.ErrInvalid
+ }
+ return os.RemoveAll(name)
+}
+
+func (d Dir) Rename(ctx context.Context, oldName, newName string) error {
+ if oldName = d.resolve(oldName); oldName == "" {
+ return os.ErrNotExist
+ }
+ if newName = d.resolve(newName); newName == "" {
+ return os.ErrNotExist
+ }
+ if root := filepath.Clean(string(d)); root == oldName || root == newName {
+ // Prohibit renaming from or to the virtual root directory.
+ return os.ErrInvalid
+ }
+ return os.Rename(oldName, newName)
+}
+
+func (d Dir) Stat(ctx context.Context, name string) (os.FileInfo, error) {
+ if name = d.resolve(name); name == "" {
+ return nil, os.ErrNotExist
+ }
+ return os.Stat(name)
+}
+
+// NewMemFS returns a new in-memory FileSystem implementation.
+func NewMemFS() FileSystem {
+ return &memFS{
+ root: memFSNode{
+ children: make(map[string]*memFSNode),
+ mode: 0660 | os.ModeDir,
+ modTime: time.Now(),
+ },
+ }
+}
+
+// A memFS implements FileSystem, storing all metadata and actual file data
+// in-memory. No limit is placed on the size of the filesystem, so it is not
+// recommended for use where clients are untrusted.
+//
+// Concurrent access is permitted. The tree structure is protected by a mutex,
+// and each node's contents and metadata are protected by a per-node mutex.
+//
+// TODO: Enforce file permissions.
+type memFS struct {
+ mu sync.Mutex
+ root memFSNode
+}
+
+// TODO: clean up and rationalize the walk/find code.
+
+// walk walks the directory tree for the fullname, calling f at each step. If f
+// returns an error, the walk will be aborted and return that same error.
+//
+// dir is the directory at that step, frag is the name fragment, and final is
+// whether it is the final step. For example, walking "/foo/bar/x" will result
+// in 3 calls to f:
+// - "/", "foo", false
+// - "/foo/", "bar", false
+// - "/foo/bar/", "x", true
+// The frag argument will be empty only if dir is the root node and the walk
+// ends at that root node.
+func (fs *memFS) walk(op, fullname string, f func(dir *memFSNode, frag string, final bool) error) error {
+ original := fullname
+ fullname = slashClean(fullname)
+
+ // Strip any leading "/"s to make fullname a relative path, as the walk
+ // starts at fs.root.
+ if fullname[0] == '/' {
+ fullname = fullname[1:]
+ }
+ dir := &fs.root
+
+ for {
+ frag, remaining := fullname, ""
+ i := strings.IndexRune(fullname, '/')
+ final := i < 0
+ if !final {
+ frag, remaining = fullname[:i], fullname[i+1:]
+ }
+ if frag == "" && dir != &fs.root {
+ panic("webdav: empty path fragment for a clean path")
+ }
+ if err := f(dir, frag, final); err != nil {
+ return &os.PathError{
+ Op: op,
+ Path: original,
+ Err: err,
+ }
+ }
+ if final {
+ break
+ }
+ child := dir.children[frag]
+ if child == nil {
+ return &os.PathError{
+ Op: op,
+ Path: original,
+ Err: os.ErrNotExist,
+ }
+ }
+ if !child.mode.IsDir() {
+ return &os.PathError{
+ Op: op,
+ Path: original,
+ Err: os.ErrInvalid,
+ }
+ }
+ dir, fullname = child, remaining
+ }
+ return nil
+}
+
+// find returns the parent of the named node and the relative name fragment
+// from the parent to the child. For example, if finding "/foo/bar/baz" then
+// parent will be the node for "/foo/bar" and frag will be "baz".
+//
+// If the fullname names the root node, then parent, frag and err will be zero.
+//
+// find returns an error if the parent does not already exist or the parent
+// isn't a directory, but it will not return an error per se if the child does
+// not already exist. The error returned is either nil or an *os.PathError
+// whose Op is op.
+func (fs *memFS) find(op, fullname string) (parent *memFSNode, frag string, err error) {
+ err = fs.walk(op, fullname, func(parent0 *memFSNode, frag0 string, final bool) error {
+ if !final {
+ return nil
+ }
+ if frag0 != "" {
+ parent, frag = parent0, frag0
+ }
+ return nil
+ })
+ return parent, frag, err
+}
+
+func (fs *memFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ dir, frag, err := fs.find("mkdir", name)
+ if err != nil {
+ return err
+ }
+ if dir == nil {
+ // We can't create the root.
+ return os.ErrInvalid
+ }
+ if _, ok := dir.children[frag]; ok {
+ return os.ErrExist
+ }
+ dir.children[frag] = &memFSNode{
+ children: make(map[string]*memFSNode),
+ mode: perm.Perm() | os.ModeDir,
+ modTime: time.Now(),
+ }
+ return nil
+}
+
+func (fs *memFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ dir, frag, err := fs.find("open", name)
+ if err != nil {
+ return nil, err
+ }
+ var n *memFSNode
+ if dir == nil {
+ // We're opening the root.
+ if flag&(os.O_WRONLY|os.O_RDWR) != 0 {
+ return nil, os.ErrPermission
+ }
+ n, frag = &fs.root, "/"
+
+ } else {
+ n = dir.children[frag]
+ if flag&(os.O_SYNC|os.O_APPEND) != 0 {
+ // memFile doesn't support these flags yet.
+ return nil, os.ErrInvalid
+ }
+ if flag&os.O_CREATE != 0 {
+ if flag&os.O_EXCL != 0 && n != nil {
+ return nil, os.ErrExist
+ }
+ if n == nil {
+ n = &memFSNode{
+ mode: perm.Perm(),
+ }
+ dir.children[frag] = n
+ }
+ }
+ if n == nil {
+ return nil, os.ErrNotExist
+ }
+ if flag&(os.O_WRONLY|os.O_RDWR) != 0 && flag&os.O_TRUNC != 0 {
+ n.mu.Lock()
+ n.data = nil
+ n.mu.Unlock()
+ }
+ }
+
+ children := make([]os.FileInfo, 0, len(n.children))
+ for cName, c := range n.children {
+ children = append(children, c.stat(cName))
+ }
+ return &memFile{
+ n: n,
+ nameSnapshot: frag,
+ childrenSnapshot: children,
+ }, nil
+}
+
+func (fs *memFS) RemoveAll(ctx context.Context, name string) error {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ dir, frag, err := fs.find("remove", name)
+ if err != nil {
+ return err
+ }
+ if dir == nil {
+ // We can't remove the root.
+ return os.ErrInvalid
+ }
+ delete(dir.children, frag)
+ return nil
+}
+
+func (fs *memFS) Rename(ctx context.Context, oldName, newName string) error {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ oldName = slashClean(oldName)
+ newName = slashClean(newName)
+ if oldName == newName {
+ return nil
+ }
+ if strings.HasPrefix(newName, oldName+"/") {
+ // We can't rename oldName to be a sub-directory of itself.
+ return os.ErrInvalid
+ }
+
+ oDir, oFrag, err := fs.find("rename", oldName)
+ if err != nil {
+ return err
+ }
+ if oDir == nil {
+ // We can't rename from the root.
+ return os.ErrInvalid
+ }
+
+ nDir, nFrag, err := fs.find("rename", newName)
+ if err != nil {
+ return err
+ }
+ if nDir == nil {
+ // We can't rename to the root.
+ return os.ErrInvalid
+ }
+
+ oNode, ok := oDir.children[oFrag]
+ if !ok {
+ return os.ErrNotExist
+ }
+ if oNode.children != nil {
+ if nNode, ok := nDir.children[nFrag]; ok {
+ if nNode.children == nil {
+ return errNotADirectory
+ }
+ if len(nNode.children) != 0 {
+ return errDirectoryNotEmpty
+ }
+ }
+ }
+ delete(oDir.children, oFrag)
+ nDir.children[nFrag] = oNode
+ return nil
+}
+
+func (fs *memFS) Stat(ctx context.Context, name string) (os.FileInfo, error) {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ dir, frag, err := fs.find("stat", name)
+ if err != nil {
+ return nil, err
+ }
+ if dir == nil {
+ // We're stat'ting the root.
+ return fs.root.stat("/"), nil
+ }
+ if n, ok := dir.children[frag]; ok {
+ return n.stat(path.Base(name)), nil
+ }
+ return nil, os.ErrNotExist
+}
+
+// A memFSNode represents a single entry in the in-memory filesystem and also
+// implements os.FileInfo.
+type memFSNode struct {
+ // children is protected by memFS.mu.
+ children map[string]*memFSNode
+
+ mu sync.Mutex
+ data []byte
+ mode os.FileMode
+ modTime time.Time
+ deadProps map[xml.Name]Property
+}
+
+func (n *memFSNode) stat(name string) *memFileInfo {
+ n.mu.Lock()
+ defer n.mu.Unlock()
+ return &memFileInfo{
+ name: name,
+ size: int64(len(n.data)),
+ mode: n.mode,
+ modTime: n.modTime,
+ }
+}
+
+func (n *memFSNode) DeadProps() (map[xml.Name]Property, error) {
+ n.mu.Lock()
+ defer n.mu.Unlock()
+ if len(n.deadProps) == 0 {
+ return nil, nil
+ }
+ ret := make(map[xml.Name]Property, len(n.deadProps))
+ for k, v := range n.deadProps {
+ ret[k] = v
+ }
+ return ret, nil
+}
+
+func (n *memFSNode) Patch(patches []Proppatch) ([]Propstat, error) {
+ n.mu.Lock()
+ defer n.mu.Unlock()
+ pstat := Propstat{Status: http.StatusOK}
+ for _, patch := range patches {
+ for _, p := range patch.Props {
+ pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
+ if patch.Remove {
+ delete(n.deadProps, p.XMLName)
+ continue
+ }
+ if n.deadProps == nil {
+ n.deadProps = map[xml.Name]Property{}
+ }
+ n.deadProps[p.XMLName] = p
+ }
+ }
+ return []Propstat{pstat}, nil
+}
+
+type memFileInfo struct {
+ name string
+ size int64
+ mode os.FileMode
+ modTime time.Time
+}
+
+func (f *memFileInfo) Name() string { return f.name }
+func (f *memFileInfo) Size() int64 { return f.size }
+func (f *memFileInfo) Mode() os.FileMode { return f.mode }
+func (f *memFileInfo) ModTime() time.Time { return f.modTime }
+func (f *memFileInfo) IsDir() bool { return f.mode.IsDir() }
+func (f *memFileInfo) Sys() interface{} { return nil }
+
+// A memFile is a File implementation for a memFSNode. It holds a per-file (not
+// per-node) read/write position, and a snapshot of the memFS' tree structure
+// (a node's name and children) for that node.
+type memFile struct {
+ n *memFSNode
+ nameSnapshot string
+ childrenSnapshot []os.FileInfo
+ // pos is protected by n.mu.
+ pos int
+}
+
+// A *memFile implements the optional DeadPropsHolder interface.
+var _ DeadPropsHolder = (*memFile)(nil)
+
+func (f *memFile) DeadProps() (map[xml.Name]Property, error) { return f.n.DeadProps() }
+func (f *memFile) Patch(patches []Proppatch) ([]Propstat, error) { return f.n.Patch(patches) }
+
+func (f *memFile) Close() error {
+ return nil
+}
+
+func (f *memFile) Read(p []byte) (int, error) {
+ f.n.mu.Lock()
+ defer f.n.mu.Unlock()
+ if f.n.mode.IsDir() {
+ return 0, os.ErrInvalid
+ }
+ if f.pos >= len(f.n.data) {
+ return 0, io.EOF
+ }
+ n := copy(p, f.n.data[f.pos:])
+ f.pos += n
+ return n, nil
+}
+
+func (f *memFile) Readdir(count int) ([]os.FileInfo, error) {
+ f.n.mu.Lock()
+ defer f.n.mu.Unlock()
+ if !f.n.mode.IsDir() {
+ return nil, os.ErrInvalid
+ }
+ old := f.pos
+ if old >= len(f.childrenSnapshot) {
+ // The os.File Readdir docs say that at the end of a directory,
+ // the error is io.EOF if count > 0 and nil if count <= 0.
+ if count > 0 {
+ return nil, io.EOF
+ }
+ return nil, nil
+ }
+ if count > 0 {
+ f.pos += count
+ if f.pos > len(f.childrenSnapshot) {
+ f.pos = len(f.childrenSnapshot)
+ }
+ } else {
+ f.pos = len(f.childrenSnapshot)
+ old = 0
+ }
+ return f.childrenSnapshot[old:f.pos], nil
+}
+
+func (f *memFile) Seek(offset int64, whence int) (int64, error) {
+ f.n.mu.Lock()
+ defer f.n.mu.Unlock()
+ npos := f.pos
+ // TODO: How to handle offsets greater than the size of system int?
+ switch whence {
+ case os.SEEK_SET:
+ npos = int(offset)
+ case os.SEEK_CUR:
+ npos += int(offset)
+ case os.SEEK_END:
+ npos = len(f.n.data) + int(offset)
+ default:
+ npos = -1
+ }
+ if npos < 0 {
+ return 0, os.ErrInvalid
+ }
+ f.pos = npos
+ return int64(f.pos), nil
+}
+
+func (f *memFile) Stat() (os.FileInfo, error) {
+ return f.n.stat(f.nameSnapshot), nil
+}
+
+func (f *memFile) Write(p []byte) (int, error) {
+ lenp := len(p)
+ f.n.mu.Lock()
+ defer f.n.mu.Unlock()
+
+ if f.n.mode.IsDir() {
+ return 0, os.ErrInvalid
+ }
+ if f.pos < len(f.n.data) {
+ n := copy(f.n.data[f.pos:], p)
+ f.pos += n
+ p = p[n:]
+ } else if f.pos > len(f.n.data) {
+ // Write permits the creation of holes, if we've seek'ed past the
+ // existing end of file.
+ if f.pos <= cap(f.n.data) {
+ oldLen := len(f.n.data)
+ f.n.data = f.n.data[:f.pos]
+ hole := f.n.data[oldLen:]
+ for i := range hole {
+ hole[i] = 0
+ }
+ } else {
+ d := make([]byte, f.pos, f.pos+len(p))
+ copy(d, f.n.data)
+ f.n.data = d
+ }
+ }
+
+ if len(p) > 0 {
+ // We should only get here if f.pos == len(f.n.data).
+ f.n.data = append(f.n.data, p...)
+ f.pos = len(f.n.data)
+ }
+ f.n.modTime = time.Now()
+ return lenp, nil
+}
+
+// moveFiles moves files and/or directories from src to dst.
+//
+// See RFC 4918, section 9.9.4 for when various HTTP status codes apply.
+func moveFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool) (status int, err error) {
+ created := false
+ if _, err := fs.Stat(ctx, dst); err != nil {
+ if !os.IsNotExist(err) {
+ return http.StatusForbidden, err
+ }
+ created = true
+ } else if overwrite {
+ // Section 9.9.3 says that "If a resource exists at the destination
+ // and the Overwrite header is "T", then prior to performing the move,
+ // the server must perform a DELETE with "Depth: infinity" on the
+ // destination resource."
+ if err := fs.RemoveAll(ctx, dst); err != nil {
+ return http.StatusForbidden, err
+ }
+ } else {
+ return http.StatusPreconditionFailed, os.ErrExist
+ }
+ if err := fs.Rename(ctx, src, dst); err != nil {
+ return http.StatusForbidden, err
+ }
+ if created {
+ return http.StatusCreated, nil
+ }
+ return http.StatusNoContent, nil
+}
+
+func copyProps(dst, src File) error {
+ d, ok := dst.(DeadPropsHolder)
+ if !ok {
+ return nil
+ }
+ s, ok := src.(DeadPropsHolder)
+ if !ok {
+ return nil
+ }
+ m, err := s.DeadProps()
+ if err != nil {
+ return err
+ }
+ props := make([]Property, 0, len(m))
+ for _, prop := range m {
+ props = append(props, prop)
+ }
+ _, err = d.Patch([]Proppatch{{Props: props}})
+ return err
+}
+
+// copyFiles copies files and/or directories from src to dst.
+//
+// See RFC 4918, section 9.8.5 for when various HTTP status codes apply.
+func copyFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool, depth int, recursion int) (status int, err error) {
+ if recursion == 1000 {
+ return http.StatusInternalServerError, errRecursionTooDeep
+ }
+ recursion++
+
+ // TODO: section 9.8.3 says that "Note that an infinite-depth COPY of /A/
+ // into /A/B/ could lead to infinite recursion if not handled correctly."
+
+ srcFile, err := fs.OpenFile(ctx, src, os.O_RDONLY, 0)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusNotFound, err
+ }
+ return http.StatusInternalServerError, err
+ }
+ defer srcFile.Close()
+ srcStat, err := srcFile.Stat()
+ if err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusNotFound, err
+ }
+ return http.StatusInternalServerError, err
+ }
+ srcPerm := srcStat.Mode() & os.ModePerm
+
+ created := false
+ if _, err := fs.Stat(ctx, dst); err != nil {
+ if os.IsNotExist(err) {
+ created = true
+ } else {
+ return http.StatusForbidden, err
+ }
+ } else {
+ if !overwrite {
+ return http.StatusPreconditionFailed, os.ErrExist
+ }
+ if err := fs.RemoveAll(ctx, dst); err != nil && !os.IsNotExist(err) {
+ return http.StatusForbidden, err
+ }
+ }
+
+ if srcStat.IsDir() {
+ if err := fs.Mkdir(ctx, dst, srcPerm); err != nil {
+ return http.StatusForbidden, err
+ }
+ if depth == infiniteDepth {
+ children, err := srcFile.Readdir(-1)
+ if err != nil {
+ return http.StatusForbidden, err
+ }
+ for _, c := range children {
+ name := c.Name()
+ s := path.Join(src, name)
+ d := path.Join(dst, name)
+ cStatus, cErr := copyFiles(ctx, fs, s, d, overwrite, depth, recursion)
+ if cErr != nil {
+ // TODO: MultiStatus.
+ return cStatus, cErr
+ }
+ }
+ }
+
+ } else {
+ dstFile, err := fs.OpenFile(ctx, dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcPerm)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusConflict, err
+ }
+ return http.StatusForbidden, err
+
+ }
+ _, copyErr := io.Copy(dstFile, srcFile)
+ propsErr := copyProps(dstFile, srcFile)
+ closeErr := dstFile.Close()
+ if copyErr != nil {
+ return http.StatusInternalServerError, copyErr
+ }
+ if propsErr != nil {
+ return http.StatusInternalServerError, propsErr
+ }
+ if closeErr != nil {
+ return http.StatusInternalServerError, closeErr
+ }
+ }
+
+ if created {
+ return http.StatusCreated, nil
+ }
+ return http.StatusNoContent, nil
+}
+
+// walkFS traverses filesystem fs starting at name up to depth levels.
+//
+// Allowed values for depth are 0, 1 or infiniteDepth. For each visited node,
+// walkFS calls walkFn. If a visited file system node is a directory and
+// walkFn returns filepath.SkipDir, walkFS will skip traversal of this node.
+func walkFS(ctx context.Context, fs FileSystem, depth int, name string, info os.FileInfo, walkFn filepath.WalkFunc) error {
+ // This implementation is based on Walk's code in the standard path/filepath package.
+ err := walkFn(name, info, nil)
+ if err != nil {
+ if info.IsDir() && err == filepath.SkipDir {
+ return nil
+ }
+ return err
+ }
+ if !info.IsDir() || depth == 0 {
+ return nil
+ }
+ if depth == 1 {
+ depth = 0
+ }
+
+ // Read directory names.
+ f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
+ if err != nil {
+ return walkFn(name, info, err)
+ }
+ fileInfos, err := f.Readdir(0)
+ f.Close()
+ if err != nil {
+ return walkFn(name, info, err)
+ }
+
+ for _, fileInfo := range fileInfos {
+ filename := path.Join(name, fileInfo.Name())
+ fileInfo, err := fs.Stat(ctx, filename)
+ if err != nil {
+ if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
+ return err
+ }
+ } else {
+ err = walkFS(ctx, fs, depth, filename, fileInfo, walkFn)
+ if err != nil {
+ if !fileInfo.IsDir() || err != filepath.SkipDir {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
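For orientation, a minimal sketch of how the FileSystem implementations added above (Dir over a real directory tree and the in-memory filesystem returned by NewMemFS) are typically wired into this package's Handler. It assumes the Handler type and NewMemLS function declared in webdav.go and lock.go elsewhere in this diff, and imports the package by its canonical path rather than the vendored one; the listen address and root directory are placeholders.

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/webdav"
)

func main() {
	h := &webdav.Handler{
		// Dir serves a directory on the native file system; NewMemFS()
		// would serve the in-memory filesystem defined above instead.
		FileSystem: webdav.Dir("/tmp/webdav-root"),
		LockSystem: webdav.NewMemLS(),
	}
	log.Fatal(http.ListenAndServe(":8080", h))
}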
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/file_go1.6.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/file_go1.6.go
new file mode 100644
index 000000000..fa387700d
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/file_go1.6.go
@@ -0,0 +1,17 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package webdav
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+)
+
+func getContext(r *http.Request) context.Context {
+ return context.Background()
+}
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/file_go1.7.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/file_go1.7.go
new file mode 100644
index 000000000..d1c3de832
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/file_go1.7.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package webdav
+
+import (
+ "context"
+ "net/http"
+)
+
+func getContext(r *http.Request) context.Context {
+ return r.Context()
+}
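These two small files only bridge the Go 1.6/1.7 split: before Go 1.7 an http.Request carries no per-request context, so getContext falls back to context.Background(), while on Go 1.7 and later it returns r.Context(). A minimal sketch of the intended call pattern, where statHandler is a hypothetical helper written for illustration inside package webdav (the real handler code lives in webdav.go):

package webdav

import "net/http"

// statHandler is a hypothetical illustration: handler code calls getContext
// once per request and threads the result through FileSystem methods, so
// cancellation propagates on Go 1.7+ while Go 1.6 degrades to a background
// context.
func statHandler(fs FileSystem, w http.ResponseWriter, r *http.Request) {
	ctx := getContext(r)
	if _, err := fs.Stat(ctx, r.URL.Path); err != nil {
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}
	w.WriteHeader(http.StatusOK)
}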
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/file_test.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/file_test.go
new file mode 100644
index 000000000..bfd96e193
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/file_test.go
@@ -0,0 +1,1184 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "testing"
+
+ "golang.org/x/net/context"
+)
+
+func TestSlashClean(t *testing.T) {
+ testCases := []string{
+ "",
+ ".",
+ "/",
+ "/./",
+ "//",
+ "//.",
+ "//a",
+ "/a",
+ "/a/b/c",
+ "/a//b/./../c/d/",
+ "a",
+ "a/b/c",
+ }
+ for _, tc := range testCases {
+ got := slashClean(tc)
+ want := path.Clean("/" + tc)
+ if got != want {
+ t.Errorf("tc=%q: got %q, want %q", tc, got, want)
+ }
+ }
+}
+
+func TestDirResolve(t *testing.T) {
+ testCases := []struct {
+ dir, name, want string
+ }{
+ {"/", "", "/"},
+ {"/", "/", "/"},
+ {"/", ".", "/"},
+ {"/", "./a", "/a"},
+ {"/", "..", "/"},
+ {"/", "..", "/"},
+ {"/", "../", "/"},
+ {"/", "../.", "/"},
+ {"/", "../a", "/a"},
+ {"/", "../..", "/"},
+ {"/", "../bar/a", "/bar/a"},
+ {"/", "../baz/a", "/baz/a"},
+ {"/", "...", "/..."},
+ {"/", ".../a", "/.../a"},
+ {"/", ".../..", "/"},
+ {"/", "a", "/a"},
+ {"/", "a/./b", "/a/b"},
+ {"/", "a/../../b", "/b"},
+ {"/", "a/../b", "/b"},
+ {"/", "a/b", "/a/b"},
+ {"/", "a/b/c/../../d", "/a/d"},
+ {"/", "a/b/c/../../../d", "/d"},
+ {"/", "a/b/c/../../../../d", "/d"},
+ {"/", "a/b/c/d", "/a/b/c/d"},
+
+ {"/foo/bar", "", "/foo/bar"},
+ {"/foo/bar", "/", "/foo/bar"},
+ {"/foo/bar", ".", "/foo/bar"},
+ {"/foo/bar", "./a", "/foo/bar/a"},
+ {"/foo/bar", "..", "/foo/bar"},
+ {"/foo/bar", "../", "/foo/bar"},
+ {"/foo/bar", "../.", "/foo/bar"},
+ {"/foo/bar", "../a", "/foo/bar/a"},
+ {"/foo/bar", "../..", "/foo/bar"},
+ {"/foo/bar", "../bar/a", "/foo/bar/bar/a"},
+ {"/foo/bar", "../baz/a", "/foo/bar/baz/a"},
+ {"/foo/bar", "...", "/foo/bar/..."},
+ {"/foo/bar", ".../a", "/foo/bar/.../a"},
+ {"/foo/bar", ".../..", "/foo/bar"},
+ {"/foo/bar", "a", "/foo/bar/a"},
+ {"/foo/bar", "a/./b", "/foo/bar/a/b"},
+ {"/foo/bar", "a/../../b", "/foo/bar/b"},
+ {"/foo/bar", "a/../b", "/foo/bar/b"},
+ {"/foo/bar", "a/b", "/foo/bar/a/b"},
+ {"/foo/bar", "a/b/c/../../d", "/foo/bar/a/d"},
+ {"/foo/bar", "a/b/c/../../../d", "/foo/bar/d"},
+ {"/foo/bar", "a/b/c/../../../../d", "/foo/bar/d"},
+ {"/foo/bar", "a/b/c/d", "/foo/bar/a/b/c/d"},
+
+ {"/foo/bar/", "", "/foo/bar"},
+ {"/foo/bar/", "/", "/foo/bar"},
+ {"/foo/bar/", ".", "/foo/bar"},
+ {"/foo/bar/", "./a", "/foo/bar/a"},
+ {"/foo/bar/", "..", "/foo/bar"},
+
+ {"/foo//bar///", "", "/foo/bar"},
+ {"/foo//bar///", "/", "/foo/bar"},
+ {"/foo//bar///", ".", "/foo/bar"},
+ {"/foo//bar///", "./a", "/foo/bar/a"},
+ {"/foo//bar///", "..", "/foo/bar"},
+
+ {"/x/y/z", "ab/c\x00d/ef", ""},
+
+ {".", "", "."},
+ {".", "/", "."},
+ {".", ".", "."},
+ {".", "./a", "a"},
+ {".", "..", "."},
+ {".", "..", "."},
+ {".", "../", "."},
+ {".", "../.", "."},
+ {".", "../a", "a"},
+ {".", "../..", "."},
+ {".", "../bar/a", "bar/a"},
+ {".", "../baz/a", "baz/a"},
+ {".", "...", "..."},
+ {".", ".../a", ".../a"},
+ {".", ".../..", "."},
+ {".", "a", "a"},
+ {".", "a/./b", "a/b"},
+ {".", "a/../../b", "b"},
+ {".", "a/../b", "b"},
+ {".", "a/b", "a/b"},
+ {".", "a/b/c/../../d", "a/d"},
+ {".", "a/b/c/../../../d", "d"},
+ {".", "a/b/c/../../../../d", "d"},
+ {".", "a/b/c/d", "a/b/c/d"},
+
+ {"", "", "."},
+ {"", "/", "."},
+ {"", ".", "."},
+ {"", "./a", "a"},
+ {"", "..", "."},
+ }
+
+ for _, tc := range testCases {
+ d := Dir(filepath.FromSlash(tc.dir))
+ if got := filepath.ToSlash(d.resolve(tc.name)); got != tc.want {
+ t.Errorf("dir=%q, name=%q: got %q, want %q", tc.dir, tc.name, got, tc.want)
+ }
+ }
+}
+
+func TestWalk(t *testing.T) {
+ type walkStep struct {
+ name, frag string
+ final bool
+ }
+
+ testCases := []struct {
+ dir string
+ want []walkStep
+ }{
+ {"", []walkStep{
+ {"", "", true},
+ }},
+ {"/", []walkStep{
+ {"", "", true},
+ }},
+ {"/a", []walkStep{
+ {"", "a", true},
+ }},
+ {"/a/", []walkStep{
+ {"", "a", true},
+ }},
+ {"/a/b", []walkStep{
+ {"", "a", false},
+ {"a", "b", true},
+ }},
+ {"/a/b/", []walkStep{
+ {"", "a", false},
+ {"a", "b", true},
+ }},
+ {"/a/b/c", []walkStep{
+ {"", "a", false},
+ {"a", "b", false},
+ {"b", "c", true},
+ }},
+ // The following test case is the one mentioned explicitly
+ // in the method description.
+ {"/foo/bar/x", []walkStep{
+ {"", "foo", false},
+ {"foo", "bar", false},
+ {"bar", "x", true},
+ }},
+ }
+
+ ctx := context.Background()
+
+ for _, tc := range testCases {
+ fs := NewMemFS().(*memFS)
+
+ parts := strings.Split(tc.dir, "/")
+ for p := 2; p < len(parts); p++ {
+ d := strings.Join(parts[:p], "/")
+ if err := fs.Mkdir(ctx, d, 0666); err != nil {
+ t.Errorf("tc.dir=%q: mkdir: %q: %v", tc.dir, d, err)
+ }
+ }
+
+ i, prevFrag := 0, ""
+ err := fs.walk("test", tc.dir, func(dir *memFSNode, frag string, final bool) error {
+ got := walkStep{
+ name: prevFrag,
+ frag: frag,
+ final: final,
+ }
+ want := tc.want[i]
+
+ if got != want {
+ return fmt.Errorf("got %+v, want %+v", got, want)
+ }
+ i, prevFrag = i+1, frag
+ return nil
+ })
+ if err != nil {
+ t.Errorf("tc.dir=%q: %v", tc.dir, err)
+ }
+ }
+}
+
+// find appends to ss the names of the named file and its children. It is
+// analogous to the Unix find command.
+//
+// The returned strings are not guaranteed to be in any particular order.
+func find(ctx context.Context, ss []string, fs FileSystem, name string) ([]string, error) {
+ stat, err := fs.Stat(ctx, name)
+ if err != nil {
+ return nil, err
+ }
+ ss = append(ss, name)
+ if stat.IsDir() {
+ f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ children, err := f.Readdir(-1)
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range children {
+ ss, err = find(ctx, ss, fs, path.Join(name, c.Name()))
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ return ss, nil
+}
+
+func testFS(t *testing.T, fs FileSystem) {
+ errStr := func(err error) string {
+ switch {
+ case os.IsExist(err):
+ return "errExist"
+ case os.IsNotExist(err):
+ return "errNotExist"
+ case err != nil:
+ return "err"
+ }
+ return "ok"
+ }
+
+ // The non-"find" non-"stat" test cases should change the file system state. The
+ // indentation of the "find"s and "stat"s helps distinguish such test cases.
+ testCases := []string{
+ " stat / want dir",
+ " stat /a want errNotExist",
+ " stat /d want errNotExist",
+ " stat /d/e want errNotExist",
+ "create /a A want ok",
+ " stat /a want 1",
+ "create /d/e EEE want errNotExist",
+ "mk-dir /a want errExist",
+ "mk-dir /d/m want errNotExist",
+ "mk-dir /d want ok",
+ " stat /d want dir",
+ "create /d/e EEE want ok",
+ " stat /d/e want 3",
+ " find / /a /d /d/e",
+ "create /d/f FFFF want ok",
+ "create /d/g GGGGGGG want ok",
+ "mk-dir /d/m want ok",
+ "mk-dir /d/m want errExist",
+ "create /d/m/p PPPPP want ok",
+ " stat /d/e want 3",
+ " stat /d/f want 4",
+ " stat /d/g want 7",
+ " stat /d/h want errNotExist",
+ " stat /d/m want dir",
+ " stat /d/m/p want 5",
+ " find / /a /d /d/e /d/f /d/g /d/m /d/m/p",
+ "rm-all /d want ok",
+ " stat /a want 1",
+ " stat /d want errNotExist",
+ " stat /d/e want errNotExist",
+ " stat /d/f want errNotExist",
+ " stat /d/g want errNotExist",
+ " stat /d/m want errNotExist",
+ " stat /d/m/p want errNotExist",
+ " find / /a",
+ "mk-dir /d/m want errNotExist",
+ "mk-dir /d want ok",
+ "create /d/f FFFF want ok",
+ "rm-all /d/f want ok",
+ "mk-dir /d/m want ok",
+ "rm-all /z want ok",
+ "rm-all / want err",
+ "create /b BB want ok",
+ " stat / want dir",
+ " stat /a want 1",
+ " stat /b want 2",
+ " stat /c want errNotExist",
+ " stat /d want dir",
+ " stat /d/m want dir",
+ " find / /a /b /d /d/m",
+ "move__ o=F /b /c want ok",
+ " stat /b want errNotExist",
+ " stat /c want 2",
+ " stat /d/m want dir",
+ " stat /d/n want errNotExist",
+ " find / /a /c /d /d/m",
+ "move__ o=F /d/m /d/n want ok",
+ "create /d/n/q QQQQ want ok",
+ " stat /d/m want errNotExist",
+ " stat /d/n want dir",
+ " stat /d/n/q want 4",
+ "move__ o=F /d /d/n/z want err",
+ "move__ o=T /c /d/n/q want ok",
+ " stat /c want errNotExist",
+ " stat /d/n/q want 2",
+ " find / /a /d /d/n /d/n/q",
+ "create /d/n/r RRRRR want ok",
+ "mk-dir /u want ok",
+ "mk-dir /u/v want ok",
+ "move__ o=F /d/n /u want errExist",
+ "create /t TTTTTT want ok",
+ "move__ o=F /d/n /t want errExist",
+ "rm-all /t want ok",
+ "move__ o=F /d/n /t want ok",
+ " stat /d want dir",
+ " stat /d/n want errNotExist",
+ " stat /d/n/r want errNotExist",
+ " stat /t want dir",
+ " stat /t/q want 2",
+ " stat /t/r want 5",
+ " find / /a /d /t /t/q /t/r /u /u/v",
+ "move__ o=F /t / want errExist",
+ "move__ o=T /t /u/v want ok",
+ " stat /u/v/r want 5",
+ "move__ o=F / /z want err",
+ " find / /a /d /u /u/v /u/v/q /u/v/r",
+ " stat /a want 1",
+ " stat /b want errNotExist",
+ " stat /c want errNotExist",
+ " stat /u/v/r want 5",
+ "copy__ o=F d=0 /a /b want ok",
+ "copy__ o=T d=0 /a /c want ok",
+ " stat /a want 1",
+ " stat /b want 1",
+ " stat /c want 1",
+ " stat /u/v/r want 5",
+ "copy__ o=F d=0 /u/v/r /b want errExist",
+ " stat /b want 1",
+ "copy__ o=T d=0 /u/v/r /b want ok",
+ " stat /a want 1",
+ " stat /b want 5",
+ " stat /u/v/r want 5",
+ "rm-all /a want ok",
+ "rm-all /b want ok",
+ "mk-dir /u/v/w want ok",
+ "create /u/v/w/s SSSSSSSS want ok",
+ " stat /d want dir",
+ " stat /d/x want errNotExist",
+ " stat /d/y want errNotExist",
+ " stat /u/v/r want 5",
+ " stat /u/v/w/s want 8",
+ " find / /c /d /u /u/v /u/v/q /u/v/r /u/v/w /u/v/w/s",
+ "copy__ o=T d=0 /u/v /d/x want ok",
+ "copy__ o=T d=∞ /u/v /d/y want ok",
+ "rm-all /u want ok",
+ " stat /d/x want dir",
+ " stat /d/x/q want errNotExist",
+ " stat /d/x/r want errNotExist",
+ " stat /d/x/w want errNotExist",
+ " stat /d/x/w/s want errNotExist",
+ " stat /d/y want dir",
+ " stat /d/y/q want 2",
+ " stat /d/y/r want 5",
+ " stat /d/y/w want dir",
+ " stat /d/y/w/s want 8",
+ " stat /u want errNotExist",
+ " find / /c /d /d/x /d/y /d/y/q /d/y/r /d/y/w /d/y/w/s",
+ "copy__ o=F d=∞ /d/y /d/x want errExist",
+ }
+
+ ctx := context.Background()
+
+ for i, tc := range testCases {
+ tc = strings.TrimSpace(tc)
+ j := strings.IndexByte(tc, ' ')
+ if j < 0 {
+ t.Fatalf("test case #%d %q: invalid command", i, tc)
+ }
+ op, arg := tc[:j], tc[j+1:]
+
+ switch op {
+ default:
+ t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op)
+
+ case "create":
+ parts := strings.Split(arg, " ")
+ if len(parts) != 4 || parts[2] != "want" {
+ t.Fatalf("test case #%d %q: invalid write", i, tc)
+ }
+ f, opErr := fs.OpenFile(ctx, parts[0], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if got := errStr(opErr); got != parts[3] {
+ t.Fatalf("test case #%d %q: OpenFile: got %q (%v), want %q", i, tc, got, opErr, parts[3])
+ }
+ if f != nil {
+ if _, err := f.Write([]byte(parts[1])); err != nil {
+ t.Fatalf("test case #%d %q: Write: %v", i, tc, err)
+ }
+ if err := f.Close(); err != nil {
+ t.Fatalf("test case #%d %q: Close: %v", i, tc, err)
+ }
+ }
+
+ case "find":
+ got, err := find(ctx, nil, fs, "/")
+ if err != nil {
+ t.Fatalf("test case #%d %q: find: %v", i, tc, err)
+ }
+ sort.Strings(got)
+ want := strings.Split(arg, " ")
+ if !reflect.DeepEqual(got, want) {
+ t.Fatalf("test case #%d %q:\ngot %s\nwant %s", i, tc, got, want)
+ }
+
+ case "copy__", "mk-dir", "move__", "rm-all", "stat":
+ nParts := 3
+ switch op {
+ case "copy__":
+ nParts = 6
+ case "move__":
+ nParts = 5
+ }
+ parts := strings.Split(arg, " ")
+ if len(parts) != nParts {
+ t.Fatalf("test case #%d %q: invalid %s", i, tc, op)
+ }
+
+ got, opErr := "", error(nil)
+ switch op {
+ case "copy__":
+ depth := 0
+ if parts[1] == "d=∞" {
+ depth = infiniteDepth
+ }
+ _, opErr = copyFiles(ctx, fs, parts[2], parts[3], parts[0] == "o=T", depth, 0)
+ case "mk-dir":
+ opErr = fs.Mkdir(ctx, parts[0], 0777)
+ case "move__":
+ _, opErr = moveFiles(ctx, fs, parts[1], parts[2], parts[0] == "o=T")
+ case "rm-all":
+ opErr = fs.RemoveAll(ctx, parts[0])
+ case "stat":
+ var stat os.FileInfo
+ fileName := parts[0]
+ if stat, opErr = fs.Stat(ctx, fileName); opErr == nil {
+ if stat.IsDir() {
+ got = "dir"
+ } else {
+ got = strconv.Itoa(int(stat.Size()))
+ }
+
+ if fileName == "/" {
+ // For a Dir FileSystem, the virtual file system root maps to a
+ // real file system name like "/tmp/webdav-test012345", which does
+ // not end with "/". We skip such cases.
+ } else if statName := stat.Name(); path.Base(fileName) != statName {
+ t.Fatalf("test case #%d %q: file name %q inconsistent with stat name %q",
+ i, tc, fileName, statName)
+ }
+ }
+ }
+ if got == "" {
+ got = errStr(opErr)
+ }
+
+ if parts[len(parts)-2] != "want" {
+ t.Fatalf("test case #%d %q: invalid %s", i, tc, op)
+ }
+ if want := parts[len(parts)-1]; got != want {
+ t.Fatalf("test case #%d %q: got %q (%v), want %q", i, tc, got, opErr, want)
+ }
+ }
+ }
+}
+
+func TestDir(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl":
+ t.Skip("see golang.org/issue/12004")
+ case "plan9":
+ t.Skip("see golang.org/issue/11453")
+ }
+
+ td, err := ioutil.TempDir("", "webdav-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(td)
+ testFS(t, Dir(td))
+}
+
+func TestMemFS(t *testing.T) {
+ testFS(t, NewMemFS())
+}
+
+func TestMemFSRoot(t *testing.T) {
+ ctx := context.Background()
+ fs := NewMemFS()
+ for i := 0; i < 5; i++ {
+ stat, err := fs.Stat(ctx, "/")
+ if err != nil {
+ t.Fatalf("i=%d: Stat: %v", i, err)
+ }
+ if !stat.IsDir() {
+ t.Fatalf("i=%d: Stat.IsDir is false, want true", i)
+ }
+
+ f, err := fs.OpenFile(ctx, "/", os.O_RDONLY, 0)
+ if err != nil {
+ t.Fatalf("i=%d: OpenFile: %v", i, err)
+ }
+ defer f.Close()
+ children, err := f.Readdir(-1)
+ if err != nil {
+ t.Fatalf("i=%d: Readdir: %v", i, err)
+ }
+ if len(children) != i {
+ t.Fatalf("i=%d: got %d children, want %d", i, len(children), i)
+ }
+
+ if _, err := f.Write(make([]byte, 1)); err == nil {
+ t.Fatalf("i=%d: Write: got nil error, want non-nil", i)
+ }
+
+ if err := fs.Mkdir(ctx, fmt.Sprintf("/dir%d", i), 0777); err != nil {
+ t.Fatalf("i=%d: Mkdir: %v", i, err)
+ }
+ }
+}
+
+func TestMemFileReaddir(t *testing.T) {
+ ctx := context.Background()
+ fs := NewMemFS()
+ if err := fs.Mkdir(ctx, "/foo", 0777); err != nil {
+ t.Fatalf("Mkdir: %v", err)
+ }
+ readdir := func(count int) ([]os.FileInfo, error) {
+ f, err := fs.OpenFile(ctx, "/foo", os.O_RDONLY, 0)
+ if err != nil {
+ t.Fatalf("OpenFile: %v", err)
+ }
+ defer f.Close()
+ return f.Readdir(count)
+ }
+ if got, err := readdir(-1); len(got) != 0 || err != nil {
+ t.Fatalf("readdir(-1): got %d fileInfos with err=%v, want 0, <nil>", len(got), err)
+ }
+ if got, err := readdir(+1); len(got) != 0 || err != io.EOF {
+ t.Fatalf("readdir(+1): got %d fileInfos with err=%v, want 0, EOF", len(got), err)
+ }
+}
+
+func TestMemFile(t *testing.T) {
+ testCases := []string{
+ "wantData ",
+ "wantSize 0",
+ "write abc",
+ "wantData abc",
+ "write de",
+ "wantData abcde",
+ "wantSize 5",
+ "write 5*x",
+ "write 4*y+2*z",
+ "write 3*st",
+ "wantData abcdexxxxxyyyyzzststst",
+ "wantSize 22",
+ "seek set 4 want 4",
+ "write EFG",
+ "wantData abcdEFGxxxyyyyzzststst",
+ "wantSize 22",
+ "seek set 2 want 2",
+ "read cdEF",
+ "read Gx",
+ "seek cur 0 want 8",
+ "seek cur 2 want 10",
+ "seek cur -1 want 9",
+ "write J",
+ "wantData abcdEFGxxJyyyyzzststst",
+ "wantSize 22",
+ "seek cur -4 want 6",
+ "write ghijk",
+ "wantData abcdEFghijkyyyzzststst",
+ "wantSize 22",
+ "read yyyz",
+ "seek cur 0 want 15",
+ "write ",
+ "seek cur 0 want 15",
+ "read ",
+ "seek cur 0 want 15",
+ "seek end -3 want 19",
+ "write ZZ",
+ "wantData abcdEFghijkyyyzzstsZZt",
+ "wantSize 22",
+ "write 4*A",
+ "wantData abcdEFghijkyyyzzstsZZAAAA",
+ "wantSize 25",
+ "seek end 0 want 25",
+ "seek end -5 want 20",
+ "read Z+4*A",
+ "write 5*B",
+ "wantData abcdEFghijkyyyzzstsZZAAAABBBBB",
+ "wantSize 30",
+ "seek end 10 want 40",
+ "write C",
+ "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........C",
+ "wantSize 41",
+ "write D",
+ "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD",
+ "wantSize 42",
+ "seek set 43 want 43",
+ "write E",
+ "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD.E",
+ "wantSize 44",
+ "seek set 0 want 0",
+ "write 5*123456789_",
+ "wantData 123456789_123456789_123456789_123456789_123456789_",
+ "wantSize 50",
+ "seek cur 0 want 50",
+ "seek cur -99 want err",
+ }
+
+ ctx := context.Background()
+
+ const filename = "/foo"
+ fs := NewMemFS()
+ f, err := fs.OpenFile(ctx, filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ t.Fatalf("OpenFile: %v", err)
+ }
+ defer f.Close()
+
+ for i, tc := range testCases {
+ j := strings.IndexByte(tc, ' ')
+ if j < 0 {
+ t.Fatalf("test case #%d %q: invalid command", i, tc)
+ }
+ op, arg := tc[:j], tc[j+1:]
+
+ // Expand an arg like "3*a+2*b" to "aaabb".
+ parts := strings.Split(arg, "+")
+ for j, part := range parts {
+ if k := strings.IndexByte(part, '*'); k >= 0 {
+ repeatCount, repeatStr := part[:k], part[k+1:]
+ n, err := strconv.Atoi(repeatCount)
+ if err != nil {
+ t.Fatalf("test case #%d %q: invalid repeat count %q", i, tc, repeatCount)
+ }
+ parts[j] = strings.Repeat(repeatStr, n)
+ }
+ }
+ arg = strings.Join(parts, "")
+
+ switch op {
+ default:
+ t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op)
+
+ case "read":
+ buf := make([]byte, len(arg))
+ if _, err := io.ReadFull(f, buf); err != nil {
+ t.Fatalf("test case #%d %q: ReadFull: %v", i, tc, err)
+ }
+ if got := string(buf); got != arg {
+ t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg)
+ }
+
+ case "seek":
+ parts := strings.Split(arg, " ")
+ if len(parts) != 4 {
+ t.Fatalf("test case #%d %q: invalid seek", i, tc)
+ }
+
+ whence := 0
+ switch parts[0] {
+ default:
+ t.Fatalf("test case #%d %q: invalid seek whence", i, tc)
+ case "set":
+ whence = os.SEEK_SET
+ case "cur":
+ whence = os.SEEK_CUR
+ case "end":
+ whence = os.SEEK_END
+ }
+ offset, err := strconv.Atoi(parts[1])
+ if err != nil {
+ t.Fatalf("test case #%d %q: invalid offset %q", i, tc, parts[1])
+ }
+
+ if parts[2] != "want" {
+ t.Fatalf("test case #%d %q: invalid seek", i, tc)
+ }
+ if parts[3] == "err" {
+ _, err := f.Seek(int64(offset), whence)
+ if err == nil {
+ t.Fatalf("test case #%d %q: Seek returned nil error, want non-nil", i, tc)
+ }
+ } else {
+ got, err := f.Seek(int64(offset), whence)
+ if err != nil {
+ t.Fatalf("test case #%d %q: Seek: %v", i, tc, err)
+ }
+ want, err := strconv.Atoi(parts[3])
+ if err != nil {
+ t.Fatalf("test case #%d %q: invalid want %q", i, tc, parts[3])
+ }
+ if got != int64(want) {
+ t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want)
+ }
+ }
+
+ case "write":
+ n, err := f.Write([]byte(arg))
+ if err != nil {
+ t.Fatalf("test case #%d %q: write: %v", i, tc, err)
+ }
+ if n != len(arg) {
+ t.Fatalf("test case #%d %q: write returned %d bytes, want %d", i, tc, n, len(arg))
+ }
+
+ case "wantData":
+ g, err := fs.OpenFile(ctx, filename, os.O_RDONLY, 0666)
+ if err != nil {
+ t.Fatalf("test case #%d %q: OpenFile: %v", i, tc, err)
+ }
+ gotBytes, err := ioutil.ReadAll(g)
+ if err != nil {
+ t.Fatalf("test case #%d %q: ReadAll: %v", i, tc, err)
+ }
+ for i, c := range gotBytes {
+ if c == '\x00' {
+ gotBytes[i] = '.'
+ }
+ }
+ got := string(gotBytes)
+ if got != arg {
+ t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg)
+ }
+ if err := g.Close(); err != nil {
+ t.Fatalf("test case #%d %q: Close: %v", i, tc, err)
+ }
+
+ case "wantSize":
+ n, err := strconv.Atoi(arg)
+ if err != nil {
+ t.Fatalf("test case #%d %q: invalid size %q", i, tc, arg)
+ }
+ fi, err := fs.Stat(ctx, filename)
+ if err != nil {
+ t.Fatalf("test case #%d %q: Stat: %v", i, tc, err)
+ }
+ if got, want := fi.Size(), int64(n); got != want {
+ t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want)
+ }
+ }
+ }
+}
+
+// TestMemFileWriteAllocs tests that writing N consecutive 1KiB chunks to a
+// memFile doesn't allocate a new buffer for each of those N times. Otherwise,
+// calling io.Copy(aMemFile, src) is likely to have quadratic complexity.
+func TestMemFileWriteAllocs(t *testing.T) {
+ if runtime.Compiler == "gccgo" {
+ t.Skip("gccgo allocates here")
+ }
+ ctx := context.Background()
+ fs := NewMemFS()
+ f, err := fs.OpenFile(ctx, "/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ t.Fatalf("OpenFile: %v", err)
+ }
+ defer f.Close()
+
+ xxx := make([]byte, 1024)
+ for i := range xxx {
+ xxx[i] = 'x'
+ }
+
+ a := testing.AllocsPerRun(100, func() {
+ f.Write(xxx)
+ })
+ // AllocsPerRun returns an integral value, so we compare the rounded-down
+ // number to zero.
+ if a > 0 {
+ t.Fatalf("%v allocs per run, want 0", a)
+ }
+}
+
+func BenchmarkMemFileWrite(b *testing.B) {
+ ctx := context.Background()
+ fs := NewMemFS()
+ xxx := make([]byte, 1024)
+ for i := range xxx {
+ xxx[i] = 'x'
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ f, err := fs.OpenFile(ctx, "/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ b.Fatalf("OpenFile: %v", err)
+ }
+ for j := 0; j < 100; j++ {
+ f.Write(xxx)
+ }
+ if err := f.Close(); err != nil {
+ b.Fatalf("Close: %v", err)
+ }
+ if err := fs.RemoveAll(ctx, "/xxx"); err != nil {
+ b.Fatalf("RemoveAll: %v", err)
+ }
+ }
+}
+
+func TestCopyMoveProps(t *testing.T) {
+ ctx := context.Background()
+ fs := NewMemFS()
+ create := func(name string) error {
+ f, err := fs.OpenFile(ctx, name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ return err
+ }
+ _, wErr := f.Write([]byte("contents"))
+ cErr := f.Close()
+ if wErr != nil {
+ return wErr
+ }
+ return cErr
+ }
+ patch := func(name string, patches ...Proppatch) error {
+ f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0666)
+ if err != nil {
+ return err
+ }
+ _, pErr := f.(DeadPropsHolder).Patch(patches)
+ cErr := f.Close()
+ if pErr != nil {
+ return pErr
+ }
+ return cErr
+ }
+ props := func(name string) (map[xml.Name]Property, error) {
+ f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0666)
+ if err != nil {
+ return nil, err
+ }
+ m, pErr := f.(DeadPropsHolder).DeadProps()
+ cErr := f.Close()
+ if pErr != nil {
+ return nil, pErr
+ }
+ if cErr != nil {
+ return nil, cErr
+ }
+ return m, nil
+ }
+
+ p0 := Property{
+ XMLName: xml.Name{Space: "x:", Local: "boat"},
+ InnerXML: []byte("pea-green"),
+ }
+ p1 := Property{
+ XMLName: xml.Name{Space: "x:", Local: "ring"},
+ InnerXML: []byte("1 shilling"),
+ }
+ p2 := Property{
+ XMLName: xml.Name{Space: "x:", Local: "spoon"},
+ InnerXML: []byte("runcible"),
+ }
+ p3 := Property{
+ XMLName: xml.Name{Space: "x:", Local: "moon"},
+ InnerXML: []byte("light"),
+ }
+
+ if err := create("/src"); err != nil {
+ t.Fatalf("create /src: %v", err)
+ }
+ if err := patch("/src", Proppatch{Props: []Property{p0, p1}}); err != nil {
+ t.Fatalf("patch /src +p0 +p1: %v", err)
+ }
+ if _, err := copyFiles(ctx, fs, "/src", "/tmp", true, infiniteDepth, 0); err != nil {
+ t.Fatalf("copyFiles /src /tmp: %v", err)
+ }
+ if _, err := moveFiles(ctx, fs, "/tmp", "/dst", true); err != nil {
+ t.Fatalf("moveFiles /tmp /dst: %v", err)
+ }
+ if err := patch("/src", Proppatch{Props: []Property{p0}, Remove: true}); err != nil {
+ t.Fatalf("patch /src -p0: %v", err)
+ }
+ if err := patch("/src", Proppatch{Props: []Property{p2}}); err != nil {
+ t.Fatalf("patch /src +p2: %v", err)
+ }
+ if err := patch("/dst", Proppatch{Props: []Property{p1}, Remove: true}); err != nil {
+ t.Fatalf("patch /dst -p1: %v", err)
+ }
+ if err := patch("/dst", Proppatch{Props: []Property{p3}}); err != nil {
+ t.Fatalf("patch /dst +p3: %v", err)
+ }
+
+ gotSrc, err := props("/src")
+ if err != nil {
+ t.Fatalf("props /src: %v", err)
+ }
+ wantSrc := map[xml.Name]Property{
+ p1.XMLName: p1,
+ p2.XMLName: p2,
+ }
+ if !reflect.DeepEqual(gotSrc, wantSrc) {
+ t.Fatalf("props /src:\ngot %v\nwant %v", gotSrc, wantSrc)
+ }
+
+ gotDst, err := props("/dst")
+ if err != nil {
+ t.Fatalf("props /dst: %v", err)
+ }
+ wantDst := map[xml.Name]Property{
+ p0.XMLName: p0,
+ p3.XMLName: p3,
+ }
+ if !reflect.DeepEqual(gotDst, wantDst) {
+ t.Fatalf("props /dst:\ngot %v\nwant %v", gotDst, wantDst)
+ }
+}
+
+func TestWalkFS(t *testing.T) {
+ testCases := []struct {
+ desc string
+ buildfs []string
+ startAt string
+ depth int
+ walkFn filepath.WalkFunc
+ want []string
+ }{{
+ "just root",
+ []string{},
+ "/",
+ infiniteDepth,
+ nil,
+ []string{
+ "/",
+ },
+ }, {
+ "infinite walk from root",
+ []string{
+ "mkdir /a",
+ "mkdir /a/b",
+ "touch /a/b/c",
+ "mkdir /a/d",
+ "mkdir /e",
+ "touch /f",
+ },
+ "/",
+ infiniteDepth,
+ nil,
+ []string{
+ "/",
+ "/a",
+ "/a/b",
+ "/a/b/c",
+ "/a/d",
+ "/e",
+ "/f",
+ },
+ }, {
+ "infinite walk from subdir",
+ []string{
+ "mkdir /a",
+ "mkdir /a/b",
+ "touch /a/b/c",
+ "mkdir /a/d",
+ "mkdir /e",
+ "touch /f",
+ },
+ "/a",
+ infiniteDepth,
+ nil,
+ []string{
+ "/a",
+ "/a/b",
+ "/a/b/c",
+ "/a/d",
+ },
+ }, {
+ "depth 1 walk from root",
+ []string{
+ "mkdir /a",
+ "mkdir /a/b",
+ "touch /a/b/c",
+ "mkdir /a/d",
+ "mkdir /e",
+ "touch /f",
+ },
+ "/",
+ 1,
+ nil,
+ []string{
+ "/",
+ "/a",
+ "/e",
+ "/f",
+ },
+ }, {
+ "depth 1 walk from subdir",
+ []string{
+ "mkdir /a",
+ "mkdir /a/b",
+ "touch /a/b/c",
+ "mkdir /a/b/g",
+ "mkdir /a/b/g/h",
+ "touch /a/b/g/i",
+ "touch /a/b/g/h/j",
+ },
+ "/a/b",
+ 1,
+ nil,
+ []string{
+ "/a/b",
+ "/a/b/c",
+ "/a/b/g",
+ },
+ }, {
+ "depth 0 walk from subdir",
+ []string{
+ "mkdir /a",
+ "mkdir /a/b",
+ "touch /a/b/c",
+ "mkdir /a/b/g",
+ "mkdir /a/b/g/h",
+ "touch /a/b/g/i",
+ "touch /a/b/g/h/j",
+ },
+ "/a/b",
+ 0,
+ nil,
+ []string{
+ "/a/b",
+ },
+ }, {
+ "infinite walk from file",
+ []string{
+ "mkdir /a",
+ "touch /a/b",
+ "touch /a/c",
+ },
+ "/a/b",
+ 0,
+ nil,
+ []string{
+ "/a/b",
+ },
+ }, {
+ "infinite walk with skipped subdir",
+ []string{
+ "mkdir /a",
+ "mkdir /a/b",
+ "touch /a/b/c",
+ "mkdir /a/b/g",
+ "mkdir /a/b/g/h",
+ "touch /a/b/g/i",
+ "touch /a/b/g/h/j",
+ "touch /a/b/z",
+ },
+ "/",
+ infiniteDepth,
+ func(path string, info os.FileInfo, err error) error {
+ if path == "/a/b/g" {
+ return filepath.SkipDir
+ }
+ return nil
+ },
+ []string{
+ "/",
+ "/a",
+ "/a/b",
+ "/a/b/c",
+ "/a/b/z",
+ },
+ }}
+ ctx := context.Background()
+ for _, tc := range testCases {
+ fs, err := buildTestFS(tc.buildfs)
+ if err != nil {
+ t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err)
+ }
+ var got []string
+ traceFn := func(path string, info os.FileInfo, err error) error {
+ if tc.walkFn != nil {
+ err = tc.walkFn(path, info, err)
+ if err != nil {
+ return err
+ }
+ }
+ got = append(got, path)
+ return nil
+ }
+ fi, err := fs.Stat(ctx, tc.startAt)
+ if err != nil {
+ t.Fatalf("%s: cannot stat: %v", tc.desc, err)
+ }
+ err = walkFS(ctx, fs, tc.depth, tc.startAt, fi, traceFn)
+ if err != nil {
+ t.Errorf("%s:\ngot error %v, want nil", tc.desc, err)
+ continue
+ }
+ sort.Strings(got)
+ sort.Strings(tc.want)
+ if !reflect.DeepEqual(got, tc.want) {
+ t.Errorf("%s:\ngot %q\nwant %q", tc.desc, got, tc.want)
+ continue
+ }
+ }
+}
+
+func buildTestFS(buildfs []string) (FileSystem, error) {
+ // TODO: Could this be merged with the build logic in testFS?
+
+ ctx := context.Background()
+ fs := NewMemFS()
+ for _, b := range buildfs {
+ op := strings.Split(b, " ")
+ switch op[0] {
+ case "mkdir":
+ err := fs.Mkdir(ctx, op[1], os.ModeDir|0777)
+ if err != nil {
+ return nil, err
+ }
+ case "touch":
+ f, err := fs.OpenFile(ctx, op[1], os.O_RDWR|os.O_CREATE, 0666)
+ if err != nil {
+ return nil, err
+ }
+ f.Close()
+ case "write":
+ f, err := fs.OpenFile(ctx, op[1], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ return nil, err
+ }
+ _, err = f.Write([]byte(op[2]))
+ f.Close()
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("unknown file operation %q", op[0])
+ }
+ }
+ return fs, nil
+}
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/if.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/if.go
new file mode 100644
index 000000000..416e81cdf
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/if.go
@@ -0,0 +1,173 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+// The If header is covered by Section 10.4.
+// http://www.webdav.org/specs/rfc4918.html#HEADER_If
+
+import (
+ "strings"
+)
+
+// ifHeader is a disjunction (OR) of ifLists.
+type ifHeader struct {
+ lists []ifList
+}
+
+// ifList is a conjunction (AND) of Conditions, and an optional resource tag.
+type ifList struct {
+ resourceTag string
+ conditions []Condition
+}
+
+// parseIfHeader parses the "If: foo bar" HTTP header. The httpHeader string
+// should omit the "If:" prefix and have any "\r\n"s collapsed to a " ", as is
+// returned by req.Header.Get("If") for a http.Request req.
+func parseIfHeader(httpHeader string) (h ifHeader, ok bool) {
+ s := strings.TrimSpace(httpHeader)
+ switch tokenType, _, _ := lex(s); tokenType {
+ case '(':
+ return parseNoTagLists(s)
+ case angleTokenType:
+ return parseTaggedLists(s)
+ default:
+ return ifHeader{}, false
+ }
+}
+
+func parseNoTagLists(s string) (h ifHeader, ok bool) {
+ for {
+ l, remaining, ok := parseList(s)
+ if !ok {
+ return ifHeader{}, false
+ }
+ h.lists = append(h.lists, l)
+ if remaining == "" {
+ return h, true
+ }
+ s = remaining
+ }
+}
+
+func parseTaggedLists(s string) (h ifHeader, ok bool) {
+ resourceTag, n := "", 0
+ for first := true; ; first = false {
+ tokenType, tokenStr, remaining := lex(s)
+ switch tokenType {
+ case angleTokenType:
+ if !first && n == 0 {
+ return ifHeader{}, false
+ }
+ resourceTag, n = tokenStr, 0
+ s = remaining
+ case '(':
+ n++
+ l, remaining, ok := parseList(s)
+ if !ok {
+ return ifHeader{}, false
+ }
+ l.resourceTag = resourceTag
+ h.lists = append(h.lists, l)
+ if remaining == "" {
+ return h, true
+ }
+ s = remaining
+ default:
+ return ifHeader{}, false
+ }
+ }
+}
+
+func parseList(s string) (l ifList, remaining string, ok bool) {
+ tokenType, _, s := lex(s)
+ if tokenType != '(' {
+ return ifList{}, "", false
+ }
+ for {
+ tokenType, _, remaining = lex(s)
+ if tokenType == ')' {
+ if len(l.conditions) == 0 {
+ return ifList{}, "", false
+ }
+ return l, remaining, true
+ }
+ c, remaining, ok := parseCondition(s)
+ if !ok {
+ return ifList{}, "", false
+ }
+ l.conditions = append(l.conditions, c)
+ s = remaining
+ }
+}
+
+func parseCondition(s string) (c Condition, remaining string, ok bool) {
+ tokenType, tokenStr, s := lex(s)
+ if tokenType == notTokenType {
+ c.Not = true
+ tokenType, tokenStr, s = lex(s)
+ }
+ switch tokenType {
+ case strTokenType, angleTokenType:
+ c.Token = tokenStr
+ case squareTokenType:
+ c.ETag = tokenStr
+ default:
+ return Condition{}, "", false
+ }
+ return c, s, true
+}
+
+// Single-rune tokens like '(' or ')' have a token type equal to their rune.
+// All other tokens have a negative token type.
+const (
+ errTokenType = rune(-1)
+ eofTokenType = rune(-2)
+ strTokenType = rune(-3)
+ notTokenType = rune(-4)
+ angleTokenType = rune(-5)
+ squareTokenType = rune(-6)
+)
+
+func lex(s string) (tokenType rune, tokenStr string, remaining string) {
+ // The net/textproto Reader that parses the HTTP header will collapse
+ // Linear White Space that spans multiple "\r\n" lines to a single " ",
+ // so we don't need to look for '\r' or '\n'.
+ for len(s) > 0 && (s[0] == '\t' || s[0] == ' ') {
+ s = s[1:]
+ }
+ if len(s) == 0 {
+ return eofTokenType, "", ""
+ }
+ i := 0
+loop:
+ for ; i < len(s); i++ {
+ switch s[i] {
+ case '\t', ' ', '(', ')', '<', '>', '[', ']':
+ break loop
+ }
+ }
+
+ if i != 0 {
+ tokenStr, remaining = s[:i], s[i:]
+ if tokenStr == "Not" {
+ return notTokenType, "", remaining
+ }
+ return strTokenType, tokenStr, remaining
+ }
+
+ j := 0
+ switch s[0] {
+ case '<':
+ j, tokenType = strings.IndexByte(s, '>'), angleTokenType
+ case '[':
+ j, tokenType = strings.IndexByte(s, ']'), squareTokenType
+ default:
+ return rune(s[0]), "", s[1:]
+ }
+ if j < 0 {
+ return errTokenType, "", ""
+ }
+ return tokenType, s[1:j], s[j+1:]
+}
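A short sketch of the parser above in use, assuming it sits in package webdav (parseIfHeader, ifHeader and ifList are unexported) and that the exported Condition struct with Not, Token and ETag fields is declared elsewhere in the package, as the code above implies. The input is the tagged-list form from RFC 4918 section 7.5.2; exampleParseIfHeader is a hypothetical function written for illustration.

package webdav

import "fmt"

func exampleParseIfHeader() {
	// One resource tag followed by a single parenthesised list holding one
	// lock-token condition.
	h, ok := parseIfHeader(`<http://example.com/locked/> (<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`)
	if !ok {
		fmt.Println("malformed If header")
		return
	}
	for _, l := range h.lists {
		// Lists are OR'ed together; the conditions inside a list are AND'ed.
		fmt.Println("resource:", l.resourceTag)
		for _, c := range l.conditions {
			fmt.Printf("  not=%t token=%q etag=%q\n", c.Not, c.Token, c.ETag)
		}
	}
}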
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/if_test.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/if_test.go
new file mode 100644
index 000000000..aad61a401
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/if_test.go
@@ -0,0 +1,322 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+)
+
+func TestParseIfHeader(t *testing.T) {
+ // The "section x.y.z" test cases come from section x.y.z of the spec at
+ // http://www.webdav.org/specs/rfc4918.html
+ testCases := []struct {
+ desc string
+ input string
+ want ifHeader
+ }{{
+ "bad: empty",
+ ``,
+ ifHeader{},
+ }, {
+ "bad: no parens",
+ `foobar`,
+ ifHeader{},
+ }, {
+ "bad: empty list #1",
+ `()`,
+ ifHeader{},
+ }, {
+ "bad: empty list #2",
+ `(a) (b c) () (d)`,
+ ifHeader{},
+ }, {
+ "bad: no list after resource #1",
+ `<foo>`,
+ ifHeader{},
+ }, {
+ "bad: no list after resource #2",
+ `<foo> <bar> (a)`,
+ ifHeader{},
+ }, {
+ "bad: no list after resource #3",
+ `<foo> (a) (b) <bar>`,
+ ifHeader{},
+ }, {
+ "bad: no-tag-list followed by tagged-list",
+ `(a) (b) <foo> (c)`,
+ ifHeader{},
+ }, {
+ "bad: unfinished list",
+ `(a`,
+ ifHeader{},
+ }, {
+ "bad: unfinished ETag",
+ `([b`,
+ ifHeader{},
+ }, {
+ "bad: unfinished Notted list",
+ `(Not a`,
+ ifHeader{},
+ }, {
+ "bad: double Not",
+ `(Not Not a)`,
+ ifHeader{},
+ }, {
+ "good: one list with a Token",
+ `(a)`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ Token: `a`,
+ }},
+ }},
+ },
+ }, {
+ "good: one list with an ETag",
+ `([a])`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ ETag: `a`,
+ }},
+ }},
+ },
+ }, {
+ "good: one list with three Nots",
+ `(Not a Not b Not [d])`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ Not: true,
+ Token: `a`,
+ }, {
+ Not: true,
+ Token: `b`,
+ }, {
+ Not: true,
+ ETag: `d`,
+ }},
+ }},
+ },
+ }, {
+ "good: two lists",
+ `(a) (b)`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ Token: `a`,
+ }},
+ }, {
+ conditions: []Condition{{
+ Token: `b`,
+ }},
+ }},
+ },
+ }, {
+ "good: two Notted lists",
+ `(Not a) (Not b)`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ Not: true,
+ Token: `a`,
+ }},
+ }, {
+ conditions: []Condition{{
+ Not: true,
+ Token: `b`,
+ }},
+ }},
+ },
+ }, {
+ "section 7.5.1",
+ `<http://www.example.com/users/f/fielding/index.html>
+ (<urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6>)`,
+ ifHeader{
+ lists: []ifList{{
+ resourceTag: `http://www.example.com/users/f/fielding/index.html`,
+ conditions: []Condition{{
+ Token: `urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6`,
+ }},
+ }},
+ },
+ }, {
+ "section 7.5.2 #1",
+ `(<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`,
+ }},
+ }},
+ },
+ }, {
+ "section 7.5.2 #2",
+ `<http://example.com/locked/>
+ (<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`,
+ ifHeader{
+ lists: []ifList{{
+ resourceTag: `http://example.com/locked/`,
+ conditions: []Condition{{
+ Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`,
+ }},
+ }},
+ },
+ }, {
+ "section 7.5.2 #3",
+ `<http://example.com/locked/member>
+ (<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`,
+ ifHeader{
+ lists: []ifList{{
+ resourceTag: `http://example.com/locked/member`,
+ conditions: []Condition{{
+ Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`,
+ }},
+ }},
+ },
+ }, {
+ "section 9.9.6",
+ `(<urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4>)
+ (<urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77>)`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ Token: `urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4`,
+ }},
+ }, {
+ conditions: []Condition{{
+ Token: `urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77`,
+ }},
+ }},
+ },
+ }, {
+ "section 9.10.8",
+ `(<urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4>)`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ Token: `urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4`,
+ }},
+ }},
+ },
+ }, {
+ "section 10.4.6",
+ `(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>
+ ["I am an ETag"])
+ (["I am another ETag"])`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
+ }, {
+ ETag: `"I am an ETag"`,
+ }},
+ }, {
+ conditions: []Condition{{
+ ETag: `"I am another ETag"`,
+ }},
+ }},
+ },
+ }, {
+ "section 10.4.7",
+ `(Not <urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>
+ <urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092>)`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ Not: true,
+ Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
+ }, {
+ Token: `urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092`,
+ }},
+ }},
+ },
+ }, {
+ "section 10.4.8",
+ `(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>)
+ (Not <DAV:no-lock>)`,
+ ifHeader{
+ lists: []ifList{{
+ conditions: []Condition{{
+ Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
+ }},
+ }, {
+ conditions: []Condition{{
+ Not: true,
+ Token: `DAV:no-lock`,
+ }},
+ }},
+ },
+ }, {
+ "section 10.4.9",
+ `</resource1>
+ (<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>
+ [W/"A weak ETag"]) (["strong ETag"])`,
+ ifHeader{
+ lists: []ifList{{
+ resourceTag: `/resource1`,
+ conditions: []Condition{{
+ Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
+ }, {
+ ETag: `W/"A weak ETag"`,
+ }},
+ }, {
+ resourceTag: `/resource1`,
+ conditions: []Condition{{
+ ETag: `"strong ETag"`,
+ }},
+ }},
+ },
+ }, {
+ "section 10.4.10",
+ `<http://www.example.com/specs/>
+ (<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>)`,
+ ifHeader{
+ lists: []ifList{{
+ resourceTag: `http://www.example.com/specs/`,
+ conditions: []Condition{{
+ Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
+ }},
+ }},
+ },
+ }, {
+ "section 10.4.11 #1",
+ `</specs/rfc2518.doc> (["4217"])`,
+ ifHeader{
+ lists: []ifList{{
+ resourceTag: `/specs/rfc2518.doc`,
+ conditions: []Condition{{
+ ETag: `"4217"`,
+ }},
+ }},
+ },
+ }, {
+ "section 10.4.11 #2",
+ `</specs/rfc2518.doc> (Not ["4217"])`,
+ ifHeader{
+ lists: []ifList{{
+ resourceTag: `/specs/rfc2518.doc`,
+ conditions: []Condition{{
+ Not: true,
+ ETag: `"4217"`,
+ }},
+ }},
+ },
+ }}
+
+ for _, tc := range testCases {
+ got, ok := parseIfHeader(strings.Replace(tc.input, "\n", "", -1))
+ if gotEmpty := reflect.DeepEqual(got, ifHeader{}); gotEmpty == ok {
+ t.Errorf("%s: should be different: empty header == %t, ok == %t", tc.desc, gotEmpty, ok)
+ continue
+ }
+ if !reflect.DeepEqual(got, tc.want) {
+ t.Errorf("%s:\ngot %v\nwant %v", tc.desc, got, tc.want)
+ continue
+ }
+ }
+}
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/README b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/README
new file mode 100644
index 000000000..89656f489
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/README
@@ -0,0 +1,11 @@
+This is a fork of the encoding/xml package at ca1d6c4, the last commit before
+https://go.googlesource.com/go/+/c0d6d33 "encoding/xml: restore Go 1.4 name
+space behavior" made late in the lead-up to the Go 1.5 release.
+
+The list of encoding/xml changes is at
+https://go.googlesource.com/go/+log/master/src/encoding/xml
+
+This fork is temporary, and I (nigeltao) expect to revert it after Go 1.6 is
+released.
+
+See http://golang.org/issue/11841
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go
new file mode 100644
index 000000000..a71284312
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go
@@ -0,0 +1,56 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import "time"
+
+var atomValue = &Feed{
+ XMLName: Name{"http://www.w3.org/2005/Atom", "feed"},
+ Title: "Example Feed",
+ Link: []Link{{Href: "http://example.org/"}},
+ Updated: ParseTime("2003-12-13T18:30:02Z"),
+ Author: Person{Name: "John Doe"},
+ Id: "urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6",
+
+ Entry: []Entry{
+ {
+ Title: "Atom-Powered Robots Run Amok",
+ Link: []Link{{Href: "http://example.org/2003/12/13/atom03"}},
+ Id: "urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a",
+ Updated: ParseTime("2003-12-13T18:30:02Z"),
+ Summary: NewText("Some text."),
+ },
+ },
+}
+
+var atomXml = `` +
+ `<feed xmlns="http://www.w3.org/2005/Atom" updated="2003-12-13T18:30:02Z">` +
+ `<title>Example Feed</title>` +
+ `<id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</id>` +
+ `<link href="http://example.org/"></link>` +
+ `<author><name>John Doe</name><uri></uri><email></email></author>` +
+ `<entry>` +
+ `<title>Atom-Powered Robots Run Amok</title>` +
+ `<id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id>` +
+ `<link href="http://example.org/2003/12/13/atom03"></link>` +
+ `<updated>2003-12-13T18:30:02Z</updated>` +
+ `<author><name></name><uri></uri><email></email></author>` +
+ `<summary>Some text.</summary>` +
+ `</entry>` +
+ `</feed>`
+
+func ParseTime(str string) time.Time {
+ t, err := time.Parse(time.RFC3339, str)
+ if err != nil {
+ panic(err)
+ }
+ return t
+}
+
+func NewText(text string) Text {
+ return Text{
+ Body: text,
+ }
+}
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/example_test.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/example_test.go
new file mode 100644
index 000000000..21b48dea5
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/example_test.go
@@ -0,0 +1,151 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml_test
+
+import (
+ "encoding/xml"
+ "fmt"
+ "os"
+)
+
+func ExampleMarshalIndent() {
+ type Address struct {
+ City, State string
+ }
+ type Person struct {
+ XMLName xml.Name `xml:"person"`
+ Id int `xml:"id,attr"`
+ FirstName string `xml:"name>first"`
+ LastName string `xml:"name>last"`
+ Age int `xml:"age"`
+ Height float32 `xml:"height,omitempty"`
+ Married bool
+ Address
+ Comment string `xml:",comment"`
+ }
+
+ v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42}
+ v.Comment = " Need more details. "
+ v.Address = Address{"Hanga Roa", "Easter Island"}
+
+ output, err := xml.MarshalIndent(v, " ", " ")
+ if err != nil {
+ fmt.Printf("error: %v\n", err)
+ }
+
+ os.Stdout.Write(output)
+ // Output:
+ // <person id="13">
+ // <name>
+ // <first>John</first>
+ // <last>Doe</last>
+ // </name>
+ // <age>42</age>
+ // <Married>false</Married>
+ // <City>Hanga Roa</City>
+ // <State>Easter Island</State>
+ // <!-- Need more details. -->
+ // </person>
+}
+
+func ExampleEncoder() {
+ type Address struct {
+ City, State string
+ }
+ type Person struct {
+ XMLName xml.Name `xml:"person"`
+ Id int `xml:"id,attr"`
+ FirstName string `xml:"name>first"`
+ LastName string `xml:"name>last"`
+ Age int `xml:"age"`
+ Height float32 `xml:"height,omitempty"`
+ Married bool
+ Address
+ Comment string `xml:",comment"`
+ }
+
+ v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42}
+ v.Comment = " Need more details. "
+ v.Address = Address{"Hanga Roa", "Easter Island"}
+
+ enc := xml.NewEncoder(os.Stdout)
+ enc.Indent(" ", " ")
+ if err := enc.Encode(v); err != nil {
+ fmt.Printf("error: %v\n", err)
+ }
+
+ // Output:
+ // <person id="13">
+ // <name>
+ // <first>John</first>
+ // <last>Doe</last>
+ // </name>
+ // <age>42</age>
+ // <Married>false</Married>
+ // <City>Hanga Roa</City>
+ // <State>Easter Island</State>
+ // <!-- Need more details. -->
+ // </person>
+}
+
+// This example demonstrates unmarshaling an XML excerpt into a value with
+// some preset fields. Note that the Phone field isn't modified and that
+// the XML <Company> element is ignored. Also, the Groups field is assigned
+// considering the element path provided in its tag.
+func ExampleUnmarshal() {
+ type Email struct {
+ Where string `xml:"where,attr"`
+ Addr string
+ }
+ type Address struct {
+ City, State string
+ }
+ type Result struct {
+ XMLName xml.Name `xml:"Person"`
+ Name string `xml:"FullName"`
+ Phone string
+ Email []Email
+ Groups []string `xml:"Group>Value"`
+ Address
+ }
+ v := Result{Name: "none", Phone: "none"}
+
+ data := `
+ <Person>
+ <FullName>Grace R. Emlin</FullName>
+ <Company>Example Inc.</Company>
+ <Email where="home">
+ <Addr>gre@example.com</Addr>
+ </Email>
+ <Email where='work'>
+ <Addr>gre@work.com</Addr>
+ </Email>
+ <Group>
+ <Value>Friends</Value>
+ <Value>Squash</Value>
+ </Group>
+ <City>Hanga Roa</City>
+ <State>Easter Island</State>
+ </Person>
+ `
+ err := xml.Unmarshal([]byte(data), &v)
+ if err != nil {
+ fmt.Printf("error: %v", err)
+ return
+ }
+ fmt.Printf("XMLName: %#v\n", v.XMLName)
+ fmt.Printf("Name: %q\n", v.Name)
+ fmt.Printf("Phone: %q\n", v.Phone)
+ fmt.Printf("Email: %v\n", v.Email)
+ fmt.Printf("Groups: %v\n", v.Groups)
+ fmt.Printf("Address: %v\n", v.Address)
+ // Output:
+ // XMLName: xml.Name{Space:"", Local:"Person"}
+ // Name: "Grace R. Emlin"
+ // Phone: "none"
+ // Email: [{home gre@example.com} {work gre@work.com}]
+ // Groups: [Friends Squash]
+ // Address: {Hanga Roa Easter Island}
+}
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/marshal.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/marshal.go
new file mode 100644
index 000000000..cb82ec214
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/marshal.go
@@ -0,0 +1,1223 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+const (
+ // A generic XML header suitable for use with the output of Marshal.
+ // This is not automatically added to any output of this package,
+ // it is provided as a convenience.
+ Header = `<?xml version="1.0" encoding="UTF-8"?>` + "\n"
+)
+
+// Marshal returns the XML encoding of v.
+//
+// Marshal handles an array or slice by marshalling each of the elements.
+// Marshal handles a pointer by marshalling the value it points at or, if the
+// pointer is nil, by writing nothing. Marshal handles an interface value by
+// marshalling the value it contains or, if the interface value is nil, by
+// writing nothing. Marshal handles all other data by writing one or more XML
+// elements containing the data.
+//
+// The name for the XML elements is taken from, in order of preference:
+// - the tag on the XMLName field, if the data is a struct
+// - the value of the XMLName field of type xml.Name
+// - the tag of the struct field used to obtain the data
+// - the name of the struct field used to obtain the data
+// - the name of the marshalled type
+//
+// The XML element for a struct contains marshalled elements for each of the
+// exported fields of the struct, with these exceptions:
+// - the XMLName field, described above, is omitted.
+// - a field with tag "-" is omitted.
+// - a field with tag "name,attr" becomes an attribute with
+// the given name in the XML element.
+// - a field with tag ",attr" becomes an attribute with the
+// field name in the XML element.
+// - a field with tag ",chardata" is written as character data,
+// not as an XML element.
+// - a field with tag ",innerxml" is written verbatim, not subject
+// to the usual marshalling procedure.
+// - a field with tag ",comment" is written as an XML comment, not
+// subject to the usual marshalling procedure. It must not contain
+// the "--" string within it.
+// - a field with a tag including the "omitempty" option is omitted
+// if the field value is empty. The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or
+// string of length zero.
+// - an anonymous struct field is handled as if the fields of its
+// value were part of the outer struct.
+//
+// If a field uses a tag "a>b>c", then the element c will be nested inside
+// parent elements a and b. Fields that appear next to each other that name
+// the same parent will be enclosed in one XML element.
+//
+// See MarshalIndent for an example.
+//
+// Marshal will return an error if asked to marshal a channel, function, or map.
+func Marshal(v interface{}) ([]byte, error) {
+ var b bytes.Buffer
+ if err := NewEncoder(&b).Encode(v); err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+
+// Marshaler is the interface implemented by objects that can marshal
+// themselves into valid XML elements.
+//
+// MarshalXML encodes the receiver as zero or more XML elements.
+// By convention, arrays or slices are typically encoded as a sequence
+// of elements, one per entry.
+// Using start as the element tag is not required, but doing so
+// will enable Unmarshal to match the XML elements to the correct
+// struct field.
+// One common implementation strategy is to construct a separate
+// value with a layout corresponding to the desired XML and then
+// to encode it using e.EncodeElement.
+// Another common strategy is to use repeated calls to e.EncodeToken
+// to generate the XML output one token at a time.
+// The sequence of encoded tokens must make up zero or more valid
+// XML elements.
+type Marshaler interface {
+ MarshalXML(e *Encoder, start StartElement) error
+}
+
+// MarshalerAttr is the interface implemented by objects that can marshal
+// themselves into valid XML attributes.
+//
+// MarshalXMLAttr returns an XML attribute with the encoded value of the receiver.
+// Using name as the attribute name is not required, but doing so
+// will enable Unmarshal to match the attribute to the correct
+// struct field.
+// If MarshalXMLAttr returns the zero attribute Attr{}, no attribute
+// will be generated in the output.
+// MarshalXMLAttr is used only for struct fields with the
+// "attr" option in the field tag.
+type MarshalerAttr interface {
+ MarshalXMLAttr(name Name) (Attr, error)
+}
+
+// MarshalIndent works like Marshal, but each XML element begins on a new
+// indented line that starts with prefix and is followed by one or more
+// copies of indent according to the nesting depth.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ var b bytes.Buffer
+ enc := NewEncoder(&b)
+ enc.Indent(prefix, indent)
+ if err := enc.Encode(v); err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+
+// An Encoder writes XML data to an output stream.
+type Encoder struct {
+ p printer
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ e := &Encoder{printer{Writer: bufio.NewWriter(w)}}
+ e.p.encoder = e
+ return e
+}
+
+// Indent sets the encoder to generate XML in which each element
+// begins on a new indented line that starts with prefix and is followed by
+// one or more copies of indent according to the nesting depth.
+func (enc *Encoder) Indent(prefix, indent string) {
+ enc.p.prefix = prefix
+ enc.p.indent = indent
+}
+
+// Encode writes the XML encoding of v to the stream.
+//
+// See the documentation for Marshal for details about the conversion
+// of Go values to XML.
+//
+// Encode calls Flush before returning.
+func (enc *Encoder) Encode(v interface{}) error {
+ err := enc.p.marshalValue(reflect.ValueOf(v), nil, nil)
+ if err != nil {
+ return err
+ }
+ return enc.p.Flush()
+}
+
+// EncodeElement writes the XML encoding of v to the stream,
+// using start as the outermost tag in the encoding.
+//
+// See the documentation for Marshal for details about the conversion
+// of Go values to XML.
+//
+// EncodeElement calls Flush before returning.
+func (enc *Encoder) EncodeElement(v interface{}, start StartElement) error {
+ err := enc.p.marshalValue(reflect.ValueOf(v), nil, &start)
+ if err != nil {
+ return err
+ }
+ return enc.p.Flush()
+}
+
+var (
+ begComment = []byte("<!--")
+ endComment = []byte("-->")
+ endProcInst = []byte("?>")
+ endDirective = []byte(">")
+)
+
+// EncodeToken writes the given XML token to the stream.
+// It returns an error if StartElement and EndElement tokens are not
+// properly matched.
+//
+// EncodeToken does not call Flush, because usually it is part of a
+// larger operation such as Encode or EncodeElement (or a custom
+// Marshaler's MarshalXML invoked during those), and those will call
+// Flush when finished. Callers that create an Encoder and then invoke
+// EncodeToken directly, without using Encode or EncodeElement, need to
+// call Flush when finished to ensure that the XML is written to the
+// underlying writer.
+//
+// EncodeToken allows writing a ProcInst with Target set to "xml" only
+// as the first token in the stream.
+//
+// When encoding a StartElement holding an XML namespace prefix
+// declaration for a prefix that is not already declared, contained
+// elements (including the StartElement itself) will use the declared
+// prefix when encoding names with matching namespace URIs.
+func (enc *Encoder) EncodeToken(t Token) error {
+
+ p := &enc.p
+ switch t := t.(type) {
+ case StartElement:
+ if err := p.writeStart(&t); err != nil {
+ return err
+ }
+ case EndElement:
+ if err := p.writeEnd(t.Name); err != nil {
+ return err
+ }
+ case CharData:
+ escapeText(p, t, false)
+ case Comment:
+ if bytes.Contains(t, endComment) {
+ return fmt.Errorf("xml: EncodeToken of Comment containing --> marker")
+ }
+ p.WriteString("<!--")
+ p.Write(t)
+ p.WriteString("-->")
+ return p.cachedWriteError()
+ case ProcInst:
+ // A ProcInst whose Target is "xml" is the XML declaration, and it is
+ // only allowed as the first token to be encoded.
+ if t.Target == "xml" && p.Buffered() != 0 {
+ return fmt.Errorf("xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded")
+ }
+ if !isNameString(t.Target) {
+ return fmt.Errorf("xml: EncodeToken of ProcInst with invalid Target")
+ }
+ if bytes.Contains(t.Inst, endProcInst) {
+ return fmt.Errorf("xml: EncodeToken of ProcInst containing ?> marker")
+ }
+ p.WriteString("<?")
+ p.WriteString(t.Target)
+ if len(t.Inst) > 0 {
+ p.WriteByte(' ')
+ p.Write(t.Inst)
+ }
+ p.WriteString("?>")
+ case Directive:
+ if !isValidDirective(t) {
+ return fmt.Errorf("xml: EncodeToken of Directive containing wrong < or > markers")
+ }
+ p.WriteString("<!")
+ p.Write(t)
+ p.WriteString(">")
+ default:
+ return fmt.Errorf("xml: EncodeToken of invalid token type")
+
+ }
+ return p.cachedWriteError()
+}
+
+// isValidDirective reports whether dir is a valid directive text,
+// meaning angle brackets are matched, ignoring comments and strings.
+func isValidDirective(dir Directive) bool {
+ var (
+ depth int
+ inquote uint8
+ incomment bool
+ )
+ for i, c := range dir {
+ switch {
+ case incomment:
+ if c == '>' {
+ if n := 1 + i - len(endComment); n >= 0 && bytes.Equal(dir[n:i+1], endComment) {
+ incomment = false
+ }
+ }
+ // Just ignore anything in comment
+ case inquote != 0:
+ if c == inquote {
+ inquote = 0
+ }
+ // Just ignore anything within quotes
+ case c == '\'' || c == '"':
+ inquote = c
+ case c == '<':
+ if i+len(begComment) < len(dir) && bytes.Equal(dir[i:i+len(begComment)], begComment) {
+ incomment = true
+ } else {
+ depth++
+ }
+ case c == '>':
+ if depth == 0 {
+ return false
+ }
+ depth--
+ }
+ }
+ return depth == 0 && inquote == 0 && !incomment
+}
+
+// Flush flushes any buffered XML to the underlying writer.
+// See the EncodeToken documentation for details about when it is necessary.
+func (enc *Encoder) Flush() error {
+ return enc.p.Flush()
+}
+
+type printer struct {
+ *bufio.Writer
+ encoder *Encoder
+ seq int
+ indent string
+ prefix string
+ depth int
+ indentedIn bool
+ putNewline bool
+ defaultNS string
+ attrNS map[string]string // map prefix -> name space
+ attrPrefix map[string]string // map name space -> prefix
+ prefixes []printerPrefix
+ tags []Name
+}
+
+// printerPrefix holds a namespace undo record.
+// When an element is popped, the prefix record
+// is set back to the recorded URL. The empty
+// prefix records the URL for the default name space.
+//
+// The start of an element is recorded with an element
+// that has mark=true.
+type printerPrefix struct {
+ prefix string
+ url string
+ mark bool
+}
+
+func (p *printer) prefixForNS(url string, isAttr bool) string {
+ // The "http://www.w3.org/XML/1998/namespace" name space is predefined as "xml"
+ // and must be referred to that way.
+ // (The "http://www.w3.org/2000/xmlns/" name space is also predefined as "xmlns",
+ // but users should not be trying to use that one directly - that's our job.)
+ if url == xmlURL {
+ return "xml"
+ }
+ if !isAttr && url == p.defaultNS {
+ // We can use the default name space.
+ return ""
+ }
+ return p.attrPrefix[url]
+}
+
+// defineNS pushes any namespace definition found in the given attribute.
+// If ignoreNonEmptyDefault is true, an xmlns="nonempty"
+// attribute will be ignored.
+func (p *printer) defineNS(attr Attr, ignoreNonEmptyDefault bool) error {
+ var prefix string
+ if attr.Name.Local == "xmlns" {
+ if attr.Name.Space != "" && attr.Name.Space != "xml" && attr.Name.Space != xmlURL {
+ return fmt.Errorf("xml: cannot redefine xmlns attribute prefix")
+ }
+ } else if attr.Name.Space == "xmlns" && attr.Name.Local != "" {
+ prefix = attr.Name.Local
+ if attr.Value == "" {
+ // Technically, an empty XML namespace is allowed for an attribute.
+ // From http://www.w3.org/TR/xml-names11/#scoping-defaulting:
+ //
+ // The attribute value in a namespace declaration for a prefix may be
+ // empty. This has the effect, within the scope of the declaration, of removing
+ // any association of the prefix with a namespace name.
+ //
+ // However our namespace prefixes here are used only as hints. There's
+ // no need to respect the removal of a namespace prefix, so we ignore it.
+ return nil
+ }
+ } else {
+ // Ignore: it's not a namespace definition
+ return nil
+ }
+ if prefix == "" {
+ if attr.Value == p.defaultNS {
+ // No need for redefinition.
+ return nil
+ }
+ if attr.Value != "" && ignoreNonEmptyDefault {
+ // We have an xmlns="..." value but
+ // it can't define a name space in this context,
+ // probably because the element has an empty
+ // name space. In this case, we just ignore
+ // the name space declaration.
+ return nil
+ }
+ } else if _, ok := p.attrPrefix[attr.Value]; ok {
+ // There's already a prefix for the given name space,
+ // so use that. This prevents us from
+ // having two prefixes for the same name space
+ // so attrNS and attrPrefix can remain bijective.
+ return nil
+ }
+ p.pushPrefix(prefix, attr.Value)
+ return nil
+}
+
+// createNSPrefix creates a name space prefix attribute
+// to use for the given name space, defining a new prefix
+// if necessary.
+// If isAttr is true, the prefix is to be created for an attribute
+// prefix, which means that the default name space cannot
+// be used.
+func (p *printer) createNSPrefix(url string, isAttr bool) {
+ if _, ok := p.attrPrefix[url]; ok {
+ // We already have a prefix for the given URL.
+ return
+ }
+ switch {
+ case !isAttr && url == p.defaultNS:
+ // We can use the default name space.
+ return
+ case url == "":
+ // The only way we can encode names in the empty
+ // name space is by using the default name space,
+ // so we must use that.
+ if p.defaultNS != "" {
+ // The default namespace is non-empty, so we
+ // need to set it to empty.
+ p.pushPrefix("", "")
+ }
+ return
+ case url == xmlURL:
+ return
+ }
+ // TODO: If the URL is an existing prefix, we could use it as is.
+ // That would enable the marshaling of elements that had been
+ // unmarshaled with a name space prefix that was not found,
+ // although technically it would be incorrect.
+
+ // Pick a name. We try to use the final element of the path
+ // but fall back to _.
+ prefix := strings.TrimRight(url, "/")
+ if i := strings.LastIndex(prefix, "/"); i >= 0 {
+ prefix = prefix[i+1:]
+ }
+ if prefix == "" || !isName([]byte(prefix)) || strings.Contains(prefix, ":") {
+ prefix = "_"
+ }
+ if strings.HasPrefix(prefix, "xml") {
+ // xmlanything is reserved.
+ prefix = "_" + prefix
+ }
+ if p.attrNS[prefix] != "" {
+ // Name is taken. Find a better one.
+ for p.seq++; ; p.seq++ {
+ if id := prefix + "_" + strconv.Itoa(p.seq); p.attrNS[id] == "" {
+ prefix = id
+ break
+ }
+ }
+ }
+
+ p.pushPrefix(prefix, url)
+}
+
+// writeNamespaces writes xmlns attributes for all the
+// namespace prefixes that have been defined in
+// the current element.
+func (p *printer) writeNamespaces() {
+ for i := len(p.prefixes) - 1; i >= 0; i-- {
+ prefix := p.prefixes[i]
+ if prefix.mark {
+ return
+ }
+ p.WriteString(" ")
+ if prefix.prefix == "" {
+ // Default name space.
+ p.WriteString(`xmlns="`)
+ } else {
+ p.WriteString("xmlns:")
+ p.WriteString(prefix.prefix)
+ p.WriteString(`="`)
+ }
+ EscapeText(p, []byte(p.nsForPrefix(prefix.prefix)))
+ p.WriteString(`"`)
+ }
+}
+
+// pushPrefix pushes a new prefix on the prefix stack
+// without checking to see if it is already defined.
+func (p *printer) pushPrefix(prefix, url string) {
+ p.prefixes = append(p.prefixes, printerPrefix{
+ prefix: prefix,
+ url: p.nsForPrefix(prefix),
+ })
+ p.setAttrPrefix(prefix, url)
+}
+
+// nsForPrefix returns the name space for the given
+// prefix. Note that this is not valid for the
+// empty attribute prefix, which always has an empty
+// name space.
+func (p *printer) nsForPrefix(prefix string) string {
+ if prefix == "" {
+ return p.defaultNS
+ }
+ return p.attrNS[prefix]
+}
+
+// markPrefix marks the start of an element on the prefix
+// stack.
+func (p *printer) markPrefix() {
+ p.prefixes = append(p.prefixes, printerPrefix{
+ mark: true,
+ })
+}
+
+// popPrefix pops all defined prefixes for the current
+// element.
+func (p *printer) popPrefix() {
+ for len(p.prefixes) > 0 {
+ prefix := p.prefixes[len(p.prefixes)-1]
+ p.prefixes = p.prefixes[:len(p.prefixes)-1]
+ if prefix.mark {
+ break
+ }
+ p.setAttrPrefix(prefix.prefix, prefix.url)
+ }
+}
+
+// setAttrPrefix sets an attribute name space prefix.
+// If url is empty, the attribute is removed.
+// If prefix is empty, the default name space is set.
+func (p *printer) setAttrPrefix(prefix, url string) {
+ if prefix == "" {
+ p.defaultNS = url
+ return
+ }
+ if url == "" {
+ delete(p.attrPrefix, p.attrNS[prefix])
+ delete(p.attrNS, prefix)
+ return
+ }
+ if p.attrPrefix == nil {
+ // Need to define a new name space.
+ p.attrPrefix = make(map[string]string)
+ p.attrNS = make(map[string]string)
+ }
+ // Remove any old prefix value. This is OK because we maintain a
+ // strict one-to-one mapping between prefix and URL (see
+ // defineNS)
+ delete(p.attrPrefix, p.attrNS[prefix])
+ p.attrPrefix[url] = prefix
+ p.attrNS[prefix] = url
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ marshalerAttrType = reflect.TypeOf((*MarshalerAttr)(nil)).Elem()
+ textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+)
+
+// marshalValue writes one or more XML elements representing val.
+// If val was obtained from a struct field, finfo must have its details.
+func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error {
+ if startTemplate != nil && startTemplate.Name.Local == "" {
+ return fmt.Errorf("xml: EncodeElement of StartElement with missing name")
+ }
+
+ if !val.IsValid() {
+ return nil
+ }
+ if finfo != nil && finfo.flags&fOmitEmpty != 0 && isEmptyValue(val) {
+ return nil
+ }
+
+ // Drill into interfaces and pointers.
+ // This can turn into an infinite loop given a cyclic chain,
+ // but it matches the Go 1 behavior.
+ for val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr {
+ if val.IsNil() {
+ return nil
+ }
+ val = val.Elem()
+ }
+
+ kind := val.Kind()
+ typ := val.Type()
+
+ // Check for marshaler.
+ if val.CanInterface() && typ.Implements(marshalerType) {
+ return p.marshalInterface(val.Interface().(Marshaler), p.defaultStart(typ, finfo, startTemplate))
+ }
+ if val.CanAddr() {
+ pv := val.Addr()
+ if pv.CanInterface() && pv.Type().Implements(marshalerType) {
+ return p.marshalInterface(pv.Interface().(Marshaler), p.defaultStart(pv.Type(), finfo, startTemplate))
+ }
+ }
+
+ // Check for text marshaler.
+ if val.CanInterface() && typ.Implements(textMarshalerType) {
+ return p.marshalTextInterface(val.Interface().(encoding.TextMarshaler), p.defaultStart(typ, finfo, startTemplate))
+ }
+ if val.CanAddr() {
+ pv := val.Addr()
+ if pv.CanInterface() && pv.Type().Implements(textMarshalerType) {
+ return p.marshalTextInterface(pv.Interface().(encoding.TextMarshaler), p.defaultStart(pv.Type(), finfo, startTemplate))
+ }
+ }
+
+ // Slices and arrays iterate over the elements. They do not have an enclosing tag.
+ if (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 {
+ for i, n := 0, val.Len(); i < n; i++ {
+ if err := p.marshalValue(val.Index(i), finfo, startTemplate); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ tinfo, err := getTypeInfo(typ)
+ if err != nil {
+ return err
+ }
+
+ // Create start element.
+ // Precedence for the XML element name is:
+ // 0. startTemplate
+ // 1. XMLName field in underlying struct;
+ // 2. field name/tag in the struct field; and
+ // 3. type name
+ var start StartElement
+
+ // explicitNS records whether the element's name space has been
+ // explicitly set (for example an XMLName field).
+ explicitNS := false
+
+ if startTemplate != nil {
+ start.Name = startTemplate.Name
+ explicitNS = true
+ start.Attr = append(start.Attr, startTemplate.Attr...)
+ } else if tinfo.xmlname != nil {
+ xmlname := tinfo.xmlname
+ if xmlname.name != "" {
+ start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name
+ } else if v, ok := xmlname.value(val).Interface().(Name); ok && v.Local != "" {
+ start.Name = v
+ }
+ explicitNS = true
+ }
+ if start.Name.Local == "" && finfo != nil {
+ start.Name.Local = finfo.name
+ if finfo.xmlns != "" {
+ start.Name.Space = finfo.xmlns
+ explicitNS = true
+ }
+ }
+ if start.Name.Local == "" {
+ name := typ.Name()
+ if name == "" {
+ return &UnsupportedTypeError{typ}
+ }
+ start.Name.Local = name
+ }
+
+ // defaultNS records the default name space as set by a xmlns="..."
+ // attribute. We don't set p.defaultNS because we want to let
+ // the attribute writing code (in p.defineNS) be solely responsible
+ // for maintaining that.
+ defaultNS := p.defaultNS
+
+ // Attributes
+ for i := range tinfo.fields {
+ finfo := &tinfo.fields[i]
+ if finfo.flags&fAttr == 0 {
+ continue
+ }
+ attr, err := p.fieldAttr(finfo, val)
+ if err != nil {
+ return err
+ }
+ if attr.Name.Local == "" {
+ continue
+ }
+ start.Attr = append(start.Attr, attr)
+ if attr.Name.Space == "" && attr.Name.Local == "xmlns" {
+ defaultNS = attr.Value
+ }
+ }
+ if !explicitNS {
+ // Historic behavior: elements use the default name space
+ // they are contained in by default.
+ start.Name.Space = defaultNS
+ }
+ // Historic behaviour: an element that's in a namespace sets
+ // the default namespace for all elements contained within it.
+ start.setDefaultNamespace()
+
+ if err := p.writeStart(&start); err != nil {
+ return err
+ }
+
+ if val.Kind() == reflect.Struct {
+ err = p.marshalStruct(tinfo, val)
+ } else {
+ s, b, err1 := p.marshalSimple(typ, val)
+ if err1 != nil {
+ err = err1
+ } else if b != nil {
+ EscapeText(p, b)
+ } else {
+ p.EscapeString(s)
+ }
+ }
+ if err != nil {
+ return err
+ }
+
+ if err := p.writeEnd(start.Name); err != nil {
+ return err
+ }
+
+ return p.cachedWriteError()
+}
+
+// fieldAttr returns the attribute of the given field.
+// If the returned attribute has an empty Name.Local,
+// it should not be used.
+// The given value holds the value containing the field.
+func (p *printer) fieldAttr(finfo *fieldInfo, val reflect.Value) (Attr, error) {
+ fv := finfo.value(val)
+ name := Name{Space: finfo.xmlns, Local: finfo.name}
+ if finfo.flags&fOmitEmpty != 0 && isEmptyValue(fv) {
+ return Attr{}, nil
+ }
+ if fv.Kind() == reflect.Interface && fv.IsNil() {
+ return Attr{}, nil
+ }
+ if fv.CanInterface() && fv.Type().Implements(marshalerAttrType) {
+ attr, err := fv.Interface().(MarshalerAttr).MarshalXMLAttr(name)
+ return attr, err
+ }
+ if fv.CanAddr() {
+ pv := fv.Addr()
+ if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) {
+ attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name)
+ return attr, err
+ }
+ }
+ if fv.CanInterface() && fv.Type().Implements(textMarshalerType) {
+ text, err := fv.Interface().(encoding.TextMarshaler).MarshalText()
+ if err != nil {
+ return Attr{}, err
+ }
+ return Attr{name, string(text)}, nil
+ }
+ if fv.CanAddr() {
+ pv := fv.Addr()
+ if pv.CanInterface() && pv.Type().Implements(textMarshalerType) {
+ text, err := pv.Interface().(encoding.TextMarshaler).MarshalText()
+ if err != nil {
+ return Attr{}, err
+ }
+ return Attr{name, string(text)}, nil
+ }
+ }
+ // Dereference or skip nil pointer, interface values.
+ switch fv.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ if fv.IsNil() {
+ return Attr{}, nil
+ }
+ fv = fv.Elem()
+ }
+ s, b, err := p.marshalSimple(fv.Type(), fv)
+ if err != nil {
+ return Attr{}, err
+ }
+ if b != nil {
+ s = string(b)
+ }
+ return Attr{name, s}, nil
+}
+
+// defaultStart returns the default start element to use,
+// given the reflect type, field info, and start template.
+func (p *printer) defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement {
+ var start StartElement
+ // Precedence for the XML element name is as above,
+ // except that we do not look inside structs for the first field.
+ if startTemplate != nil {
+ start.Name = startTemplate.Name
+ start.Attr = append(start.Attr, startTemplate.Attr...)
+ } else if finfo != nil && finfo.name != "" {
+ start.Name.Local = finfo.name
+ start.Name.Space = finfo.xmlns
+ } else if typ.Name() != "" {
+ start.Name.Local = typ.Name()
+ } else {
+ // Must be a pointer to a named type,
+ // since it has the Marshaler methods.
+ start.Name.Local = typ.Elem().Name()
+ }
+ // Historic behaviour: elements use the name space of
+ // the element they are contained in by default.
+ if start.Name.Space == "" {
+ start.Name.Space = p.defaultNS
+ }
+ start.setDefaultNamespace()
+ return start
+}
+
+// marshalInterface marshals a Marshaler interface value.
+func (p *printer) marshalInterface(val Marshaler, start StartElement) error {
+ // Push a marker onto the tag stack so that MarshalXML
+ // cannot close the XML tags that it did not open.
+ p.tags = append(p.tags, Name{})
+ n := len(p.tags)
+
+ err := val.MarshalXML(p.encoder, start)
+ if err != nil {
+ return err
+ }
+
+ // Make sure MarshalXML closed all its tags. p.tags[n-1] is the mark.
+ if len(p.tags) > n {
+ return fmt.Errorf("xml: %s.MarshalXML wrote invalid XML: <%s> not closed", receiverType(val), p.tags[len(p.tags)-1].Local)
+ }
+ p.tags = p.tags[:n-1]
+ return nil
+}
+
+// marshalTextInterface marshals a TextMarshaler interface value.
+func (p *printer) marshalTextInterface(val encoding.TextMarshaler, start StartElement) error {
+ if err := p.writeStart(&start); err != nil {
+ return err
+ }
+ text, err := val.MarshalText()
+ if err != nil {
+ return err
+ }
+ EscapeText(p, text)
+ return p.writeEnd(start.Name)
+}
+
+// writeStart writes the given start element.
+func (p *printer) writeStart(start *StartElement) error {
+ if start.Name.Local == "" {
+ return fmt.Errorf("xml: start tag with no name")
+ }
+
+ p.tags = append(p.tags, start.Name)
+ p.markPrefix()
+ // Define any name spaces explicitly declared in the attributes.
+ // We do this as a separate pass so that explicitly declared prefixes
+ // will take precedence over implicitly declared prefixes
+ // regardless of the order of the attributes.
+ ignoreNonEmptyDefault := start.Name.Space == ""
+ for _, attr := range start.Attr {
+ if err := p.defineNS(attr, ignoreNonEmptyDefault); err != nil {
+ return err
+ }
+ }
+ // Define any new name spaces implied by the attributes.
+ for _, attr := range start.Attr {
+ name := attr.Name
+ // From http://www.w3.org/TR/xml-names11/#defaulting
+ // "Default namespace declarations do not apply directly
+ // to attribute names; the interpretation of unprefixed
+ // attributes is determined by the element on which they
+ // appear."
+ // This means we don't need to create a new namespace
+ // when an attribute name space is empty.
+ if name.Space != "" && !name.isNamespace() {
+ p.createNSPrefix(name.Space, true)
+ }
+ }
+ p.createNSPrefix(start.Name.Space, false)
+
+ p.writeIndent(1)
+ p.WriteByte('<')
+ p.writeName(start.Name, false)
+ p.writeNamespaces()
+ for _, attr := range start.Attr {
+ name := attr.Name
+ if name.Local == "" || name.isNamespace() {
+ // Namespaces have already been written by writeNamespaces above.
+ continue
+ }
+ p.WriteByte(' ')
+ p.writeName(name, true)
+ p.WriteString(`="`)
+ p.EscapeString(attr.Value)
+ p.WriteByte('"')
+ }
+ p.WriteByte('>')
+ return nil
+}
+
+// writeName writes the given name. It assumes
+// that p.createNSPrefix(name) has already been called.
+func (p *printer) writeName(name Name, isAttr bool) {
+ if prefix := p.prefixForNS(name.Space, isAttr); prefix != "" {
+ p.WriteString(prefix)
+ p.WriteByte(':')
+ }
+ p.WriteString(name.Local)
+}
+
+func (p *printer) writeEnd(name Name) error {
+ if name.Local == "" {
+ return fmt.Errorf("xml: end tag with no name")
+ }
+ if len(p.tags) == 0 || p.tags[len(p.tags)-1].Local == "" {
+ return fmt.Errorf("xml: end tag </%s> without start tag", name.Local)
+ }
+ if top := p.tags[len(p.tags)-1]; top != name {
+ if top.Local != name.Local {
+ return fmt.Errorf("xml: end tag </%s> does not match start tag <%s>", name.Local, top.Local)
+ }
+ return fmt.Errorf("xml: end tag </%s> in namespace %s does not match start tag <%s> in namespace %s", name.Local, name.Space, top.Local, top.Space)
+ }
+ p.tags = p.tags[:len(p.tags)-1]
+
+ p.writeIndent(-1)
+ p.WriteByte('<')
+ p.WriteByte('/')
+ p.writeName(name, false)
+ p.WriteByte('>')
+ p.popPrefix()
+ return nil
+}
+
+func (p *printer) marshalSimple(typ reflect.Type, val reflect.Value) (string, []byte, error) {
+ switch val.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(val.Int(), 10), nil, nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return strconv.FormatUint(val.Uint(), 10), nil, nil
+ case reflect.Float32, reflect.Float64:
+ return strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits()), nil, nil
+ case reflect.String:
+ return val.String(), nil, nil
+ case reflect.Bool:
+ return strconv.FormatBool(val.Bool()), nil, nil
+ case reflect.Array:
+ if typ.Elem().Kind() != reflect.Uint8 {
+ break
+ }
+ // [...]byte
+ var bytes []byte
+ if val.CanAddr() {
+ bytes = val.Slice(0, val.Len()).Bytes()
+ } else {
+ bytes = make([]byte, val.Len())
+ reflect.Copy(reflect.ValueOf(bytes), val)
+ }
+ return "", bytes, nil
+ case reflect.Slice:
+ if typ.Elem().Kind() != reflect.Uint8 {
+ break
+ }
+ // []byte
+ return "", val.Bytes(), nil
+ }
+ return "", nil, &UnsupportedTypeError{typ}
+}
+
+var ddBytes = []byte("--")
+
+func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error {
+ s := parentStack{p: p}
+ for i := range tinfo.fields {
+ finfo := &tinfo.fields[i]
+ if finfo.flags&fAttr != 0 {
+ continue
+ }
+ vf := finfo.value(val)
+
+ // Dereference or skip nil pointer, interface values.
+ switch vf.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ if !vf.IsNil() {
+ vf = vf.Elem()
+ }
+ }
+
+ switch finfo.flags & fMode {
+ case fCharData:
+ if err := s.setParents(&noField, reflect.Value{}); err != nil {
+ return err
+ }
+ if vf.CanInterface() && vf.Type().Implements(textMarshalerType) {
+ data, err := vf.Interface().(encoding.TextMarshaler).MarshalText()
+ if err != nil {
+ return err
+ }
+ Escape(p, data)
+ continue
+ }
+ if vf.CanAddr() {
+ pv := vf.Addr()
+ if pv.CanInterface() && pv.Type().Implements(textMarshalerType) {
+ data, err := pv.Interface().(encoding.TextMarshaler).MarshalText()
+ if err != nil {
+ return err
+ }
+ Escape(p, data)
+ continue
+ }
+ }
+ var scratch [64]byte
+ switch vf.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ Escape(p, strconv.AppendInt(scratch[:0], vf.Int(), 10))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ Escape(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10))
+ case reflect.Float32, reflect.Float64:
+ Escape(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits()))
+ case reflect.Bool:
+ Escape(p, strconv.AppendBool(scratch[:0], vf.Bool()))
+ case reflect.String:
+ if err := EscapeText(p, []byte(vf.String())); err != nil {
+ return err
+ }
+ case reflect.Slice:
+ if elem, ok := vf.Interface().([]byte); ok {
+ if err := EscapeText(p, elem); err != nil {
+ return err
+ }
+ }
+ }
+ continue
+
+ case fComment:
+ if err := s.setParents(&noField, reflect.Value{}); err != nil {
+ return err
+ }
+ k := vf.Kind()
+ if !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) {
+ return fmt.Errorf("xml: bad type for comment field of %s", val.Type())
+ }
+ if vf.Len() == 0 {
+ continue
+ }
+ p.writeIndent(0)
+ p.WriteString("<!--")
+ dashDash := false
+ dashLast := false
+ switch k {
+ case reflect.String:
+ s := vf.String()
+ dashDash = strings.Index(s, "--") >= 0
+ dashLast = s[len(s)-1] == '-'
+ if !dashDash {
+ p.WriteString(s)
+ }
+ case reflect.Slice:
+ b := vf.Bytes()
+ dashDash = bytes.Index(b, ddBytes) >= 0
+ dashLast = b[len(b)-1] == '-'
+ if !dashDash {
+ p.Write(b)
+ }
+ default:
+ panic("can't happen")
+ }
+ if dashDash {
+ return fmt.Errorf(`xml: comments must not contain "--"`)
+ }
+ if dashLast {
+ // "--->" is invalid grammar. Make it "- -->"
+ p.WriteByte(' ')
+ }
+ p.WriteString("-->")
+ continue
+
+ case fInnerXml:
+ iface := vf.Interface()
+ switch raw := iface.(type) {
+ case []byte:
+ p.Write(raw)
+ continue
+ case string:
+ p.WriteString(raw)
+ continue
+ }
+
+ case fElement, fElement | fAny:
+ if err := s.setParents(finfo, vf); err != nil {
+ return err
+ }
+ }
+ if err := p.marshalValue(vf, finfo, nil); err != nil {
+ return err
+ }
+ }
+ if err := s.setParents(&noField, reflect.Value{}); err != nil {
+ return err
+ }
+ return p.cachedWriteError()
+}
+
+var noField fieldInfo
+
+// return the bufio Writer's cached write error
+func (p *printer) cachedWriteError() error {
+ _, err := p.Write(nil)
+ return err
+}
+
+func (p *printer) writeIndent(depthDelta int) {
+ if len(p.prefix) == 0 && len(p.indent) == 0 {
+ return
+ }
+ if depthDelta < 0 {
+ p.depth--
+ if p.indentedIn {
+ p.indentedIn = false
+ return
+ }
+ p.indentedIn = false
+ }
+ if p.putNewline {
+ p.WriteByte('\n')
+ } else {
+ p.putNewline = true
+ }
+ if len(p.prefix) > 0 {
+ p.WriteString(p.prefix)
+ }
+ if len(p.indent) > 0 {
+ for i := 0; i < p.depth; i++ {
+ p.WriteString(p.indent)
+ }
+ }
+ if depthDelta > 0 {
+ p.depth++
+ p.indentedIn = true
+ }
+}
+
+type parentStack struct {
+ p *printer
+ xmlns string
+ parents []string
+}
+
+// setParents sets the stack of current parents to those found in finfo.
+// It only writes the start elements if vf holds a non-nil value.
+// If finfo is &noField, it pops all elements.
+func (s *parentStack) setParents(finfo *fieldInfo, vf reflect.Value) error {
+ xmlns := s.p.defaultNS
+ if finfo.xmlns != "" {
+ xmlns = finfo.xmlns
+ }
+ commonParents := 0
+ if xmlns == s.xmlns {
+ for ; commonParents < len(finfo.parents) && commonParents < len(s.parents); commonParents++ {
+ if finfo.parents[commonParents] != s.parents[commonParents] {
+ break
+ }
+ }
+ }
+ // Pop off any parents that aren't in common with the previous field.
+ for i := len(s.parents) - 1; i >= commonParents; i-- {
+ if err := s.p.writeEnd(Name{
+ Space: s.xmlns,
+ Local: s.parents[i],
+ }); err != nil {
+ return err
+ }
+ }
+ s.parents = finfo.parents
+ s.xmlns = xmlns
+ if commonParents >= len(s.parents) {
+ // No new elements to push.
+ return nil
+ }
+ if (vf.Kind() == reflect.Ptr || vf.Kind() == reflect.Interface) && vf.IsNil() {
+ // The element is nil, so no need for the start elements.
+ s.parents = s.parents[:commonParents]
+ return nil
+ }
+ // Push any new parents required.
+ for _, name := range s.parents[commonParents:] {
+ start := &StartElement{
+ Name: Name{
+ Space: s.xmlns,
+ Local: name,
+ },
+ }
+ // Set the default name space for parent elements
+ // to match what we do with other elements.
+ if s.xmlns != s.p.defaultNS {
+ start.setDefaultNamespace()
+ }
+ if err := s.p.writeStart(start); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// An UnsupportedTypeError is returned when Marshal encounters a type
+// that cannot be converted into XML.
+type UnsupportedTypeError struct {
+ Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+ return "xml: unsupported type: " + e.Type.String()
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
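
For the token-level API documented above (EncodeToken and Flush), a minimal sketch of driving an Encoder directly rather than through Encode or EncodeElement. It is illustrative and not part of the vendored code; since the fork's exported surface mirrors the standard library, the sketch imports encoding/xml instead of the vendored internal package.

package main

import (
	"encoding/xml"
	"log"
	"os"
)

func main() {
	enc := xml.NewEncoder(os.Stdout)
	start := xml.StartElement{
		Name: xml.Name{Local: "greeting"},
		Attr: []xml.Attr{{Name: xml.Name{Local: "lang"}, Value: "en"}},
	}
	for _, tok := range []xml.Token{
		start,
		xml.CharData("hello & goodbye"), // escaped to "hello &amp; goodbye"
		xml.EndElement{Name: start.Name},
	} {
		if err := enc.EncodeToken(tok); err != nil {
			log.Fatal(err)
		}
	}
	// EncodeToken does not flush; callers that bypass Encode/EncodeElement
	// must call Flush themselves before the output is complete.
	if err := enc.Flush(); err != nil {
		log.Fatal(err)
	}
	// Output: <greeting lang="en">hello &amp; goodbye</greeting>
}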
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go
new file mode 100644
index 000000000..226cfd013
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go
@@ -0,0 +1,1939 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+)
+
+type DriveType int
+
+const (
+ HyperDrive DriveType = iota
+ ImprobabilityDrive
+)
+
+type Passenger struct {
+ Name []string `xml:"name"`
+ Weight float32 `xml:"weight"`
+}
+
+type Ship struct {
+ XMLName struct{} `xml:"spaceship"`
+
+ Name string `xml:"name,attr"`
+ Pilot string `xml:"pilot,attr"`
+ Drive DriveType `xml:"drive"`
+ Age uint `xml:"age"`
+ Passenger []*Passenger `xml:"passenger"`
+ secret string
+}
+
+type NamedType string
+
+type Port struct {
+ XMLName struct{} `xml:"port"`
+ Type string `xml:"type,attr,omitempty"`
+ Comment string `xml:",comment"`
+ Number string `xml:",chardata"`
+}
+
+type Domain struct {
+ XMLName struct{} `xml:"domain"`
+ Country string `xml:",attr,omitempty"`
+ Name []byte `xml:",chardata"`
+ Comment []byte `xml:",comment"`
+}
+
+type Book struct {
+ XMLName struct{} `xml:"book"`
+ Title string `xml:",chardata"`
+}
+
+type Event struct {
+ XMLName struct{} `xml:"event"`
+ Year int `xml:",chardata"`
+}
+
+type Movie struct {
+ XMLName struct{} `xml:"movie"`
+ Length uint `xml:",chardata"`
+}
+
+type Pi struct {
+ XMLName struct{} `xml:"pi"`
+ Approximation float32 `xml:",chardata"`
+}
+
+type Universe struct {
+ XMLName struct{} `xml:"universe"`
+ Visible float64 `xml:",chardata"`
+}
+
+type Particle struct {
+ XMLName struct{} `xml:"particle"`
+ HasMass bool `xml:",chardata"`
+}
+
+type Departure struct {
+ XMLName struct{} `xml:"departure"`
+ When time.Time `xml:",chardata"`
+}
+
+type SecretAgent struct {
+ XMLName struct{} `xml:"agent"`
+ Handle string `xml:"handle,attr"`
+ Identity string
+ Obfuscate string `xml:",innerxml"`
+}
+
+type NestedItems struct {
+ XMLName struct{} `xml:"result"`
+ Items []string `xml:">item"`
+ Item1 []string `xml:"Items>item1"`
+}
+
+type NestedOrder struct {
+ XMLName struct{} `xml:"result"`
+ Field1 string `xml:"parent>c"`
+ Field2 string `xml:"parent>b"`
+ Field3 string `xml:"parent>a"`
+}
+
+type MixedNested struct {
+ XMLName struct{} `xml:"result"`
+ A string `xml:"parent1>a"`
+ B string `xml:"b"`
+ C string `xml:"parent1>parent2>c"`
+ D string `xml:"parent1>d"`
+}
+
+type NilTest struct {
+ A interface{} `xml:"parent1>parent2>a"`
+ B interface{} `xml:"parent1>b"`
+ C interface{} `xml:"parent1>parent2>c"`
+}
+
+type Service struct {
+ XMLName struct{} `xml:"service"`
+ Domain *Domain `xml:"host>domain"`
+ Port *Port `xml:"host>port"`
+ Extra1 interface{}
+ Extra2 interface{} `xml:"host>extra2"`
+}
+
+var nilStruct *Ship
+
+type EmbedA struct {
+ EmbedC
+ EmbedB EmbedB
+ FieldA string
+}
+
+type EmbedB struct {
+ FieldB string
+ *EmbedC
+}
+
+type EmbedC struct {
+ FieldA1 string `xml:"FieldA>A1"`
+ FieldA2 string `xml:"FieldA>A2"`
+ FieldB string
+ FieldC string
+}
+
+type NameCasing struct {
+ XMLName struct{} `xml:"casing"`
+ Xy string
+ XY string
+ XyA string `xml:"Xy,attr"`
+ XYA string `xml:"XY,attr"`
+}
+
+type NamePrecedence struct {
+ XMLName Name `xml:"Parent"`
+ FromTag XMLNameWithoutTag `xml:"InTag"`
+ FromNameVal XMLNameWithoutTag
+ FromNameTag XMLNameWithTag
+ InFieldName string
+}
+
+type XMLNameWithTag struct {
+ XMLName Name `xml:"InXMLNameTag"`
+ Value string `xml:",chardata"`
+}
+
+type XMLNameWithNSTag struct {
+ XMLName Name `xml:"ns InXMLNameWithNSTag"`
+ Value string `xml:",chardata"`
+}
+
+type XMLNameWithoutTag struct {
+ XMLName Name
+ Value string `xml:",chardata"`
+}
+
+type NameInField struct {
+ Foo Name `xml:"ns foo"`
+}
+
+type AttrTest struct {
+ Int int `xml:",attr"`
+ Named int `xml:"int,attr"`
+ Float float64 `xml:",attr"`
+ Uint8 uint8 `xml:",attr"`
+ Bool bool `xml:",attr"`
+ Str string `xml:",attr"`
+ Bytes []byte `xml:",attr"`
+}
+
+type OmitAttrTest struct {
+ Int int `xml:",attr,omitempty"`
+ Named int `xml:"int,attr,omitempty"`
+ Float float64 `xml:",attr,omitempty"`
+ Uint8 uint8 `xml:",attr,omitempty"`
+ Bool bool `xml:",attr,omitempty"`
+ Str string `xml:",attr,omitempty"`
+ Bytes []byte `xml:",attr,omitempty"`
+}
+
+type OmitFieldTest struct {
+ Int int `xml:",omitempty"`
+ Named int `xml:"int,omitempty"`
+ Float float64 `xml:",omitempty"`
+ Uint8 uint8 `xml:",omitempty"`
+ Bool bool `xml:",omitempty"`
+ Str string `xml:",omitempty"`
+ Bytes []byte `xml:",omitempty"`
+ Ptr *PresenceTest `xml:",omitempty"`
+}
+
+type AnyTest struct {
+ XMLName struct{} `xml:"a"`
+ Nested string `xml:"nested>value"`
+ AnyField AnyHolder `xml:",any"`
+}
+
+type AnyOmitTest struct {
+ XMLName struct{} `xml:"a"`
+ Nested string `xml:"nested>value"`
+ AnyField *AnyHolder `xml:",any,omitempty"`
+}
+
+type AnySliceTest struct {
+ XMLName struct{} `xml:"a"`
+ Nested string `xml:"nested>value"`
+ AnyField []AnyHolder `xml:",any"`
+}
+
+type AnyHolder struct {
+ XMLName Name
+ XML string `xml:",innerxml"`
+}
+
+type RecurseA struct {
+ A string
+ B *RecurseB
+}
+
+type RecurseB struct {
+ A *RecurseA
+ B string
+}
+
+type PresenceTest struct {
+ Exists *struct{}
+}
+
+type IgnoreTest struct {
+ PublicSecret string `xml:"-"`
+}
+
+type MyBytes []byte
+
+type Data struct {
+ Bytes []byte
+ Attr []byte `xml:",attr"`
+ Custom MyBytes
+}
+
+type Plain struct {
+ V interface{}
+}
+
+type MyInt int
+
+type EmbedInt struct {
+ MyInt
+}
+
+type Strings struct {
+ X []string `xml:"A>B,omitempty"`
+}
+
+type PointerFieldsTest struct {
+ XMLName Name `xml:"dummy"`
+ Name *string `xml:"name,attr"`
+ Age *uint `xml:"age,attr"`
+ Empty *string `xml:"empty,attr"`
+ Contents *string `xml:",chardata"`
+}
+
+type ChardataEmptyTest struct {
+ XMLName Name `xml:"test"`
+ Contents *string `xml:",chardata"`
+}
+
+type MyMarshalerTest struct {
+}
+
+var _ Marshaler = (*MyMarshalerTest)(nil)
+
+func (m *MyMarshalerTest) MarshalXML(e *Encoder, start StartElement) error {
+ e.EncodeToken(start)
+ e.EncodeToken(CharData([]byte("hello world")))
+ e.EncodeToken(EndElement{start.Name})
+ return nil
+}
+
+type MyMarshalerAttrTest struct{}
+
+var _ MarshalerAttr = (*MyMarshalerAttrTest)(nil)
+
+func (m *MyMarshalerAttrTest) MarshalXMLAttr(name Name) (Attr, error) {
+ return Attr{name, "hello world"}, nil
+}
+
+type MyMarshalerValueAttrTest struct{}
+
+var _ MarshalerAttr = MyMarshalerValueAttrTest{}
+
+func (m MyMarshalerValueAttrTest) MarshalXMLAttr(name Name) (Attr, error) {
+ return Attr{name, "hello world"}, nil
+}
+
+type MarshalerStruct struct {
+ Foo MyMarshalerAttrTest `xml:",attr"`
+}
+
+type MarshalerValueStruct struct {
+ Foo MyMarshalerValueAttrTest `xml:",attr"`
+}
+
+type InnerStruct struct {
+ XMLName Name `xml:"testns outer"`
+}
+
+type OuterStruct struct {
+ InnerStruct
+ IntAttr int `xml:"int,attr"`
+}
+
+type OuterNamedStruct struct {
+ InnerStruct
+ XMLName Name `xml:"outerns test"`
+ IntAttr int `xml:"int,attr"`
+}
+
+type OuterNamedOrderedStruct struct {
+ XMLName Name `xml:"outerns test"`
+ InnerStruct
+ IntAttr int `xml:"int,attr"`
+}
+
+type OuterOuterStruct struct {
+ OuterStruct
+}
+
+type NestedAndChardata struct {
+ AB []string `xml:"A>B"`
+ Chardata string `xml:",chardata"`
+}
+
+type NestedAndComment struct {
+ AB []string `xml:"A>B"`
+ Comment string `xml:",comment"`
+}
+
+type XMLNSFieldStruct struct {
+ Ns string `xml:"xmlns,attr"`
+ Body string
+}
+
+type NamedXMLNSFieldStruct struct {
+ XMLName struct{} `xml:"testns test"`
+ Ns string `xml:"xmlns,attr"`
+ Body string
+}
+
+type XMLNSFieldStructWithOmitEmpty struct {
+ Ns string `xml:"xmlns,attr,omitempty"`
+ Body string
+}
+
+type NamedXMLNSFieldStructWithEmptyNamespace struct {
+ XMLName struct{} `xml:"test"`
+ Ns string `xml:"xmlns,attr"`
+ Body string
+}
+
+type RecursiveXMLNSFieldStruct struct {
+ Ns string `xml:"xmlns,attr"`
+ Body *RecursiveXMLNSFieldStruct `xml:",omitempty"`
+ Text string `xml:",omitempty"`
+}
+
+func ifaceptr(x interface{}) interface{} {
+ return &x
+}
+
+var (
+ nameAttr = "Sarah"
+ ageAttr = uint(12)
+ contentsAttr = "lorem ipsum"
+)
+
+// Unless explicitly stated as such (or *Plain), all of the
+// tests below are two-way tests. When introducing new tests,
+// please try to make them two-way as well to ensure that
+// marshalling and unmarshalling are as symmetrical as feasible.
+var marshalTests = []struct {
+ Value interface{}
+ ExpectXML string
+ MarshalOnly bool
+ UnmarshalOnly bool
+}{
+ // Test nil marshals to nothing
+ {Value: nil, ExpectXML: ``, MarshalOnly: true},
+ {Value: nilStruct, ExpectXML: ``, MarshalOnly: true},
+
+ // Test value types
+ {Value: &Plain{true}, ExpectXML: `<Plain><V>true</V></Plain>`},
+ {Value: &Plain{false}, ExpectXML: `<Plain><V>false</V></Plain>`},
+ {Value: &Plain{int(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+ {Value: &Plain{int8(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+ {Value: &Plain{int16(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+ {Value: &Plain{int32(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+ {Value: &Plain{uint(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+ {Value: &Plain{uint8(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+ {Value: &Plain{uint16(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+ {Value: &Plain{uint32(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},
+ {Value: &Plain{float32(1.25)}, ExpectXML: `<Plain><V>1.25</V></Plain>`},
+ {Value: &Plain{float64(1.25)}, ExpectXML: `<Plain><V>1.25</V></Plain>`},
+ {Value: &Plain{uintptr(0xFFDD)}, ExpectXML: `<Plain><V>65501</V></Plain>`},
+ {Value: &Plain{"gopher"}, ExpectXML: `<Plain><V>gopher</V></Plain>`},
+ {Value: &Plain{[]byte("gopher")}, ExpectXML: `<Plain><V>gopher</V></Plain>`},
+ {Value: &Plain{"</>"}, ExpectXML: `<Plain><V>&lt;/&gt;</V></Plain>`},
+ {Value: &Plain{[]byte("</>")}, ExpectXML: `<Plain><V>&lt;/&gt;</V></Plain>`},
+ {Value: &Plain{[3]byte{'<', '/', '>'}}, ExpectXML: `<Plain><V>&lt;/&gt;</V></Plain>`},
+ {Value: &Plain{NamedType("potato")}, ExpectXML: `<Plain><V>potato</V></Plain>`},
+ {Value: &Plain{[]int{1, 2, 3}}, ExpectXML: `<Plain><V>1</V><V>2</V><V>3</V></Plain>`},
+ {Value: &Plain{[3]int{1, 2, 3}}, ExpectXML: `<Plain><V>1</V><V>2</V><V>3</V></Plain>`},
+ {Value: ifaceptr(true), MarshalOnly: true, ExpectXML: `<bool>true</bool>`},
+
+ // Test time.
+ {
+ Value: &Plain{time.Unix(1e9, 123456789).UTC()},
+ ExpectXML: `<Plain><V>2001-09-09T01:46:40.123456789Z</V></Plain>`,
+ },
+
+ // A pointer to struct{} may be used to test for an element's presence.
+ {
+ Value: &PresenceTest{new(struct{})},
+ ExpectXML: `<PresenceTest><Exists></Exists></PresenceTest>`,
+ },
+ {
+ Value: &PresenceTest{},
+ ExpectXML: `<PresenceTest></PresenceTest>`,
+ },
+
+ // A []byte field is only nil if the element was not found.
+ {
+ Value: &Data{},
+ ExpectXML: `<Data></Data>`,
+ UnmarshalOnly: true,
+ },
+ {
+ Value: &Data{Bytes: []byte{}, Custom: MyBytes{}, Attr: []byte{}},
+ ExpectXML: `<Data Attr=""><Bytes></Bytes><Custom></Custom></Data>`,
+ UnmarshalOnly: true,
+ },
+
+ // Check that []byte works, including named []byte types.
+ {
+ Value: &Data{Bytes: []byte("ab"), Custom: MyBytes("cd"), Attr: []byte{'v'}},
+ ExpectXML: `<Data Attr="v"><Bytes>ab</Bytes><Custom>cd</Custom></Data>`,
+ },
+
+ // Test innerxml
+ {
+ Value: &SecretAgent{
+ Handle: "007",
+ Identity: "James Bond",
+ Obfuscate: "<redacted/>",
+ },
+ ExpectXML: `<agent handle="007"><Identity>James Bond</Identity><redacted/></agent>`,
+ MarshalOnly: true,
+ },
+ {
+ Value: &SecretAgent{
+ Handle: "007",
+ Identity: "James Bond",
+ Obfuscate: "<Identity>James Bond</Identity><redacted/>",
+ },
+ ExpectXML: `<agent handle="007"><Identity>James Bond</Identity><redacted/></agent>`,
+ UnmarshalOnly: true,
+ },
+
+ // Test structs
+ {Value: &Port{Type: "ssl", Number: "443"}, ExpectXML: `<port type="ssl">443</port>`},
+ {Value: &Port{Number: "443"}, ExpectXML: `<port>443</port>`},
+ {Value: &Port{Type: "<unix>"}, ExpectXML: `<port type="&lt;unix&gt;"></port>`},
+ {Value: &Port{Number: "443", Comment: "https"}, ExpectXML: `<port><!--https-->443</port>`},
+ {Value: &Port{Number: "443", Comment: "add space-"}, ExpectXML: `<port><!--add space- -->443</port>`, MarshalOnly: true},
+ {Value: &Domain{Name: []byte("google.com&friends")}, ExpectXML: `<domain>google.com&amp;friends</domain>`},
+ {Value: &Domain{Name: []byte("google.com"), Comment: []byte(" &friends ")}, ExpectXML: `<domain>google.com<!-- &friends --></domain>`},
+ {Value: &Book{Title: "Pride & Prejudice"}, ExpectXML: `<book>Pride &amp; Prejudice</book>`},
+ {Value: &Event{Year: -3114}, ExpectXML: `<event>-3114</event>`},
+ {Value: &Movie{Length: 13440}, ExpectXML: `<movie>13440</movie>`},
+ {Value: &Pi{Approximation: 3.14159265}, ExpectXML: `<pi>3.1415927</pi>`},
+ {Value: &Universe{Visible: 9.3e13}, ExpectXML: `<universe>9.3e+13</universe>`},
+ {Value: &Particle{HasMass: true}, ExpectXML: `<particle>true</particle>`},
+ {Value: &Departure{When: ParseTime("2013-01-09T00:15:00-09:00")}, ExpectXML: `<departure>2013-01-09T00:15:00-09:00</departure>`},
+ {Value: atomValue, ExpectXML: atomXml},
+ {
+ Value: &Ship{
+ Name: "Heart of Gold",
+ Pilot: "Computer",
+ Age: 1,
+ Drive: ImprobabilityDrive,
+ Passenger: []*Passenger{
+ {
+ Name: []string{"Zaphod", "Beeblebrox"},
+ Weight: 7.25,
+ },
+ {
+ Name: []string{"Trisha", "McMillen"},
+ Weight: 5.5,
+ },
+ {
+ Name: []string{"Ford", "Prefect"},
+ Weight: 7,
+ },
+ {
+ Name: []string{"Arthur", "Dent"},
+ Weight: 6.75,
+ },
+ },
+ },
+ ExpectXML: `<spaceship name="Heart of Gold" pilot="Computer">` +
+ `<drive>` + strconv.Itoa(int(ImprobabilityDrive)) + `</drive>` +
+ `<age>1</age>` +
+ `<passenger>` +
+ `<name>Zaphod</name>` +
+ `<name>Beeblebrox</name>` +
+ `<weight>7.25</weight>` +
+ `</passenger>` +
+ `<passenger>` +
+ `<name>Trisha</name>` +
+ `<name>McMillen</name>` +
+ `<weight>5.5</weight>` +
+ `</passenger>` +
+ `<passenger>` +
+ `<name>Ford</name>` +
+ `<name>Prefect</name>` +
+ `<weight>7</weight>` +
+ `</passenger>` +
+ `<passenger>` +
+ `<name>Arthur</name>` +
+ `<name>Dent</name>` +
+ `<weight>6.75</weight>` +
+ `</passenger>` +
+ `</spaceship>`,
+ },
+
+ // Test a>b
+ {
+ Value: &NestedItems{Items: nil, Item1: nil},
+ ExpectXML: `<result>` +
+ `<Items>` +
+ `</Items>` +
+ `</result>`,
+ },
+ {
+ Value: &NestedItems{Items: []string{}, Item1: []string{}},
+ ExpectXML: `<result>` +
+ `<Items>` +
+ `</Items>` +
+ `</result>`,
+ MarshalOnly: true,
+ },
+ {
+ Value: &NestedItems{Items: nil, Item1: []string{"A"}},
+ ExpectXML: `<result>` +
+ `<Items>` +
+ `<item1>A</item1>` +
+ `</Items>` +
+ `</result>`,
+ },
+ {
+ Value: &NestedItems{Items: []string{"A", "B"}, Item1: nil},
+ ExpectXML: `<result>` +
+ `<Items>` +
+ `<item>A</item>` +
+ `<item>B</item>` +
+ `</Items>` +
+ `</result>`,
+ },
+ {
+ Value: &NestedItems{Items: []string{"A", "B"}, Item1: []string{"C"}},
+ ExpectXML: `<result>` +
+ `<Items>` +
+ `<item>A</item>` +
+ `<item>B</item>` +
+ `<item1>C</item1>` +
+ `</Items>` +
+ `</result>`,
+ },
+ {
+ Value: &NestedOrder{Field1: "C", Field2: "B", Field3: "A"},
+ ExpectXML: `<result>` +
+ `<parent>` +
+ `<c>C</c>` +
+ `<b>B</b>` +
+ `<a>A</a>` +
+ `</parent>` +
+ `</result>`,
+ },
+ {
+ Value: &NilTest{A: "A", B: nil, C: "C"},
+ ExpectXML: `<NilTest>` +
+ `<parent1>` +
+ `<parent2><a>A</a></parent2>` +
+ `<parent2><c>C</c></parent2>` +
+ `</parent1>` +
+ `</NilTest>`,
+ MarshalOnly: true, // Uses interface{}
+ },
+ {
+ Value: &MixedNested{A: "A", B: "B", C: "C", D: "D"},
+ ExpectXML: `<result>` +
+ `<parent1><a>A</a></parent1>` +
+ `<b>B</b>` +
+ `<parent1>` +
+ `<parent2><c>C</c></parent2>` +
+ `<d>D</d>` +
+ `</parent1>` +
+ `</result>`,
+ },
+ {
+ Value: &Service{Port: &Port{Number: "80"}},
+ ExpectXML: `<service><host><port>80</port></host></service>`,
+ },
+ {
+ Value: &Service{},
+ ExpectXML: `<service></service>`,
+ },
+ {
+ Value: &Service{Port: &Port{Number: "80"}, Extra1: "A", Extra2: "B"},
+ ExpectXML: `<service>` +
+ `<host><port>80</port></host>` +
+ `<Extra1>A</Extra1>` +
+ `<host><extra2>B</extra2></host>` +
+ `</service>`,
+ MarshalOnly: true,
+ },
+ {
+ Value: &Service{Port: &Port{Number: "80"}, Extra2: "example"},
+ ExpectXML: `<service>` +
+ `<host><port>80</port></host>` +
+ `<host><extra2>example</extra2></host>` +
+ `</service>`,
+ MarshalOnly: true,
+ },
+ {
+ Value: &struct {
+ XMLName struct{} `xml:"space top"`
+ A string `xml:"x>a"`
+ B string `xml:"x>b"`
+ C string `xml:"space x>c"`
+ C1 string `xml:"space1 x>c"`
+ D1 string `xml:"space1 x>d"`
+ E1 string `xml:"x>e"`
+ }{
+ A: "a",
+ B: "b",
+ C: "c",
+ C1: "c1",
+ D1: "d1",
+ E1: "e1",
+ },
+ ExpectXML: `<top xmlns="space">` +
+ `<x><a>a</a><b>b</b><c>c</c></x>` +
+ `<x xmlns="space1">` +
+ `<c>c1</c>` +
+ `<d>d1</d>` +
+ `</x>` +
+ `<x>` +
+ `<e>e1</e>` +
+ `</x>` +
+ `</top>`,
+ },
+ {
+ Value: &struct {
+ XMLName Name
+ A string `xml:"x>a"`
+ B string `xml:"x>b"`
+ C string `xml:"space x>c"`
+ C1 string `xml:"space1 x>c"`
+ D1 string `xml:"space1 x>d"`
+ }{
+ XMLName: Name{
+ Space: "space0",
+ Local: "top",
+ },
+ A: "a",
+ B: "b",
+ C: "c",
+ C1: "c1",
+ D1: "d1",
+ },
+ ExpectXML: `<top xmlns="space0">` +
+ `<x><a>a</a><b>b</b></x>` +
+ `<x xmlns="space"><c>c</c></x>` +
+ `<x xmlns="space1">` +
+ `<c>c1</c>` +
+ `<d>d1</d>` +
+ `</x>` +
+ `</top>`,
+ },
+ {
+ Value: &struct {
+ XMLName struct{} `xml:"top"`
+ B string `xml:"space x>b"`
+ B1 string `xml:"space1 x>b"`
+ }{
+ B: "b",
+ B1: "b1",
+ },
+ ExpectXML: `<top>` +
+ `<x xmlns="space"><b>b</b></x>` +
+ `<x xmlns="space1"><b>b1</b></x>` +
+ `</top>`,
+ },
+
+ // Test struct embedding
+ {
+ Value: &EmbedA{
+ EmbedC: EmbedC{
+ FieldA1: "", // Shadowed by A.A
+ FieldA2: "", // Shadowed by A.A
+ FieldB: "A.C.B",
+ FieldC: "A.C.C",
+ },
+ EmbedB: EmbedB{
+ FieldB: "A.B.B",
+ EmbedC: &EmbedC{
+ FieldA1: "A.B.C.A1",
+ FieldA2: "A.B.C.A2",
+ FieldB: "", // Shadowed by A.B.B
+ FieldC: "A.B.C.C",
+ },
+ },
+ FieldA: "A.A",
+ },
+ ExpectXML: `<EmbedA>` +
+ `<FieldB>A.C.B</FieldB>` +
+ `<FieldC>A.C.C</FieldC>` +
+ `<EmbedB>` +
+ `<FieldB>A.B.B</FieldB>` +
+ `<FieldA>` +
+ `<A1>A.B.C.A1</A1>` +
+ `<A2>A.B.C.A2</A2>` +
+ `</FieldA>` +
+ `<FieldC>A.B.C.C</FieldC>` +
+ `</EmbedB>` +
+ `<FieldA>A.A</FieldA>` +
+ `</EmbedA>`,
+ },
+
+ // Test that name casing matters
+ {
+ Value: &NameCasing{Xy: "mixed", XY: "upper", XyA: "mixedA", XYA: "upperA"},
+ ExpectXML: `<casing Xy="mixedA" XY="upperA"><Xy>mixed</Xy><XY>upper</XY></casing>`,
+ },
+
+ // Test the order in which the XML element name is chosen
+ {
+ Value: &NamePrecedence{
+ FromTag: XMLNameWithoutTag{Value: "A"},
+ FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "InXMLName"}, Value: "B"},
+ FromNameTag: XMLNameWithTag{Value: "C"},
+ InFieldName: "D",
+ },
+ ExpectXML: `<Parent>` +
+ `<InTag>A</InTag>` +
+ `<InXMLName>B</InXMLName>` +
+ `<InXMLNameTag>C</InXMLNameTag>` +
+ `<InFieldName>D</InFieldName>` +
+ `</Parent>`,
+ MarshalOnly: true,
+ },
+ {
+ Value: &NamePrecedence{
+ XMLName: Name{Local: "Parent"},
+ FromTag: XMLNameWithoutTag{XMLName: Name{Local: "InTag"}, Value: "A"},
+ FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "FromNameVal"}, Value: "B"},
+ FromNameTag: XMLNameWithTag{XMLName: Name{Local: "InXMLNameTag"}, Value: "C"},
+ InFieldName: "D",
+ },
+ ExpectXML: `<Parent>` +
+ `<InTag>A</InTag>` +
+ `<FromNameVal>B</FromNameVal>` +
+ `<InXMLNameTag>C</InXMLNameTag>` +
+ `<InFieldName>D</InFieldName>` +
+ `</Parent>`,
+ UnmarshalOnly: true,
+ },
+
+ // xml.Name works in a plain field as well.
+ {
+ Value: &NameInField{Name{Space: "ns", Local: "foo"}},
+ ExpectXML: `<NameInField><foo xmlns="ns"></foo></NameInField>`,
+ },
+ {
+ Value: &NameInField{Name{Space: "ns", Local: "foo"}},
+ ExpectXML: `<NameInField><foo xmlns="ns"><ignore></ignore></foo></NameInField>`,
+ UnmarshalOnly: true,
+ },
+
+ // Marshaling zero xml.Name uses the tag or field name.
+ {
+ Value: &NameInField{},
+ ExpectXML: `<NameInField><foo xmlns="ns"></foo></NameInField>`,
+ MarshalOnly: true,
+ },
+
+ // Test attributes
+ {
+ Value: &AttrTest{
+ Int: 8,
+ Named: 9,
+ Float: 23.5,
+ Uint8: 255,
+ Bool: true,
+ Str: "str",
+ Bytes: []byte("byt"),
+ },
+ ExpectXML: `<AttrTest Int="8" int="9" Float="23.5" Uint8="255"` +
+ ` Bool="true" Str="str" Bytes="byt"></AttrTest>`,
+ },
+ {
+ Value: &AttrTest{Bytes: []byte{}},
+ ExpectXML: `<AttrTest Int="0" int="0" Float="0" Uint8="0"` +
+ ` Bool="false" Str="" Bytes=""></AttrTest>`,
+ },
+ {
+ Value: &OmitAttrTest{
+ Int: 8,
+ Named: 9,
+ Float: 23.5,
+ Uint8: 255,
+ Bool: true,
+ Str: "str",
+ Bytes: []byte("byt"),
+ },
+ ExpectXML: `<OmitAttrTest Int="8" int="9" Float="23.5" Uint8="255"` +
+ ` Bool="true" Str="str" Bytes="byt"></OmitAttrTest>`,
+ },
+ {
+ Value: &OmitAttrTest{},
+ ExpectXML: `<OmitAttrTest></OmitAttrTest>`,
+ },
+
+ // pointer fields
+ {
+ Value: &PointerFieldsTest{Name: &nameAttr, Age: &ageAttr, Contents: &contentsAttr},
+ ExpectXML: `<dummy name="Sarah" age="12">lorem ipsum</dummy>`,
+ MarshalOnly: true,
+ },
+
+ // empty chardata pointer field
+ {
+ Value: &ChardataEmptyTest{},
+ ExpectXML: `<test></test>`,
+ MarshalOnly: true,
+ },
+
+ // omitempty on fields
+ {
+ Value: &OmitFieldTest{
+ Int: 8,
+ Named: 9,
+ Float: 23.5,
+ Uint8: 255,
+ Bool: true,
+ Str: "str",
+ Bytes: []byte("byt"),
+ Ptr: &PresenceTest{},
+ },
+ ExpectXML: `<OmitFieldTest>` +
+ `<Int>8</Int>` +
+ `<int>9</int>` +
+ `<Float>23.5</Float>` +
+ `<Uint8>255</Uint8>` +
+ `<Bool>true</Bool>` +
+ `<Str>str</Str>` +
+ `<Bytes>byt</Bytes>` +
+ `<Ptr></Ptr>` +
+ `</OmitFieldTest>`,
+ },
+ {
+ Value: &OmitFieldTest{},
+ ExpectXML: `<OmitFieldTest></OmitFieldTest>`,
+ },
+
+ // Test ",any"
+ {
+ ExpectXML: `<a><nested><value>known</value></nested><other><sub>unknown</sub></other></a>`,
+ Value: &AnyTest{
+ Nested: "known",
+ AnyField: AnyHolder{
+ XMLName: Name{Local: "other"},
+ XML: "<sub>unknown</sub>",
+ },
+ },
+ },
+ {
+ Value: &AnyTest{Nested: "known",
+ AnyField: AnyHolder{
+ XML: "<unknown/>",
+ XMLName: Name{Local: "AnyField"},
+ },
+ },
+ ExpectXML: `<a><nested><value>known</value></nested><AnyField><unknown/></AnyField></a>`,
+ },
+ {
+ ExpectXML: `<a><nested><value>b</value></nested></a>`,
+ Value: &AnyOmitTest{
+ Nested: "b",
+ },
+ },
+ {
+ ExpectXML: `<a><nested><value>b</value></nested><c><d>e</d></c><g xmlns="f"><h>i</h></g></a>`,
+ Value: &AnySliceTest{
+ Nested: "b",
+ AnyField: []AnyHolder{
+ {
+ XMLName: Name{Local: "c"},
+ XML: "<d>e</d>",
+ },
+ {
+ XMLName: Name{Space: "f", Local: "g"},
+ XML: "<h>i</h>",
+ },
+ },
+ },
+ },
+ {
+ ExpectXML: `<a><nested><value>b</value></nested></a>`,
+ Value: &AnySliceTest{
+ Nested: "b",
+ },
+ },
+
+ // Test recursive types.
+ {
+ Value: &RecurseA{
+ A: "a1",
+ B: &RecurseB{
+ A: &RecurseA{"a2", nil},
+ B: "b1",
+ },
+ },
+ ExpectXML: `<RecurseA><A>a1</A><B><A><A>a2</A></A><B>b1</B></B></RecurseA>`,
+ },
+
+ // Test ignoring fields via "-" tag
+ {
+ ExpectXML: `<IgnoreTest></IgnoreTest>`,
+ Value: &IgnoreTest{},
+ },
+ {
+ ExpectXML: `<IgnoreTest></IgnoreTest>`,
+ Value: &IgnoreTest{PublicSecret: "can't tell"},
+ MarshalOnly: true,
+ },
+ {
+ ExpectXML: `<IgnoreTest><PublicSecret>ignore me</PublicSecret></IgnoreTest>`,
+ Value: &IgnoreTest{},
+ UnmarshalOnly: true,
+ },
+
+ // Test escaping.
+ {
+ ExpectXML: `<a><nested><value>dquote: &#34;; squote: &#39;; ampersand: &amp;; less: &lt;; greater: &gt;;</value></nested><empty></empty></a>`,
+ Value: &AnyTest{
+ Nested: `dquote: "; squote: '; ampersand: &; less: <; greater: >;`,
+ AnyField: AnyHolder{XMLName: Name{Local: "empty"}},
+ },
+ },
+ {
+ ExpectXML: `<a><nested><value>newline: &#xA;; cr: &#xD;; tab: &#x9;;</value></nested><AnyField></AnyField></a>`,
+ Value: &AnyTest{
+ Nested: "newline: \n; cr: \r; tab: \t;",
+ AnyField: AnyHolder{XMLName: Name{Local: "AnyField"}},
+ },
+ },
+ {
+ ExpectXML: "<a><nested><value>1\r2\r\n3\n\r4\n5</value></nested></a>",
+ Value: &AnyTest{
+ Nested: "1\n2\n3\n\n4\n5",
+ },
+ UnmarshalOnly: true,
+ },
+ {
+ ExpectXML: `<EmbedInt><MyInt>42</MyInt></EmbedInt>`,
+ Value: &EmbedInt{
+ MyInt: 42,
+ },
+ },
+ // Test omitempty with parent chain; see golang.org/issue/4168.
+ {
+ ExpectXML: `<Strings><A></A></Strings>`,
+ Value: &Strings{},
+ },
+ // Custom marshalers.
+ {
+ ExpectXML: `<MyMarshalerTest>hello world</MyMarshalerTest>`,
+ Value: &MyMarshalerTest{},
+ },
+ {
+ ExpectXML: `<MarshalerStruct Foo="hello world"></MarshalerStruct>`,
+ Value: &MarshalerStruct{},
+ },
+ {
+ ExpectXML: `<MarshalerValueStruct Foo="hello world"></MarshalerValueStruct>`,
+ Value: &MarshalerValueStruct{},
+ },
+ {
+ ExpectXML: `<outer xmlns="testns" int="10"></outer>`,
+ Value: &OuterStruct{IntAttr: 10},
+ },
+ {
+ ExpectXML: `<test xmlns="outerns" int="10"></test>`,
+ Value: &OuterNamedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10},
+ },
+ {
+ ExpectXML: `<test xmlns="outerns" int="10"></test>`,
+ Value: &OuterNamedOrderedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10},
+ },
+ {
+ ExpectXML: `<outer xmlns="testns" int="10"></outer>`,
+ Value: &OuterOuterStruct{OuterStruct{IntAttr: 10}},
+ },
+ {
+ ExpectXML: `<NestedAndChardata><A><B></B><B></B></A>test</NestedAndChardata>`,
+ Value: &NestedAndChardata{AB: make([]string, 2), Chardata: "test"},
+ },
+ {
+ ExpectXML: `<NestedAndComment><A><B></B><B></B></A><!--test--></NestedAndComment>`,
+ Value: &NestedAndComment{AB: make([]string, 2), Comment: "test"},
+ },
+ {
+ ExpectXML: `<XMLNSFieldStruct xmlns="http://example.com/ns"><Body>hello world</Body></XMLNSFieldStruct>`,
+ Value: &XMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"},
+ },
+ {
+ ExpectXML: `<testns:test xmlns:testns="testns" xmlns="http://example.com/ns"><Body>hello world</Body></testns:test>`,
+ Value: &NamedXMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"},
+ },
+ {
+ ExpectXML: `<testns:test xmlns:testns="testns"><Body>hello world</Body></testns:test>`,
+ Value: &NamedXMLNSFieldStruct{Ns: "", Body: "hello world"},
+ },
+ {
+ ExpectXML: `<XMLNSFieldStructWithOmitEmpty><Body>hello world</Body></XMLNSFieldStructWithOmitEmpty>`,
+ Value: &XMLNSFieldStructWithOmitEmpty{Body: "hello world"},
+ },
+ {
+ // The xmlns attribute must be ignored because the <test>
+ // element is in the empty namespace, so it's not possible
+ // to set the default namespace to something non-empty.
+ ExpectXML: `<test><Body>hello world</Body></test>`,
+ Value: &NamedXMLNSFieldStructWithEmptyNamespace{Ns: "foo", Body: "hello world"},
+ MarshalOnly: true,
+ },
+ {
+ ExpectXML: `<RecursiveXMLNSFieldStruct xmlns="foo"><Body xmlns=""><Text>hello world</Text></Body></RecursiveXMLNSFieldStruct>`,
+ Value: &RecursiveXMLNSFieldStruct{
+ Ns: "foo",
+ Body: &RecursiveXMLNSFieldStruct{
+ Text: "hello world",
+ },
+ },
+ },
+}
+
+func TestMarshal(t *testing.T) {
+ for idx, test := range marshalTests {
+ if test.UnmarshalOnly {
+ continue
+ }
+ data, err := Marshal(test.Value)
+ if err != nil {
+ t.Errorf("#%d: marshal(%#v): %s", idx, test.Value, err)
+ continue
+ }
+ if got, want := string(data), test.ExpectXML; got != want {
+ if strings.Contains(want, "\n") {
+ t.Errorf("#%d: marshal(%#v):\nHAVE:\n%s\nWANT:\n%s", idx, test.Value, got, want)
+ } else {
+ t.Errorf("#%d: marshal(%#v):\nhave %#q\nwant %#q", idx, test.Value, got, want)
+ }
+ }
+ }
+}
+
+type AttrParent struct {
+ X string `xml:"X>Y,attr"`
+}
+
+type BadAttr struct {
+ Name []string `xml:"name,attr"`
+}
+
+var marshalErrorTests = []struct {
+ Value interface{}
+ Err string
+ Kind reflect.Kind
+}{
+ {
+ Value: make(chan bool),
+ Err: "xml: unsupported type: chan bool",
+ Kind: reflect.Chan,
+ },
+ {
+ Value: map[string]string{
+ "question": "What do you get when you multiply six by nine?",
+ "answer": "42",
+ },
+ Err: "xml: unsupported type: map[string]string",
+ Kind: reflect.Map,
+ },
+ {
+ Value: map[*Ship]bool{nil: false},
+ Err: "xml: unsupported type: map[*xml.Ship]bool",
+ Kind: reflect.Map,
+ },
+ {
+ Value: &Domain{Comment: []byte("f--bar")},
+ Err: `xml: comments must not contain "--"`,
+ },
+ // Reject parent chain with attr, never worked; see golang.org/issue/5033.
+ {
+ Value: &AttrParent{},
+ Err: `xml: X>Y chain not valid with attr flag`,
+ },
+ {
+ Value: BadAttr{[]string{"X", "Y"}},
+ Err: `xml: unsupported type: []string`,
+ },
+}
+
+var marshalIndentTests = []struct {
+ Value interface{}
+ Prefix string
+ Indent string
+ ExpectXML string
+}{
+ {
+ Value: &SecretAgent{
+ Handle: "007",
+ Identity: "James Bond",
+ Obfuscate: "<redacted/>",
+ },
+ Prefix: "",
+ Indent: "\t",
+ ExpectXML: "<agent handle=\"007\">\n\t<Identity>James Bond</Identity><redacted/>\n</agent>",
+ },
+}
+
+func TestMarshalErrors(t *testing.T) {
+ for idx, test := range marshalErrorTests {
+ data, err := Marshal(test.Value)
+ if err == nil {
+ t.Errorf("#%d: marshal(%#v) = [success] %q, want error %v", idx, test.Value, data, test.Err)
+ continue
+ }
+ if err.Error() != test.Err {
+ t.Errorf("#%d: marshal(%#v) = [error] %v, want %v", idx, test.Value, err, test.Err)
+ }
+ if test.Kind != reflect.Invalid {
+ if kind := err.(*UnsupportedTypeError).Type.Kind(); kind != test.Kind {
+ t.Errorf("#%d: marshal(%#v) = [error kind] %s, want %s", idx, test.Value, kind, test.Kind)
+ }
+ }
+ }
+}
+
+// Do invertibility testing on the various structures that we test
+func TestUnmarshal(t *testing.T) {
+ for i, test := range marshalTests {
+ if test.MarshalOnly {
+ continue
+ }
+ if _, ok := test.Value.(*Plain); ok {
+ continue
+ }
+ vt := reflect.TypeOf(test.Value)
+ dest := reflect.New(vt.Elem()).Interface()
+ err := Unmarshal([]byte(test.ExpectXML), dest)
+
+ switch fix := dest.(type) {
+ case *Feed:
+ fix.Author.InnerXML = ""
+ for i := range fix.Entry {
+ fix.Entry[i].Author.InnerXML = ""
+ }
+ }
+
+ if err != nil {
+ t.Errorf("#%d: unexpected error: %#v", i, err)
+ } else if got, want := dest, test.Value; !reflect.DeepEqual(got, want) {
+ t.Errorf("#%d: unmarshal(%q):\nhave %#v\nwant %#v", i, test.ExpectXML, got, want)
+ }
+ }
+}
+
+func TestMarshalIndent(t *testing.T) {
+ for i, test := range marshalIndentTests {
+ data, err := MarshalIndent(test.Value, test.Prefix, test.Indent)
+ if err != nil {
+ t.Errorf("#%d: Error: %s", i, err)
+ continue
+ }
+ if got, want := string(data), test.ExpectXML; got != want {
+ t.Errorf("#%d: MarshalIndent:\nGot:%s\nWant:\n%s", i, got, want)
+ }
+ }
+}
+
+type limitedBytesWriter struct {
+ w io.Writer
+ remain int // until writes fail
+}
+
+func (lw *limitedBytesWriter) Write(p []byte) (n int, err error) {
+ if lw.remain <= 0 {
+ println("error")
+ return 0, errors.New("write limit hit")
+ }
+ if len(p) > lw.remain {
+ p = p[:lw.remain]
+ n, _ = lw.w.Write(p)
+ lw.remain = 0
+ return n, errors.New("write limit hit")
+ }
+ n, err = lw.w.Write(p)
+ lw.remain -= n
+ return n, err
+}
+
+func TestMarshalWriteErrors(t *testing.T) {
+ var buf bytes.Buffer
+ const writeCap = 1024
+ w := &limitedBytesWriter{&buf, writeCap}
+ enc := NewEncoder(w)
+ var err error
+ var i int
+ const n = 4000
+ for i = 1; i <= n; i++ {
+ err = enc.Encode(&Passenger{
+ Name: []string{"Alice", "Bob"},
+ Weight: 5,
+ })
+ if err != nil {
+ break
+ }
+ }
+ if err == nil {
+ t.Error("expected an error")
+ }
+ if i == n {
+ t.Errorf("expected to fail before the end")
+ }
+ if buf.Len() != writeCap {
+ t.Errorf("buf.Len() = %d; want %d", buf.Len(), writeCap)
+ }
+}
+
+func TestMarshalWriteIOErrors(t *testing.T) {
+ enc := NewEncoder(errWriter{})
+
+ expectErr := "unwritable"
+ err := enc.Encode(&Passenger{})
+ if err == nil || err.Error() != expectErr {
+ t.Errorf("EscapeTest = [error] %v, want %v", err, expectErr)
+ }
+}
+
+func TestMarshalFlush(t *testing.T) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ if err := enc.EncodeToken(CharData("hello world")); err != nil {
+ t.Fatalf("enc.EncodeToken: %v", err)
+ }
+ if buf.Len() > 0 {
+ t.Fatalf("enc.EncodeToken caused actual write: %q", buf.Bytes())
+ }
+ if err := enc.Flush(); err != nil {
+ t.Fatalf("enc.Flush: %v", err)
+ }
+ if buf.String() != "hello world" {
+ t.Fatalf("after enc.Flush, buf.String() = %q, want %q", buf.String(), "hello world")
+ }
+}
+
+var encodeElementTests = []struct {
+ desc string
+ value interface{}
+ start StartElement
+ expectXML string
+}{{
+ desc: "simple string",
+ value: "hello",
+ start: StartElement{
+ Name: Name{Local: "a"},
+ },
+ expectXML: `<a>hello</a>`,
+}, {
+ desc: "string with added attributes",
+ value: "hello",
+ start: StartElement{
+ Name: Name{Local: "a"},
+ Attr: []Attr{{
+ Name: Name{Local: "x"},
+ Value: "y",
+ }, {
+ Name: Name{Local: "foo"},
+ Value: "bar",
+ }},
+ },
+ expectXML: `<a x="y" foo="bar">hello</a>`,
+}, {
+ desc: "start element with default name space",
+ value: struct {
+ Foo XMLNameWithNSTag
+ }{
+ Foo: XMLNameWithNSTag{
+ Value: "hello",
+ },
+ },
+ start: StartElement{
+ Name: Name{Space: "ns", Local: "a"},
+ Attr: []Attr{{
+ Name: Name{Local: "xmlns"},
+ // "ns" is the name space defined in XMLNameWithNSTag
+ Value: "ns",
+ }},
+ },
+ expectXML: `<a xmlns="ns"><InXMLNameWithNSTag>hello</InXMLNameWithNSTag></a>`,
+}, {
+ desc: "start element in name space with different default name space",
+ value: struct {
+ Foo XMLNameWithNSTag
+ }{
+ Foo: XMLNameWithNSTag{
+ Value: "hello",
+ },
+ },
+ start: StartElement{
+ Name: Name{Space: "ns2", Local: "a"},
+ Attr: []Attr{{
+ Name: Name{Local: "xmlns"},
+ // "ns" is the name space defined in XMLNameWithNSTag
+ Value: "ns",
+ }},
+ },
+ expectXML: `<ns2:a xmlns:ns2="ns2" xmlns="ns"><InXMLNameWithNSTag>hello</InXMLNameWithNSTag></ns2:a>`,
+}, {
+ desc: "XMLMarshaler with start element with default name space",
+ value: &MyMarshalerTest{},
+ start: StartElement{
+ Name: Name{Space: "ns2", Local: "a"},
+ Attr: []Attr{{
+ Name: Name{Local: "xmlns"},
+ // "ns" is the name space defined in XMLNameWithNSTag
+ Value: "ns",
+ }},
+ },
+ expectXML: `<ns2:a xmlns:ns2="ns2" xmlns="ns">hello world</ns2:a>`,
+}}
+
+func TestEncodeElement(t *testing.T) {
+ for idx, test := range encodeElementTests {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ err := enc.EncodeElement(test.value, test.start)
+ if err != nil {
+ t.Fatalf("enc.EncodeElement: %v", err)
+ }
+ err = enc.Flush()
+ if err != nil {
+ t.Fatalf("enc.Flush: %v", err)
+ }
+ if got, want := buf.String(), test.expectXML; got != want {
+ t.Errorf("#%d(%s): EncodeElement(%#v, %#v):\nhave %#q\nwant %#q", idx, test.desc, test.value, test.start, got, want)
+ }
+ }
+}
+
+func BenchmarkMarshal(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ Marshal(atomValue)
+ }
+}
+
+func BenchmarkUnmarshal(b *testing.B) {
+ b.ReportAllocs()
+ xml := []byte(atomXml)
+ for i := 0; i < b.N; i++ {
+ Unmarshal(xml, &Feed{})
+ }
+}
+
+// golang.org/issue/6556
+func TestStructPointerMarshal(t *testing.T) {
+ type A struct {
+ XMLName string `xml:"a"`
+ B []interface{}
+ }
+ type C struct {
+ XMLName Name
+ Value string `xml:"value"`
+ }
+
+ a := new(A)
+ a.B = append(a.B, &C{
+ XMLName: Name{Local: "c"},
+ Value: "x",
+ })
+
+ b, err := Marshal(a)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if x := string(b); x != "<a><c><value>x</value></c></a>" {
+ t.Fatal(x)
+ }
+ var v A
+ err = Unmarshal(b, &v)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+var encodeTokenTests = []struct {
+ desc string
+ toks []Token
+ want string
+ err string
+}{{
+ desc: "start element with name space",
+ toks: []Token{
+ StartElement{Name{"space", "local"}, nil},
+ },
+ want: `<space:local xmlns:space="space">`,
+}, {
+ desc: "start element with no name",
+ toks: []Token{
+ StartElement{Name{"space", ""}, nil},
+ },
+ err: "xml: start tag with no name",
+}, {
+ desc: "end element with no name",
+ toks: []Token{
+ EndElement{Name{"space", ""}},
+ },
+ err: "xml: end tag with no name",
+}, {
+ desc: "char data",
+ toks: []Token{
+ CharData("foo"),
+ },
+ want: `foo`,
+}, {
+ desc: "char data with escaped chars",
+ toks: []Token{
+ CharData(" \t\n"),
+ },
+ want: " &#x9;\n",
+}, {
+ desc: "comment",
+ toks: []Token{
+ Comment("foo"),
+ },
+ want: `<!--foo-->`,
+}, {
+ desc: "comment with invalid content",
+ toks: []Token{
+ Comment("foo-->"),
+ },
+ err: "xml: EncodeToken of Comment containing --> marker",
+}, {
+ desc: "proc instruction",
+ toks: []Token{
+ ProcInst{"Target", []byte("Instruction")},
+ },
+ want: `<?Target Instruction?>`,
+}, {
+ desc: "proc instruction with empty target",
+ toks: []Token{
+ ProcInst{"", []byte("Instruction")},
+ },
+ err: "xml: EncodeToken of ProcInst with invalid Target",
+}, {
+ desc: "proc instruction with bad content",
+ toks: []Token{
+ ProcInst{"", []byte("Instruction?>")},
+ },
+ err: "xml: EncodeToken of ProcInst with invalid Target",
+}, {
+ desc: "directive",
+ toks: []Token{
+ Directive("foo"),
+ },
+ want: `<!foo>`,
+}, {
+ desc: "more complex directive",
+ toks: []Token{
+ Directive("DOCTYPE doc [ <!ELEMENT doc '>'> <!-- com>ment --> ]"),
+ },
+ want: `<!DOCTYPE doc [ <!ELEMENT doc '>'> <!-- com>ment --> ]>`,
+}, {
+ desc: "directive instruction with bad name",
+ toks: []Token{
+ Directive("foo>"),
+ },
+ err: "xml: EncodeToken of Directive containing wrong < or > markers",
+}, {
+ desc: "end tag without start tag",
+ toks: []Token{
+ EndElement{Name{"foo", "bar"}},
+ },
+ err: "xml: end tag </bar> without start tag",
+}, {
+ desc: "mismatching end tag local name",
+ toks: []Token{
+ StartElement{Name{"", "foo"}, nil},
+ EndElement{Name{"", "bar"}},
+ },
+ err: "xml: end tag </bar> does not match start tag <foo>",
+ want: `<foo>`,
+}, {
+ desc: "mismatching end tag namespace",
+ toks: []Token{
+ StartElement{Name{"space", "foo"}, nil},
+ EndElement{Name{"another", "foo"}},
+ },
+ err: "xml: end tag </foo> in namespace another does not match start tag <foo> in namespace space",
+ want: `<space:foo xmlns:space="space">`,
+}, {
+ desc: "start element with explicit namespace",
+ toks: []Token{
+ StartElement{Name{"space", "local"}, []Attr{
+ {Name{"xmlns", "x"}, "space"},
+ {Name{"space", "foo"}, "value"},
+ }},
+ },
+ want: `<x:local xmlns:x="space" x:foo="value">`,
+}, {
+ desc: "start element with explicit namespace and colliding prefix",
+ toks: []Token{
+ StartElement{Name{"space", "local"}, []Attr{
+ {Name{"xmlns", "x"}, "space"},
+ {Name{"space", "foo"}, "value"},
+ {Name{"x", "bar"}, "other"},
+ }},
+ },
+ want: `<x:local xmlns:x_1="x" xmlns:x="space" x:foo="value" x_1:bar="other">`,
+}, {
+ desc: "start element using previously defined namespace",
+ toks: []Token{
+ StartElement{Name{"", "local"}, []Attr{
+ {Name{"xmlns", "x"}, "space"},
+ }},
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"space", "x"}, "y"},
+ }},
+ },
+ want: `<local xmlns:x="space"><x:foo x:x="y">`,
+}, {
+ desc: "nested name space with same prefix",
+ toks: []Token{
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"xmlns", "x"}, "space1"},
+ }},
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"xmlns", "x"}, "space2"},
+ }},
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"space1", "a"}, "space1 value"},
+ {Name{"space2", "b"}, "space2 value"},
+ }},
+ EndElement{Name{"", "foo"}},
+ EndElement{Name{"", "foo"}},
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"space1", "a"}, "space1 value"},
+ {Name{"space2", "b"}, "space2 value"},
+ }},
+ },
+ want: `<foo xmlns:x="space1"><foo xmlns:x="space2"><foo xmlns:space1="space1" space1:a="space1 value" x:b="space2 value"></foo></foo><foo xmlns:space2="space2" x:a="space1 value" space2:b="space2 value">`,
+}, {
+ desc: "start element defining several prefixes for the same name space",
+ toks: []Token{
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"xmlns", "a"}, "space"},
+ {Name{"xmlns", "b"}, "space"},
+ {Name{"space", "x"}, "value"},
+ }},
+ },
+ want: `<a:foo xmlns:a="space" a:x="value">`,
+}, {
+ desc: "nested element redefines name space",
+ toks: []Token{
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"xmlns", "x"}, "space"},
+ }},
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"xmlns", "y"}, "space"},
+ {Name{"space", "a"}, "value"},
+ }},
+ },
+ want: `<foo xmlns:x="space"><x:foo x:a="value">`,
+}, {
+ desc: "nested element creates alias for default name space",
+ toks: []Token{
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"", "xmlns"}, "space"},
+ }},
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"xmlns", "y"}, "space"},
+ {Name{"space", "a"}, "value"},
+ }},
+ },
+ want: `<foo xmlns="space"><foo xmlns:y="space" y:a="value">`,
+}, {
+ desc: "nested element defines default name space with existing prefix",
+ toks: []Token{
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"xmlns", "x"}, "space"},
+ }},
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"", "xmlns"}, "space"},
+ {Name{"space", "a"}, "value"},
+ }},
+ },
+ want: `<foo xmlns:x="space"><foo xmlns="space" x:a="value">`,
+}, {
+ desc: "nested element uses empty attribute name space when default ns defined",
+ toks: []Token{
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"", "xmlns"}, "space"},
+ }},
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"", "attr"}, "value"},
+ }},
+ },
+ want: `<foo xmlns="space"><foo attr="value">`,
+}, {
+ desc: "redefine xmlns",
+ toks: []Token{
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"foo", "xmlns"}, "space"},
+ }},
+ },
+ err: `xml: cannot redefine xmlns attribute prefix`,
+}, {
+ desc: "xmlns with explicit name space #1",
+ toks: []Token{
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"xml", "xmlns"}, "space"},
+ }},
+ },
+ want: `<foo xmlns="space">`,
+}, {
+ desc: "xmlns with explicit name space #2",
+ toks: []Token{
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{xmlURL, "xmlns"}, "space"},
+ }},
+ },
+ want: `<foo xmlns="space">`,
+}, {
+ desc: "empty name space declaration is ignored",
+ toks: []Token{
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"xmlns", "foo"}, ""},
+ }},
+ },
+ want: `<foo>`,
+}, {
+ desc: "attribute with no name is ignored",
+ toks: []Token{
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"", ""}, "value"},
+ }},
+ },
+ want: `<foo>`,
+}, {
+ desc: "namespace URL with non-valid name",
+ toks: []Token{
+ StartElement{Name{"/34", "foo"}, []Attr{
+ {Name{"/34", "x"}, "value"},
+ }},
+ },
+ want: `<_:foo xmlns:_="/34" _:x="value">`,
+}, {
+ desc: "nested element resets default namespace to empty",
+ toks: []Token{
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"", "xmlns"}, "space"},
+ }},
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"", "xmlns"}, ""},
+ {Name{"", "x"}, "value"},
+ {Name{"space", "x"}, "value"},
+ }},
+ },
+ want: `<foo xmlns="space"><foo xmlns:space="space" xmlns="" x="value" space:x="value">`,
+}, {
+ desc: "nested element requires empty default name space",
+ toks: []Token{
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"", "xmlns"}, "space"},
+ }},
+ StartElement{Name{"", "foo"}, nil},
+ },
+ want: `<foo xmlns="space"><foo xmlns="">`,
+}, {
+ desc: "attribute uses name space from xmlns",
+ toks: []Token{
+ StartElement{Name{"some/space", "foo"}, []Attr{
+ {Name{"", "attr"}, "value"},
+ {Name{"some/space", "other"}, "other value"},
+ }},
+ },
+ want: `<space:foo xmlns:space="some/space" attr="value" space:other="other value">`,
+}, {
+ desc: "default name space should not be used by attributes",
+ toks: []Token{
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"", "xmlns"}, "space"},
+ {Name{"xmlns", "bar"}, "space"},
+ {Name{"space", "baz"}, "foo"},
+ }},
+ StartElement{Name{"space", "baz"}, nil},
+ EndElement{Name{"space", "baz"}},
+ EndElement{Name{"space", "foo"}},
+ },
+ want: `<foo xmlns:bar="space" xmlns="space" bar:baz="foo"><baz></baz></foo>`,
+}, {
+ desc: "default name space not used by attributes, not explicitly defined",
+ toks: []Token{
+ StartElement{Name{"space", "foo"}, []Attr{
+ {Name{"", "xmlns"}, "space"},
+ {Name{"space", "baz"}, "foo"},
+ }},
+ StartElement{Name{"space", "baz"}, nil},
+ EndElement{Name{"space", "baz"}},
+ EndElement{Name{"space", "foo"}},
+ },
+ want: `<foo xmlns:space="space" xmlns="space" space:baz="foo"><baz></baz></foo>`,
+}, {
+ desc: "impossible xmlns declaration",
+ toks: []Token{
+ StartElement{Name{"", "foo"}, []Attr{
+ {Name{"", "xmlns"}, "space"},
+ }},
+ StartElement{Name{"space", "bar"}, []Attr{
+ {Name{"space", "attr"}, "value"},
+ }},
+ },
+ want: `<foo><space:bar xmlns:space="space" space:attr="value">`,
+}}
+
+func TestEncodeToken(t *testing.T) {
+loop:
+ for i, tt := range encodeTokenTests {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ var err error
+ for j, tok := range tt.toks {
+ err = enc.EncodeToken(tok)
+ if err != nil && j < len(tt.toks)-1 {
+ t.Errorf("#%d %s token #%d: %v", i, tt.desc, j, err)
+ continue loop
+ }
+ }
+ errorf := func(f string, a ...interface{}) {
+ t.Errorf("#%d %s token #%d:%s", i, tt.desc, len(tt.toks)-1, fmt.Sprintf(f, a...))
+ }
+ switch {
+ case tt.err != "" && err == nil:
+ errorf(" expected error; got none")
+ continue
+ case tt.err == "" && err != nil:
+ errorf(" got error: %v", err)
+ continue
+ case tt.err != "" && err != nil && tt.err != err.Error():
+ errorf(" error mismatch; got %v, want %v", err, tt.err)
+ continue
+ }
+ if err := enc.Flush(); err != nil {
+ errorf(" %v", err)
+ continue
+ }
+ if got := buf.String(); got != tt.want {
+ errorf("\ngot %v\nwant %v", got, tt.want)
+ continue
+ }
+ }
+}
+
+func TestProcInstEncodeToken(t *testing.T) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+
+ if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err != nil {
+ t.Fatalf("enc.EncodeToken: expected to be able to encode xml target ProcInst as first token, %s", err)
+ }
+
+ if err := enc.EncodeToken(ProcInst{"Target", []byte("Instruction")}); err != nil {
+ t.Fatalf("enc.EncodeToken: expected to be able to add non-xml target ProcInst")
+ }
+
+ if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err == nil {
+ t.Fatalf("enc.EncodeToken: expected to not be allowed to encode xml target ProcInst when not first token")
+ }
+}
+
+func TestDecodeEncode(t *testing.T) {
+ var in, out bytes.Buffer
+ in.WriteString(`<?xml version="1.0" encoding="UTF-8"?>
+<?Target Instruction?>
+<root>
+</root>
+`)
+ dec := NewDecoder(&in)
+ enc := NewEncoder(&out)
+ for tok, err := dec.Token(); err == nil; tok, err = dec.Token() {
+ err = enc.EncodeToken(tok)
+ if err != nil {
+ t.Fatalf("enc.EncodeToken: Unable to encode token (%#v), %v", tok, err)
+ }
+ }
+}
+
+// Issue 9796. Used to fail with GORACE="halt_on_error=1" -race.
+func TestRace9796(t *testing.T) {
+ type A struct{}
+ type B struct {
+ C []A `xml:"X>Y"`
+ }
+ var wg sync.WaitGroup
+ for i := 0; i < 2; i++ {
+ wg.Add(1)
+ go func() {
+ Marshal(B{[]A{{}}})
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
+
+func TestIsValidDirective(t *testing.T) {
+ testOK := []string{
+ "<>",
+ "< < > >",
+ "<!DOCTYPE '<' '>' '>' <!--nothing-->>",
+ "<!DOCTYPE doc [ <!ELEMENT doc ANY> <!ELEMENT doc ANY> ]>",
+ "<!DOCTYPE doc [ <!ELEMENT doc \"ANY> '<' <!E\" LEMENT '>' doc ANY> ]>",
+ "<!DOCTYPE doc <!-- just>>>> a < comment --> [ <!ITEM anything> ] >",
+ }
+ testKO := []string{
+ "<",
+ ">",
+ "<!--",
+ "-->",
+ "< > > < < >",
+ "<!dummy <!-- > -->",
+ "<!DOCTYPE doc '>",
+ "<!DOCTYPE doc '>'",
+ "<!DOCTYPE doc <!--comment>",
+ }
+ for _, s := range testOK {
+ if !isValidDirective(Directive(s)) {
+ t.Errorf("Directive %q is expected to be valid", s)
+ }
+ }
+ for _, s := range testKO {
+ if isValidDirective(Directive(s)) {
+ t.Errorf("Directive %q is expected to be invalid", s)
+ }
+ }
+}
+
+// Issue 11719. EncodeToken used to silently eat tokens with an invalid type.
+func TestSimpleUseOfEncodeToken(t *testing.T) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ if err := enc.EncodeToken(&StartElement{Name: Name{"", "object1"}}); err == nil {
+ t.Errorf("enc.EncodeToken: pointer type should be rejected")
+ }
+ if err := enc.EncodeToken(&EndElement{Name: Name{"", "object1"}}); err == nil {
+ t.Errorf("enc.EncodeToken: pointer type should be rejected")
+ }
+ if err := enc.EncodeToken(StartElement{Name: Name{"", "object2"}}); err != nil {
+ t.Errorf("enc.EncodeToken: StartElement %s", err)
+ }
+ if err := enc.EncodeToken(EndElement{Name: Name{"", "object2"}}); err != nil {
+ t.Errorf("enc.EncodeToken: EndElement %s", err)
+ }
+ if err := enc.EncodeToken(Universe{}); err == nil {
+ t.Errorf("enc.EncodeToken: invalid type not caught")
+ }
+ if err := enc.Flush(); err != nil {
+ t.Errorf("enc.Flush: %s", err)
+ }
+ if buf.Len() == 0 {
+ t.Errorf("enc.EncodeToken: empty buffer")
+ }
+ want := "<object2></object2>"
+ if buf.String() != want {
+ t.Errorf("enc.EncodeToken: expected %q; got %q", want, buf.String())
+ }
+}
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/read.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/read.go
new file mode 100644
index 000000000..4089056a1
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/read.go
@@ -0,0 +1,692 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// BUG(rsc): Mapping between XML elements and data structures is inherently flawed:
+// an XML element is an order-dependent collection of anonymous
+// values, while a data structure is an order-independent collection
+// of named values.
+// See package json for a textual representation more suitable
+// to data structures.
+
+// Unmarshal parses the XML-encoded data and stores the result in
+// the value pointed to by v, which must be an arbitrary struct,
+// slice, or string. Well-formed data that does not fit into v is
+// discarded.
+//
+// Because Unmarshal uses the reflect package, it can only assign
+// to exported (upper case) fields. Unmarshal uses a case-sensitive
+// comparison to match XML element names to tag values and struct
+// field names.
+//
+// Unmarshal maps an XML element to a struct using the following rules.
+// In the rules, the tag of a field refers to the value associated with the
+// key 'xml' in the struct field's tag (see the example above).
+//
+// * If the struct has a field of type []byte or string with tag
+// ",innerxml", Unmarshal accumulates the raw XML nested inside the
+// element in that field. The rest of the rules still apply.
+//
+// * If the struct has a field named XMLName of type xml.Name,
+// Unmarshal records the element name in that field.
+//
+// * If the XMLName field has an associated tag of the form
+// "name" or "namespace-URL name", the XML element must have
+// the given name (and, optionally, name space) or else Unmarshal
+// returns an error.
+//
+// * If the XML element has an attribute whose name matches a
+// struct field name with an associated tag containing ",attr" or
+// the explicit name in a struct field tag of the form "name,attr",
+// Unmarshal records the attribute value in that field.
+//
+// * If the XML element contains character data, that data is
+// accumulated in the first struct field that has tag ",chardata".
+// The struct field may have type []byte or string.
+// If there is no such field, the character data is discarded.
+//
+// * If the XML element contains comments, they are accumulated in
+// the first struct field that has tag ",comment". The struct
+// field may have type []byte or string. If there is no such
+// field, the comments are discarded.
+//
+// * If the XML element contains a sub-element whose name matches
+// the prefix of a tag formatted as "a" or "a>b>c", unmarshal
+// will descend into the XML structure looking for elements with the
+// given names, and will map the innermost elements to that struct
+// field. A tag starting with ">" is equivalent to one starting
+// with the field name followed by ">".
+//
+// * If the XML element contains a sub-element whose name matches
+// a struct field's XMLName tag and the struct field has no
+// explicit name tag as per the previous rule, unmarshal maps
+// the sub-element to that struct field.
+//
+// * If the XML element contains a sub-element whose name matches a
+// field without any mode flags (",attr", ",chardata", etc), Unmarshal
+// maps the sub-element to that struct field.
+//
+// * If the XML element contains a sub-element that hasn't matched any
+// of the above rules and the struct has a field with tag ",any",
+// unmarshal maps the sub-element to that struct field.
+//
+// * An anonymous struct field is handled as if the fields of its
+// value were part of the outer struct.
+//
+// * A struct field with tag "-" is never unmarshalled into.
+//
+// Unmarshal maps an XML element to a string or []byte by saving the
+// concatenation of that element's character data in the string or
+// []byte. The saved []byte is never nil.
+//
+// Unmarshal maps an attribute value to a string or []byte by saving
+// the value in the string or slice.
+//
+// Unmarshal maps an XML element to a slice by extending the length of
+// the slice and mapping the element to the newly created value.
+//
+// Unmarshal maps an XML element or attribute value to a bool by
+// setting it to the boolean value represented by the string.
+//
+// Unmarshal maps an XML element or attribute value to an integer or
+// floating-point field by setting the field to the result of
+// interpreting the string value in decimal. There is no check for
+// overflow.
+//
+// Unmarshal maps an XML element to an xml.Name by recording the
+// element name.
+//
+// Unmarshal maps an XML element to a pointer by setting the pointer
+// to a freshly allocated value and then mapping the element to that value.
+//
+func Unmarshal(data []byte, v interface{}) error {
+ return NewDecoder(bytes.NewReader(data)).Decode(v)
+}
+
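+// unmarshalSketch is an illustrative sketch added for this review; it is not
+// part of the upstream file. Assuming the mapping rules documented above, it
+// shows a "name,attr" tag filling an attribute and an "address>city" tag
+// descending into nested elements. The person type and sample data are
+// hypothetical.
+func unmarshalSketch() error {
+ type person struct {
+ XMLName Name `xml:"person"`
+ Name string `xml:"name,attr"`
+ City string `xml:"address>city"`
+ }
+ data := []byte(`<person name="Grace"><address><city>Arlington</city></address></person>`)
+ var p person
+ if err := Unmarshal(data, &p); err != nil {
+ return err
+ }
+ // On success, p.Name == "Grace" and p.City == "Arlington".
+ return nil
+}
+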
+// Decode works like xml.Unmarshal, except it reads the decoder
+// stream to find the start element.
+func (d *Decoder) Decode(v interface{}) error {
+ return d.DecodeElement(v, nil)
+}
+
+// DecodeElement works like xml.Unmarshal except that it takes
+// a pointer to the start XML element to decode into v.
+// It is useful when a client reads some raw XML tokens itself
+// but also wants to defer to Unmarshal for some elements.
+func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error {
+ val := reflect.ValueOf(v)
+ if val.Kind() != reflect.Ptr {
+ return errors.New("non-pointer passed to Unmarshal")
+ }
+ return d.unmarshal(val.Elem(), start)
+}
+
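+// decodeElementSketch is an illustrative sketch added for this review, not
+// upstream code. It shows the pattern described above: the caller walks the
+// token stream itself and defers to DecodeElement for selected start
+// elements. The "item" and "name" element names are hypothetical; the loop
+// ends when Token returns an error (typically io.EOF), which is passed back
+// to the caller along with the results gathered so far.
+func decodeElementSketch(d *Decoder) ([]string, error) {
+ var names []string
+ for {
+ tok, err := d.Token()
+ if err != nil {
+ return names, err
+ }
+ if se, ok := tok.(StartElement); ok && se.Name.Local == "item" {
+ var item struct {
+ Name string `xml:"name"`
+ }
+ if err := d.DecodeElement(&item, &se); err != nil {
+ return names, err
+ }
+ names = append(names, item.Name)
+ }
+ }
+}
+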
+// An UnmarshalError represents an error in the unmarshalling process.
+type UnmarshalError string
+
+func (e UnmarshalError) Error() string { return string(e) }
+
+// Unmarshaler is the interface implemented by objects that can unmarshal
+// an XML element description of themselves.
+//
+// UnmarshalXML decodes a single XML element
+// beginning with the given start element.
+// If it returns an error, the outer call to Unmarshal stops and
+// returns that error.
+// UnmarshalXML must consume exactly one XML element.
+// One common implementation strategy is to unmarshal into
+// a separate value with a layout matching the expected XML
+// using d.DecodeElement, and then to copy the data from
+// that value into the receiver.
+// Another common strategy is to use d.Token to process the
+// XML object one token at a time.
+// UnmarshalXML may not use d.RawToken.
+type Unmarshaler interface {
+ UnmarshalXML(d *Decoder, start StartElement) error
+}
+
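+// tempSketch and its UnmarshalXML method are an illustrative sketch added for
+// this review (not upstream code) of the first strategy mentioned above:
+// decode into an auxiliary value whose layout matches the expected XML, then
+// copy the data into the receiver. The <temp unit="..."> element shape is
+// hypothetical.
+type tempSketch struct {
+ Celsius float64
+}
+
+func (t *tempSketch) UnmarshalXML(d *Decoder, start StartElement) error {
+ var aux struct {
+ Unit string `xml:"unit,attr"`
+ Value float64 `xml:",chardata"`
+ }
+ // DecodeElement consumes exactly the one element starting at start,
+ // satisfying the UnmarshalXML contract.
+ if err := d.DecodeElement(&aux, &start); err != nil {
+ return err
+ }
+ if aux.Unit == "F" {
+ aux.Value = (aux.Value - 32) * 5 / 9
+ }
+ t.Celsius = aux.Value
+ return nil
+}
+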
+// UnmarshalerAttr is the interface implemented by objects that can unmarshal
+// an XML attribute description of themselves.
+//
+// UnmarshalXMLAttr decodes a single XML attribute.
+// If it returns an error, the outer call to Unmarshal stops and
+// returns that error.
+// UnmarshalXMLAttr is used only for struct fields with the
+// "attr" option in the field tag.
+type UnmarshalerAttr interface {
+ UnmarshalXMLAttr(attr Attr) error
+}
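+
+// csvAttrSketch is an illustrative sketch added for this review (not upstream
+// code) of an UnmarshalerAttr: it splits a comma-separated attribute value,
+// as in tags="a,b,c", into a slice. The attribute shape is hypothetical.
+type csvAttrSketch struct {
+ Tags []string
+}
+
+func (c *csvAttrSketch) UnmarshalXMLAttr(attr Attr) error {
+ c.Tags = strings.Split(attr.Value, ",")
+ return nil
+}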
+
+// receiverType returns the receiver type to use in an expression like "%s.MethodName".
+func receiverType(val interface{}) string {
+ t := reflect.TypeOf(val)
+ if t.Name() != "" {
+ return t.String()
+ }
+ return "(" + t.String() + ")"
+}
+
+// unmarshalInterface unmarshals a single XML element into val.
+// start is the opening tag of the element.
+func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error {
+ // Record that decoder must stop at end tag corresponding to start.
+ p.pushEOF()
+
+ p.unmarshalDepth++
+ err := val.UnmarshalXML(p, *start)
+ p.unmarshalDepth--
+ if err != nil {
+ p.popEOF()
+ return err
+ }
+
+ if !p.popEOF() {
+ return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local)
+ }
+
+ return nil
+}
+
+// unmarshalTextInterface unmarshals a single XML element into val.
+// The chardata contained in the element (but not its children)
+// is passed to the text unmarshaler.
+func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error {
+ var buf []byte
+ depth := 1
+ for depth > 0 {
+ t, err := p.Token()
+ if err != nil {
+ return err
+ }
+ switch t := t.(type) {
+ case CharData:
+ if depth == 1 {
+ buf = append(buf, t...)
+ }
+ case StartElement:
+ depth++
+ case EndElement:
+ depth--
+ }
+ }
+ return val.UnmarshalText(buf)
+}
+
+// unmarshalAttr unmarshals a single XML attribute into val.
+func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error {
+ if val.Kind() == reflect.Ptr {
+ if val.IsNil() {
+ val.Set(reflect.New(val.Type().Elem()))
+ }
+ val = val.Elem()
+ }
+
+ if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) {
+ // This is an unmarshaler with a non-pointer receiver,
+ // so it's likely to be incorrect, but we do what we're told.
+ return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)
+ }
+ if val.CanAddr() {
+ pv := val.Addr()
+ if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) {
+ return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)
+ }
+ }
+
+ // Not an UnmarshalerAttr; try encoding.TextUnmarshaler.
+ if val.CanInterface() && val.Type().Implements(textUnmarshalerType) {
+ // This is an unmarshaler with a non-pointer receiver,
+ // so it's likely to be incorrect, but we do what we're told.
+ return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))
+ }
+ if val.CanAddr() {
+ pv := val.Addr()
+ if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
+ return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))
+ }
+ }
+
+ copyValue(val, []byte(attr.Value))
+ return nil
+}
+
+var (
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+ unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem()
+ textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+)
+
+// Unmarshal a single XML element into val.
+func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error {
+ // Find start element if we need it.
+ if start == nil {
+ for {
+ tok, err := p.Token()
+ if err != nil {
+ return err
+ }
+ if t, ok := tok.(StartElement); ok {
+ start = &t
+ break
+ }
+ }
+ }
+
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if val.Kind() == reflect.Interface && !val.IsNil() {
+ e := val.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() {
+ val = e
+ }
+ }
+
+ if val.Kind() == reflect.Ptr {
+ if val.IsNil() {
+ val.Set(reflect.New(val.Type().Elem()))
+ }
+ val = val.Elem()
+ }
+
+ if val.CanInterface() && val.Type().Implements(unmarshalerType) {
+ // This is an unmarshaler with a non-pointer receiver,
+ // so it's likely to be incorrect, but we do what we're told.
+ return p.unmarshalInterface(val.Interface().(Unmarshaler), start)
+ }
+
+ if val.CanAddr() {
+ pv := val.Addr()
+ if pv.CanInterface() && pv.Type().Implements(unmarshalerType) {
+ return p.unmarshalInterface(pv.Interface().(Unmarshaler), start)
+ }
+ }
+
+ if val.CanInterface() && val.Type().Implements(textUnmarshalerType) {
+ return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start)
+ }
+
+ if val.CanAddr() {
+ pv := val.Addr()
+ if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
+ return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start)
+ }
+ }
+
+ var (
+ data []byte
+ saveData reflect.Value
+ comment []byte
+ saveComment reflect.Value
+ saveXML reflect.Value
+ saveXMLIndex int
+ saveXMLData []byte
+ saveAny reflect.Value
+ sv reflect.Value
+ tinfo *typeInfo
+ err error
+ )
+
+ switch v := val; v.Kind() {
+ default:
+ return errors.New("unknown type " + v.Type().String())
+
+ case reflect.Interface:
+ // TODO: For now, simply ignore the field. In the near
+ // future we may choose to unmarshal the start
+ // element on it, if not nil.
+ return p.Skip()
+
+ case reflect.Slice:
+ typ := v.Type()
+ if typ.Elem().Kind() == reflect.Uint8 {
+ // []byte
+ saveData = v
+ break
+ }
+
+ // Slice of element values.
+ // Grow slice.
+ n := v.Len()
+ if n >= v.Cap() {
+ ncap := 2 * n
+ if ncap < 4 {
+ ncap = 4
+ }
+ new := reflect.MakeSlice(typ, n, ncap)
+ reflect.Copy(new, v)
+ v.Set(new)
+ }
+ v.SetLen(n + 1)
+
+ // Recur to read element into slice.
+ if err := p.unmarshal(v.Index(n), start); err != nil {
+ v.SetLen(n)
+ return err
+ }
+ return nil
+
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String:
+ saveData = v
+
+ case reflect.Struct:
+ typ := v.Type()
+ if typ == nameType {
+ v.Set(reflect.ValueOf(start.Name))
+ break
+ }
+
+ sv = v
+ tinfo, err = getTypeInfo(typ)
+ if err != nil {
+ return err
+ }
+
+ // Validate and assign element name.
+ if tinfo.xmlname != nil {
+ finfo := tinfo.xmlname
+ if finfo.name != "" && finfo.name != start.Name.Local {
+ return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">")
+ }
+ if finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
+ e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have "
+ if start.Name.Space == "" {
+ e += "no name space"
+ } else {
+ e += start.Name.Space
+ }
+ return UnmarshalError(e)
+ }
+ fv := finfo.value(sv)
+ if _, ok := fv.Interface().(Name); ok {
+ fv.Set(reflect.ValueOf(start.Name))
+ }
+ }
+
+ // Assign attributes.
+ // Also, determine whether we need to save character data or comments.
+ for i := range tinfo.fields {
+ finfo := &tinfo.fields[i]
+ switch finfo.flags & fMode {
+ case fAttr:
+ strv := finfo.value(sv)
+ // Look for attribute.
+ for _, a := range start.Attr {
+ if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) {
+ if err := p.unmarshalAttr(strv, a); err != nil {
+ return err
+ }
+ break
+ }
+ }
+
+ case fCharData:
+ if !saveData.IsValid() {
+ saveData = finfo.value(sv)
+ }
+
+ case fComment:
+ if !saveComment.IsValid() {
+ saveComment = finfo.value(sv)
+ }
+
+ case fAny, fAny | fElement:
+ if !saveAny.IsValid() {
+ saveAny = finfo.value(sv)
+ }
+
+ case fInnerXml:
+ if !saveXML.IsValid() {
+ saveXML = finfo.value(sv)
+ if p.saved == nil {
+ saveXMLIndex = 0
+ p.saved = new(bytes.Buffer)
+ } else {
+ saveXMLIndex = p.savedOffset()
+ }
+ }
+ }
+ }
+ }
+
+ // Find end element.
+ // Process sub-elements along the way.
+Loop:
+ for {
+ var savedOffset int
+ if saveXML.IsValid() {
+ savedOffset = p.savedOffset()
+ }
+ tok, err := p.Token()
+ if err != nil {
+ return err
+ }
+ switch t := tok.(type) {
+ case StartElement:
+ consumed := false
+ if sv.IsValid() {
+ consumed, err = p.unmarshalPath(tinfo, sv, nil, &t)
+ if err != nil {
+ return err
+ }
+ if !consumed && saveAny.IsValid() {
+ consumed = true
+ if err := p.unmarshal(saveAny, &t); err != nil {
+ return err
+ }
+ }
+ }
+ if !consumed {
+ if err := p.Skip(); err != nil {
+ return err
+ }
+ }
+
+ case EndElement:
+ if saveXML.IsValid() {
+ saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset]
+ if saveXMLIndex == 0 {
+ p.saved = nil
+ }
+ }
+ break Loop
+
+ case CharData:
+ if saveData.IsValid() {
+ data = append(data, t...)
+ }
+
+ case Comment:
+ if saveComment.IsValid() {
+ comment = append(comment, t...)
+ }
+ }
+ }
+
+ if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) {
+ if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
+ return err
+ }
+ saveData = reflect.Value{}
+ }
+
+ if saveData.IsValid() && saveData.CanAddr() {
+ pv := saveData.Addr()
+ if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
+ if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
+ return err
+ }
+ saveData = reflect.Value{}
+ }
+ }
+
+ if err := copyValue(saveData, data); err != nil {
+ return err
+ }
+
+ switch t := saveComment; t.Kind() {
+ case reflect.String:
+ t.SetString(string(comment))
+ case reflect.Slice:
+ t.Set(reflect.ValueOf(comment))
+ }
+
+ switch t := saveXML; t.Kind() {
+ case reflect.String:
+ t.SetString(string(saveXMLData))
+ case reflect.Slice:
+ t.Set(reflect.ValueOf(saveXMLData))
+ }
+
+ return nil
+}
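
To make the field modes handled above concrete, here is a minimal sketch written against the upstream encoding/xml package (this vendored copy keeps the same exported API); the Note type and document are illustrative assumptions, not part of this package.

package main

import (
    "encoding/xml"
    "fmt"
)

// Illustrative only: one field per "save" destination used by unmarshal above,
// i.e. character data, comment text, and the raw inner XML of the element.
type Note struct {
    Body     string `xml:",chardata"`
    Comment  string `xml:",comment"`
    InnerXML string `xml:",innerxml"`
}

func main() {
    const doc = `<Note>hello <!-- aside -->world</Note>`
    var n Note
    if err := xml.Unmarshal([]byte(doc), &n); err != nil {
        fmt.Println("unmarshal:", err)
        return
    }
    // Expected: body="hello world" comment=" aside " inner="hello <!-- aside -->world"
    fmt.Printf("body=%q comment=%q inner=%q\n", n.Body, n.Comment, n.InnerXML)
}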
+
+func copyValue(dst reflect.Value, src []byte) (err error) {
+ dst0 := dst
+
+ if dst.Kind() == reflect.Ptr {
+ if dst.IsNil() {
+ dst.Set(reflect.New(dst.Type().Elem()))
+ }
+ dst = dst.Elem()
+ }
+
+ // Save accumulated data.
+ switch dst.Kind() {
+ case reflect.Invalid:
+ // Probably a comment.
+ default:
+ return errors.New("cannot unmarshal into " + dst0.Type().String())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits())
+ if err != nil {
+ return err
+ }
+ dst.SetInt(itmp)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ utmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits())
+ if err != nil {
+ return err
+ }
+ dst.SetUint(utmp)
+ case reflect.Float32, reflect.Float64:
+ ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits())
+ if err != nil {
+ return err
+ }
+ dst.SetFloat(ftmp)
+ case reflect.Bool:
+ value, err := strconv.ParseBool(strings.TrimSpace(string(src)))
+ if err != nil {
+ return err
+ }
+ dst.SetBool(value)
+ case reflect.String:
+ dst.SetString(string(src))
+ case reflect.Slice:
+ if len(src) == 0 {
+ // non-nil to flag presence
+ src = []byte{}
+ }
+ dst.SetBytes(src)
+ }
+ return nil
+}
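
A short sketch of the scalar conversions copyValue performs, again using the upstream encoding/xml API; the Config type and the document are made up for illustration.

package main

import (
    "encoding/xml"
    "fmt"
)

// Illustrative example: each kind maps to the strconv parser used above.
type Config struct {
    Port    int     `xml:"port"`    // parsed with strconv.ParseInt
    Ratio   float64 `xml:"ratio"`   // parsed with strconv.ParseFloat
    Enabled bool    `xml:"enabled"` // parsed with strconv.ParseBool (whitespace trimmed)
    Name    string  `xml:"name"`    // stored verbatim
}

func main() {
    const doc = `<Config><port>8080</port><ratio>0.75</ratio><enabled> true </enabled><name>webdav</name></Config>`
    var c Config
    if err := xml.Unmarshal([]byte(doc), &c); err != nil {
        fmt.Println("unmarshal:", err) // malformed numbers surface as strconv errors
        return
    }
    fmt.Printf("%+v\n", c) // {Port:8080 Ratio:0.75 Enabled:true Name:webdav}
}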
+
+// unmarshalPath walks down an XML structure looking for wanted
+// paths, and calls unmarshal on them.
+// The consumed result tells whether XML elements have been consumed
+// from the Decoder until start's matching end element, or if it's
+// still untouched because start is uninteresting for sv's fields.
+func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) {
+ recurse := false
+Loop:
+ for i := range tinfo.fields {
+ finfo := &tinfo.fields[i]
+ if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
+ continue
+ }
+ for j := range parents {
+ if parents[j] != finfo.parents[j] {
+ continue Loop
+ }
+ }
+ if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local {
+ // It's a perfect match, unmarshal the field.
+ return true, p.unmarshal(finfo.value(sv), start)
+ }
+ if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local {
+ // It's a prefix for the field. Break and recurse
+ // since it's not ok for one field path to be itself
+ // the prefix for another field path.
+ recurse = true
+
+ // We can reuse the same slice as long as we
+ // don't try to append to it.
+ parents = finfo.parents[:len(parents)+1]
+ break
+ }
+ }
+ if !recurse {
+ // We have no business with this element.
+ return false, nil
+ }
+ // The element is not a perfect match for any field, but one
+ // or more fields have the path to this element as a parent
+ // prefix. Recurse and attempt to match these.
+ for {
+ var tok Token
+ tok, err = p.Token()
+ if err != nil {
+ return true, err
+ }
+ switch t := tok.(type) {
+ case StartElement:
+ consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t)
+ if err != nil {
+ return true, err
+ }
+ if !consumed2 {
+ if err := p.Skip(); err != nil {
+ return true, err
+ }
+ }
+ case EndElement:
+ return true, nil
+ }
+ }
+}
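
The parent-path matching above is what powers nested tags of the form "A>B>C". A hedged sketch with made-up type and element names:

package main

import (
    "encoding/xml"
    "fmt"
)

type Order struct {
    // "Items" and "Item" are parent elements; only "SKU" is bound to a field.
    SKUs []string `xml:"Items>Item>SKU"`
}

func main() {
    const doc = `<Order><Items><Item><SKU>a-1</SKU></Item><Item><SKU>b-2</SKU></Item></Items></Order>`
    var o Order
    if err := xml.Unmarshal([]byte(doc), &o); err != nil {
        fmt.Println("unmarshal:", err)
        return
    }
    fmt.Println(o.SKUs) // [a-1 b-2]
}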
+
+// Skip reads tokens until it has consumed the end element
+// matching the most recent start element already consumed.
+// It recurs if it encounters a start element, so it can be used to
+// skip nested structures.
+// It returns nil if it finds an end element matching the start
+// element; otherwise it returns an error describing the problem.
+func (d *Decoder) Skip() error {
+ for {
+ tok, err := d.Token()
+ if err != nil {
+ return err
+ }
+ switch tok.(type) {
+ case StartElement:
+ if err := d.Skip(); err != nil {
+ return err
+ }
+ case EndElement:
+ return nil
+ }
+ }
+}
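
A minimal usage sketch for Skip from a hand-rolled token loop (shown with the upstream encoding/xml package, whose Decoder behaves the same way); the document is invented for illustration.

package main

import (
    "encoding/xml"
    "fmt"
    "strings"
)

func main() {
    const doc = `<root><ignore><deep><deeper/></deep></ignore><keep>value</keep></root>`
    d := xml.NewDecoder(strings.NewReader(doc))
    for {
        tok, err := d.Token()
        if err != nil {
            break // io.EOF ends the loop
        }
        se, ok := tok.(xml.StartElement)
        if !ok {
            continue
        }
        if se.Name.Local == "ignore" {
            // Skip consumes everything up to and including </ignore>.
            if err := d.Skip(); err != nil {
                fmt.Println("skip:", err)
                return
            }
            continue
        }
        fmt.Println("start:", se.Name.Local) // prints "root", then "keep"
    }
}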
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/read_test.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/read_test.go
new file mode 100644
index 000000000..02f1e10c3
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/read_test.go
@@ -0,0 +1,744 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+// Stripped down Atom feed data structures.
+
+func TestUnmarshalFeed(t *testing.T) {
+ var f Feed
+ if err := Unmarshal([]byte(atomFeedString), &f); err != nil {
+ t.Fatalf("Unmarshal: %s", err)
+ }
+ if !reflect.DeepEqual(f, atomFeed) {
+ t.Fatalf("have %#v\nwant %#v", f, atomFeed)
+ }
+}
+
+// hget http://codereview.appspot.com/rss/mine/rsc
+const atomFeedString = `
+<?xml version="1.0" encoding="utf-8"?>
+<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en-us" updated="2009-10-04T01:35:58+00:00"><title>Code Review - My issues</title><link href="http://codereview.appspot.com/" rel="alternate"></link><link href="http://codereview.appspot.com/rss/mine/rsc" rel="self"></link><id>http://codereview.appspot.com/</id><author><name>rietveld&lt;&gt;</name></author><entry><title>rietveld: an attempt at pubsubhubbub
+</title><link href="http://codereview.appspot.com/126085" rel="alternate"></link><updated>2009-10-04T01:35:58+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:134d9179c41f806be79b3a5f7877d19a</id><summary type="html">
+ An attempt at adding pubsubhubbub support to Rietveld.
+http://code.google.com/p/pubsubhubbub
+http://code.google.com/p/rietveld/issues/detail?id=155
+
+The server side of the protocol is trivial:
+ 1. add a &amp;lt;link rel=&amp;quot;hub&amp;quot; href=&amp;quot;hub-server&amp;quot;&amp;gt; tag to all
+ feeds that will be pubsubhubbubbed.
+ 2. every time one of those feeds changes, tell the hub
+ with a simple POST request.
+
+I have tested this by adding debug prints to a local hub
+server and checking that the server got the right publish
+requests.
+
+I can&amp;#39;t quite get the server to work, but I think the bug
+is not in my code. I think that the server expects to be
+able to grab the feed and see the feed&amp;#39;s actual URL in
+the link rel=&amp;quot;self&amp;quot;, but the default value for that drops
+the :port from the URL, and I cannot for the life of me
+figure out how to get the Atom generator deep inside
+django not to do that, or even where it is doing that,
+or even what code is running to generate the Atom feed.
+(I thought I knew but I added some assert False statements
+and it kept running!)
+
+Ignoring that particular problem, I would appreciate
+feedback on the right way to get the two values at
+the top of feeds.py marked NOTE(rsc).
+
+
+</summary></entry><entry><title>rietveld: correct tab handling
+</title><link href="http://codereview.appspot.com/124106" rel="alternate"></link><updated>2009-10-03T23:02:17+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:0a2a4f19bb815101f0ba2904aed7c35a</id><summary type="html">
+ This fixes the buggy tab rendering that can be seen at
+http://codereview.appspot.com/116075/diff/1/2
+
+The fundamental problem was that the tab code was
+not being told what column the text began in, so it
+didn&amp;#39;t know where to put the tab stops. Another problem
+was that some of the code assumed that string byte
+offsets were the same as column offsets, which is only
+true if there are no tabs.
+
+In the process of fixing this, I cleaned up the arguments
+to Fold and ExpandTabs and renamed them Break and
+_ExpandTabs so that I could be sure that I found all the
+call sites. I also wanted to verify that ExpandTabs was
+not being used from outside intra_region_diff.py.
+
+
+</summary></entry></feed> `
+
+type Feed struct {
+ XMLName Name `xml:"http://www.w3.org/2005/Atom feed"`
+ Title string `xml:"title"`
+ Id string `xml:"id"`
+ Link []Link `xml:"link"`
+ Updated time.Time `xml:"updated,attr"`
+ Author Person `xml:"author"`
+ Entry []Entry `xml:"entry"`
+}
+
+type Entry struct {
+ Title string `xml:"title"`
+ Id string `xml:"id"`
+ Link []Link `xml:"link"`
+ Updated time.Time `xml:"updated"`
+ Author Person `xml:"author"`
+ Summary Text `xml:"summary"`
+}
+
+type Link struct {
+ Rel string `xml:"rel,attr,omitempty"`
+ Href string `xml:"href,attr"`
+}
+
+type Person struct {
+ Name string `xml:"name"`
+ URI string `xml:"uri"`
+ Email string `xml:"email"`
+ InnerXML string `xml:",innerxml"`
+}
+
+type Text struct {
+ Type string `xml:"type,attr,omitempty"`
+ Body string `xml:",chardata"`
+}
+
+var atomFeed = Feed{
+ XMLName: Name{"http://www.w3.org/2005/Atom", "feed"},
+ Title: "Code Review - My issues",
+ Link: []Link{
+ {Rel: "alternate", Href: "http://codereview.appspot.com/"},
+ {Rel: "self", Href: "http://codereview.appspot.com/rss/mine/rsc"},
+ },
+ Id: "http://codereview.appspot.com/",
+ Updated: ParseTime("2009-10-04T01:35:58+00:00"),
+ Author: Person{
+ Name: "rietveld<>",
+ InnerXML: "<name>rietveld&lt;&gt;</name>",
+ },
+ Entry: []Entry{
+ {
+ Title: "rietveld: an attempt at pubsubhubbub\n",
+ Link: []Link{
+ {Rel: "alternate", Href: "http://codereview.appspot.com/126085"},
+ },
+ Updated: ParseTime("2009-10-04T01:35:58+00:00"),
+ Author: Person{
+ Name: "email-address-removed",
+ InnerXML: "<name>email-address-removed</name>",
+ },
+ Id: "urn:md5:134d9179c41f806be79b3a5f7877d19a",
+ Summary: Text{
+ Type: "html",
+ Body: `
+ An attempt at adding pubsubhubbub support to Rietveld.
+http://code.google.com/p/pubsubhubbub
+http://code.google.com/p/rietveld/issues/detail?id=155
+
+The server side of the protocol is trivial:
+ 1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all
+ feeds that will be pubsubhubbubbed.
+ 2. every time one of those feeds changes, tell the hub
+ with a simple POST request.
+
+I have tested this by adding debug prints to a local hub
+server and checking that the server got the right publish
+requests.
+
+I can&#39;t quite get the server to work, but I think the bug
+is not in my code. I think that the server expects to be
+able to grab the feed and see the feed&#39;s actual URL in
+the link rel=&quot;self&quot;, but the default value for that drops
+the :port from the URL, and I cannot for the life of me
+figure out how to get the Atom generator deep inside
+django not to do that, or even where it is doing that,
+or even what code is running to generate the Atom feed.
+(I thought I knew but I added some assert False statements
+and it kept running!)
+
+Ignoring that particular problem, I would appreciate
+feedback on the right way to get the two values at
+the top of feeds.py marked NOTE(rsc).
+
+
+`,
+ },
+ },
+ {
+ Title: "rietveld: correct tab handling\n",
+ Link: []Link{
+ {Rel: "alternate", Href: "http://codereview.appspot.com/124106"},
+ },
+ Updated: ParseTime("2009-10-03T23:02:17+00:00"),
+ Author: Person{
+ Name: "email-address-removed",
+ InnerXML: "<name>email-address-removed</name>",
+ },
+ Id: "urn:md5:0a2a4f19bb815101f0ba2904aed7c35a",
+ Summary: Text{
+ Type: "html",
+ Body: `
+ This fixes the buggy tab rendering that can be seen at
+http://codereview.appspot.com/116075/diff/1/2
+
+The fundamental problem was that the tab code was
+not being told what column the text began in, so it
+didn&#39;t know where to put the tab stops. Another problem
+was that some of the code assumed that string byte
+offsets were the same as column offsets, which is only
+true if there are no tabs.
+
+In the process of fixing this, I cleaned up the arguments
+to Fold and ExpandTabs and renamed them Break and
+_ExpandTabs so that I could be sure that I found all the
+call sites. I also wanted to verify that ExpandTabs was
+not being used from outside intra_region_diff.py.
+
+
+`,
+ },
+ },
+ },
+}
+
+const pathTestString = `
+<Result>
+ <Before>1</Before>
+ <Items>
+ <Item1>
+ <Value>A</Value>
+ </Item1>
+ <Item2>
+ <Value>B</Value>
+ </Item2>
+ <Item1>
+ <Value>C</Value>
+ <Value>D</Value>
+ </Item1>
+ <_>
+ <Value>E</Value>
+ </_>
+ </Items>
+ <After>2</After>
+</Result>
+`
+
+type PathTestItem struct {
+ Value string
+}
+
+type PathTestA struct {
+ Items []PathTestItem `xml:">Item1"`
+ Before, After string
+}
+
+type PathTestB struct {
+ Other []PathTestItem `xml:"Items>Item1"`
+ Before, After string
+}
+
+type PathTestC struct {
+ Values1 []string `xml:"Items>Item1>Value"`
+ Values2 []string `xml:"Items>Item2>Value"`
+ Before, After string
+}
+
+type PathTestSet struct {
+ Item1 []PathTestItem
+}
+
+type PathTestD struct {
+ Other PathTestSet `xml:"Items"`
+ Before, After string
+}
+
+type PathTestE struct {
+ Underline string `xml:"Items>_>Value"`
+ Before, After string
+}
+
+var pathTests = []interface{}{
+ &PathTestA{Items: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"},
+ &PathTestB{Other: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"},
+ &PathTestC{Values1: []string{"A", "C", "D"}, Values2: []string{"B"}, Before: "1", After: "2"},
+ &PathTestD{Other: PathTestSet{Item1: []PathTestItem{{"A"}, {"D"}}}, Before: "1", After: "2"},
+ &PathTestE{Underline: "E", Before: "1", After: "2"},
+}
+
+func TestUnmarshalPaths(t *testing.T) {
+ for _, pt := range pathTests {
+ v := reflect.New(reflect.TypeOf(pt).Elem()).Interface()
+ if err := Unmarshal([]byte(pathTestString), v); err != nil {
+ t.Fatalf("Unmarshal: %s", err)
+ }
+ if !reflect.DeepEqual(v, pt) {
+ t.Fatalf("have %#v\nwant %#v", v, pt)
+ }
+ }
+}
+
+type BadPathTestA struct {
+ First string `xml:"items>item1"`
+ Other string `xml:"items>item2"`
+ Second string `xml:"items"`
+}
+
+type BadPathTestB struct {
+ Other string `xml:"items>item2>value"`
+ First string `xml:"items>item1"`
+ Second string `xml:"items>item1>value"`
+}
+
+type BadPathTestC struct {
+ First string
+ Second string `xml:"First"`
+}
+
+type BadPathTestD struct {
+ BadPathEmbeddedA
+ BadPathEmbeddedB
+}
+
+type BadPathEmbeddedA struct {
+ First string
+}
+
+type BadPathEmbeddedB struct {
+ Second string `xml:"First"`
+}
+
+var badPathTests = []struct {
+ v, e interface{}
+}{
+ {&BadPathTestA{}, &TagPathError{reflect.TypeOf(BadPathTestA{}), "First", "items>item1", "Second", "items"}},
+ {&BadPathTestB{}, &TagPathError{reflect.TypeOf(BadPathTestB{}), "First", "items>item1", "Second", "items>item1>value"}},
+ {&BadPathTestC{}, &TagPathError{reflect.TypeOf(BadPathTestC{}), "First", "", "Second", "First"}},
+ {&BadPathTestD{}, &TagPathError{reflect.TypeOf(BadPathTestD{}), "First", "", "Second", "First"}},
+}
+
+func TestUnmarshalBadPaths(t *testing.T) {
+ for _, tt := range badPathTests {
+ err := Unmarshal([]byte(pathTestString), tt.v)
+ if !reflect.DeepEqual(err, tt.e) {
+ t.Fatalf("Unmarshal with %#v didn't fail properly:\nhave %#v,\nwant %#v", tt.v, err, tt.e)
+ }
+ }
+}
+
+const OK = "OK"
+const withoutNameTypeData = `
+<?xml version="1.0" charset="utf-8"?>
+<Test3 Attr="OK" />`
+
+type TestThree struct {
+ XMLName Name `xml:"Test3"`
+ Attr string `xml:",attr"`
+}
+
+func TestUnmarshalWithoutNameType(t *testing.T) {
+ var x TestThree
+ if err := Unmarshal([]byte(withoutNameTypeData), &x); err != nil {
+ t.Fatalf("Unmarshal: %s", err)
+ }
+ if x.Attr != OK {
+ t.Fatalf("have %v\nwant %v", x.Attr, OK)
+ }
+}
+
+func TestUnmarshalAttr(t *testing.T) {
+ type ParamVal struct {
+ Int int `xml:"int,attr"`
+ }
+
+ type ParamPtr struct {
+ Int *int `xml:"int,attr"`
+ }
+
+ type ParamStringPtr struct {
+ Int *string `xml:"int,attr"`
+ }
+
+ x := []byte(`<Param int="1" />`)
+
+ p1 := &ParamPtr{}
+ if err := Unmarshal(x, p1); err != nil {
+ t.Fatalf("Unmarshal: %s", err)
+ }
+ if p1.Int == nil {
+ t.Fatalf("Unmarshal failed in to *int field")
+ } else if *p1.Int != 1 {
+ t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p1.Int, 1)
+ }
+
+ p2 := &ParamVal{}
+ if err := Unmarshal(x, p2); err != nil {
+ t.Fatalf("Unmarshal: %s", err)
+ }
+ if p2.Int != 1 {
+ t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p2.Int, 1)
+ }
+
+ p3 := &ParamStringPtr{}
+ if err := Unmarshal(x, p3); err != nil {
+ t.Fatalf("Unmarshal: %s", err)
+ }
+ if p3.Int == nil {
+ t.Fatalf("Unmarshal failed in to *string field")
+ } else if *p3.Int != "1" {
+ t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p3.Int, 1)
+ }
+}
+
+type Tables struct {
+ HTable string `xml:"http://www.w3.org/TR/html4/ table"`
+ FTable string `xml:"http://www.w3schools.com/furniture table"`
+}
+
+var tables = []struct {
+ xml string
+ tab Tables
+ ns string
+}{
+ {
+ xml: `<Tables>` +
+ `<table xmlns="http://www.w3.org/TR/html4/">hello</table>` +
+ `<table xmlns="http://www.w3schools.com/furniture">world</table>` +
+ `</Tables>`,
+ tab: Tables{"hello", "world"},
+ },
+ {
+ xml: `<Tables>` +
+ `<table xmlns="http://www.w3schools.com/furniture">world</table>` +
+ `<table xmlns="http://www.w3.org/TR/html4/">hello</table>` +
+ `</Tables>`,
+ tab: Tables{"hello", "world"},
+ },
+ {
+ xml: `<Tables xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/">` +
+ `<f:table>world</f:table>` +
+ `<h:table>hello</h:table>` +
+ `</Tables>`,
+ tab: Tables{"hello", "world"},
+ },
+ {
+ xml: `<Tables>` +
+ `<table>bogus</table>` +
+ `</Tables>`,
+ tab: Tables{},
+ },
+ {
+ xml: `<Tables>` +
+ `<table>only</table>` +
+ `</Tables>`,
+ tab: Tables{HTable: "only"},
+ ns: "http://www.w3.org/TR/html4/",
+ },
+ {
+ xml: `<Tables>` +
+ `<table>only</table>` +
+ `</Tables>`,
+ tab: Tables{FTable: "only"},
+ ns: "http://www.w3schools.com/furniture",
+ },
+ {
+ xml: `<Tables>` +
+ `<table>only</table>` +
+ `</Tables>`,
+ tab: Tables{},
+ ns: "something else entirely",
+ },
+}
+
+func TestUnmarshalNS(t *testing.T) {
+ for i, tt := range tables {
+ var dst Tables
+ var err error
+ if tt.ns != "" {
+ d := NewDecoder(strings.NewReader(tt.xml))
+ d.DefaultSpace = tt.ns
+ err = d.Decode(&dst)
+ } else {
+ err = Unmarshal([]byte(tt.xml), &dst)
+ }
+ if err != nil {
+ t.Errorf("#%d: Unmarshal: %v", i, err)
+ continue
+ }
+ want := tt.tab
+ if dst != want {
+ t.Errorf("#%d: dst=%+v, want %+v", i, dst, want)
+ }
+ }
+}
+
+func TestRoundTrip(t *testing.T) {
+ // From issue 7535
+ const s = `<ex:element xmlns:ex="http://example.com/schema"></ex:element>`
+ in := bytes.NewBufferString(s)
+ for i := 0; i < 10; i++ {
+ out := &bytes.Buffer{}
+ d := NewDecoder(in)
+ e := NewEncoder(out)
+
+ for {
+ t, err := d.Token()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ fmt.Println("failed:", err)
+ return
+ }
+ e.EncodeToken(t)
+ }
+ e.Flush()
+ in = out
+ }
+ if got := in.String(); got != s {
+ t.Errorf("have: %q\nwant: %q\n", got, s)
+ }
+}
+
+func TestMarshalNS(t *testing.T) {
+ dst := Tables{"hello", "world"}
+ data, err := Marshal(&dst)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ want := `<Tables><table xmlns="http://www.w3.org/TR/html4/">hello</table><table xmlns="http://www.w3schools.com/furniture">world</table></Tables>`
+ str := string(data)
+ if str != want {
+ t.Errorf("have: %q\nwant: %q\n", str, want)
+ }
+}
+
+type TableAttrs struct {
+ TAttr TAttr
+}
+
+type TAttr struct {
+ HTable string `xml:"http://www.w3.org/TR/html4/ table,attr"`
+ FTable string `xml:"http://www.w3schools.com/furniture table,attr"`
+ Lang string `xml:"http://www.w3.org/XML/1998/namespace lang,attr,omitempty"`
+ Other1 string `xml:"http://golang.org/xml/ other,attr,omitempty"`
+ Other2 string `xml:"http://golang.org/xmlfoo/ other,attr,omitempty"`
+ Other3 string `xml:"http://golang.org/json/ other,attr,omitempty"`
+ Other4 string `xml:"http://golang.org/2/json/ other,attr,omitempty"`
+}
+
+var tableAttrs = []struct {
+ xml string
+ tab TableAttrs
+ ns string
+}{
+ {
+ xml: `<TableAttrs xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/"><TAttr ` +
+ `h:table="hello" f:table="world" ` +
+ `/></TableAttrs>`,
+ tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}},
+ },
+ {
+ xml: `<TableAttrs><TAttr xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/" ` +
+ `h:table="hello" f:table="world" ` +
+ `/></TableAttrs>`,
+ tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}},
+ },
+ {
+ xml: `<TableAttrs><TAttr ` +
+ `h:table="hello" f:table="world" xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/" ` +
+ `/></TableAttrs>`,
+ tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}},
+ },
+ {
+ // Default space does not apply to attribute names.
+ xml: `<TableAttrs xmlns="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/"><TAttr ` +
+ `h:table="hello" table="world" ` +
+ `/></TableAttrs>`,
+ tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}},
+ },
+ {
+ // Default space does not apply to attribute names.
+ xml: `<TableAttrs xmlns:f="http://www.w3schools.com/furniture"><TAttr xmlns="http://www.w3.org/TR/html4/" ` +
+ `table="hello" f:table="world" ` +
+ `/></TableAttrs>`,
+ tab: TableAttrs{TAttr{HTable: "", FTable: "world"}},
+ },
+ {
+ xml: `<TableAttrs><TAttr ` +
+ `table="bogus" ` +
+ `/></TableAttrs>`,
+ tab: TableAttrs{},
+ },
+ {
+ // Default space does not apply to attribute names.
+ xml: `<TableAttrs xmlns:h="http://www.w3.org/TR/html4/"><TAttr ` +
+ `h:table="hello" table="world" ` +
+ `/></TableAttrs>`,
+ tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}},
+ ns: "http://www.w3schools.com/furniture",
+ },
+ {
+ // Default space does not apply to attribute names.
+ xml: `<TableAttrs xmlns:f="http://www.w3schools.com/furniture"><TAttr ` +
+ `table="hello" f:table="world" ` +
+ `/></TableAttrs>`,
+ tab: TableAttrs{TAttr{HTable: "", FTable: "world"}},
+ ns: "http://www.w3.org/TR/html4/",
+ },
+ {
+ xml: `<TableAttrs><TAttr ` +
+ `table="bogus" ` +
+ `/></TableAttrs>`,
+ tab: TableAttrs{},
+ ns: "something else entirely",
+ },
+}
+
+func TestUnmarshalNSAttr(t *testing.T) {
+ for i, tt := range tableAttrs {
+ var dst TableAttrs
+ var err error
+ if tt.ns != "" {
+ d := NewDecoder(strings.NewReader(tt.xml))
+ d.DefaultSpace = tt.ns
+ err = d.Decode(&dst)
+ } else {
+ err = Unmarshal([]byte(tt.xml), &dst)
+ }
+ if err != nil {
+ t.Errorf("#%d: Unmarshal: %v", i, err)
+ continue
+ }
+ want := tt.tab
+ if dst != want {
+ t.Errorf("#%d: dst=%+v, want %+v", i, dst, want)
+ }
+ }
+}
+
+func TestMarshalNSAttr(t *testing.T) {
+ src := TableAttrs{TAttr{"hello", "world", "en_US", "other1", "other2", "other3", "other4"}}
+ data, err := Marshal(&src)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ want := `<TableAttrs><TAttr xmlns:json_1="http://golang.org/2/json/" xmlns:json="http://golang.org/json/" xmlns:_xmlfoo="http://golang.org/xmlfoo/" xmlns:_xml="http://golang.org/xml/" xmlns:furniture="http://www.w3schools.com/furniture" xmlns:html4="http://www.w3.org/TR/html4/" html4:table="hello" furniture:table="world" xml:lang="en_US" _xml:other="other1" _xmlfoo:other="other2" json:other="other3" json_1:other="other4"></TAttr></TableAttrs>`
+ str := string(data)
+ if str != want {
+ t.Errorf("Marshal:\nhave: %#q\nwant: %#q\n", str, want)
+ }
+
+ var dst TableAttrs
+ if err := Unmarshal(data, &dst); err != nil {
+ t.Errorf("Unmarshal: %v", err)
+ }
+
+ if dst != src {
+ t.Errorf("Unmarshal = %q, want %q", dst, src)
+ }
+}
+
+type MyCharData struct {
+ body string
+}
+
+func (m *MyCharData) UnmarshalXML(d *Decoder, start StartElement) error {
+ for {
+ t, err := d.Token()
+ if err == io.EOF { // found end of element
+ break
+ }
+ if err != nil {
+ return err
+ }
+ if char, ok := t.(CharData); ok {
+ m.body += string(char)
+ }
+ }
+ return nil
+}
+
+var _ Unmarshaler = (*MyCharData)(nil)
+
+func (m *MyCharData) UnmarshalXMLAttr(attr Attr) error {
+ panic("must not call")
+}
+
+type MyAttr struct {
+ attr string
+}
+
+func (m *MyAttr) UnmarshalXMLAttr(attr Attr) error {
+ m.attr = attr.Value
+ return nil
+}
+
+var _ UnmarshalerAttr = (*MyAttr)(nil)
+
+type MyStruct struct {
+ Data *MyCharData
+ Attr *MyAttr `xml:",attr"`
+
+ Data2 MyCharData
+ Attr2 MyAttr `xml:",attr"`
+}
+
+func TestUnmarshaler(t *testing.T) {
+ xml := `<?xml version="1.0" encoding="utf-8"?>
+ <MyStruct Attr="attr1" Attr2="attr2">
+ <Data>hello <!-- comment -->world</Data>
+ <Data2>howdy <!-- comment -->world</Data2>
+ </MyStruct>
+ `
+
+ var m MyStruct
+ if err := Unmarshal([]byte(xml), &m); err != nil {
+ t.Fatal(err)
+ }
+
+ if m.Data == nil || m.Attr == nil || m.Data.body != "hello world" || m.Attr.attr != "attr1" || m.Data2.body != "howdy world" || m.Attr2.attr != "attr2" {
+ t.Errorf("m=%#+v\n", m)
+ }
+}
+
+type Pea struct {
+ Cotelydon string
+}
+
+type Pod struct {
+ Pea interface{} `xml:"Pea"`
+}
+
+// https://golang.org/issue/6836
+func TestUnmarshalIntoInterface(t *testing.T) {
+ pod := new(Pod)
+ pod.Pea = new(Pea)
+ xml := `<Pod><Pea><Cotelydon>Green stuff</Cotelydon></Pea></Pod>`
+ err := Unmarshal([]byte(xml), pod)
+ if err != nil {
+ t.Fatalf("failed to unmarshal %q: %v", xml, err)
+ }
+ pea, ok := pod.Pea.(*Pea)
+ if !ok {
+ t.Fatalf("unmarshalled into wrong type: have %T want *Pea", pod.Pea)
+ }
+ have, want := pea.Cotelydon, "Green stuff"
+ if have != want {
+ t.Errorf("failed to unmarshal into interface, have %q want %q", have, want)
+ }
+}
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go
new file mode 100644
index 000000000..fdde288bc
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go
@@ -0,0 +1,371 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// typeInfo holds details for the xml representation of a type.
+type typeInfo struct {
+ xmlname *fieldInfo
+ fields []fieldInfo
+}
+
+// fieldInfo holds details for the xml representation of a single field.
+type fieldInfo struct {
+ idx []int
+ name string
+ xmlns string
+ flags fieldFlags
+ parents []string
+}
+
+type fieldFlags int
+
+const (
+ fElement fieldFlags = 1 << iota
+ fAttr
+ fCharData
+ fInnerXml
+ fComment
+ fAny
+
+ fOmitEmpty
+
+ fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny
+)
+
+var tinfoMap = make(map[reflect.Type]*typeInfo)
+var tinfoLock sync.RWMutex
+
+var nameType = reflect.TypeOf(Name{})
+
+// getTypeInfo returns the typeInfo structure with details necessary
+// for marshalling and unmarshalling typ.
+func getTypeInfo(typ reflect.Type) (*typeInfo, error) {
+ tinfoLock.RLock()
+ tinfo, ok := tinfoMap[typ]
+ tinfoLock.RUnlock()
+ if ok {
+ return tinfo, nil
+ }
+ tinfo = &typeInfo{}
+ if typ.Kind() == reflect.Struct && typ != nameType {
+ n := typ.NumField()
+ for i := 0; i < n; i++ {
+ f := typ.Field(i)
+ if f.PkgPath != "" || f.Tag.Get("xml") == "-" {
+ continue // Private field
+ }
+
+ // For embedded structs, embed its fields.
+ if f.Anonymous {
+ t := f.Type
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Struct {
+ inner, err := getTypeInfo(t)
+ if err != nil {
+ return nil, err
+ }
+ if tinfo.xmlname == nil {
+ tinfo.xmlname = inner.xmlname
+ }
+ for _, finfo := range inner.fields {
+ finfo.idx = append([]int{i}, finfo.idx...)
+ if err := addFieldInfo(typ, tinfo, &finfo); err != nil {
+ return nil, err
+ }
+ }
+ continue
+ }
+ }
+
+ finfo, err := structFieldInfo(typ, &f)
+ if err != nil {
+ return nil, err
+ }
+
+ if f.Name == "XMLName" {
+ tinfo.xmlname = finfo
+ continue
+ }
+
+ // Add the field if it doesn't conflict with other fields.
+ if err := addFieldInfo(typ, tinfo, finfo); err != nil {
+ return nil, err
+ }
+ }
+ }
+ tinfoLock.Lock()
+ tinfoMap[typ] = tinfo
+ tinfoLock.Unlock()
+ return tinfo, nil
+}
+
+// structFieldInfo builds and returns a fieldInfo for f.
+func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) {
+ finfo := &fieldInfo{idx: f.Index}
+
+ // Split the tag from the xml namespace if necessary.
+ tag := f.Tag.Get("xml")
+ if i := strings.Index(tag, " "); i >= 0 {
+ finfo.xmlns, tag = tag[:i], tag[i+1:]
+ }
+
+ // Parse flags.
+ tokens := strings.Split(tag, ",")
+ if len(tokens) == 1 {
+ finfo.flags = fElement
+ } else {
+ tag = tokens[0]
+ for _, flag := range tokens[1:] {
+ switch flag {
+ case "attr":
+ finfo.flags |= fAttr
+ case "chardata":
+ finfo.flags |= fCharData
+ case "innerxml":
+ finfo.flags |= fInnerXml
+ case "comment":
+ finfo.flags |= fComment
+ case "any":
+ finfo.flags |= fAny
+ case "omitempty":
+ finfo.flags |= fOmitEmpty
+ }
+ }
+
+ // Validate the flags used.
+ valid := true
+ switch mode := finfo.flags & fMode; mode {
+ case 0:
+ finfo.flags |= fElement
+ case fAttr, fCharData, fInnerXml, fComment, fAny:
+ if f.Name == "XMLName" || tag != "" && mode != fAttr {
+ valid = false
+ }
+ default:
+ // This will also catch multiple modes in a single field.
+ valid = false
+ }
+ if finfo.flags&fMode == fAny {
+ finfo.flags |= fElement
+ }
+ if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 {
+ valid = false
+ }
+ if !valid {
+ return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q",
+ f.Name, typ, f.Tag.Get("xml"))
+ }
+ }
+
+ // Use of xmlns without a name is not allowed.
+ if finfo.xmlns != "" && tag == "" {
+ return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q",
+ f.Name, typ, f.Tag.Get("xml"))
+ }
+
+ if f.Name == "XMLName" {
+ // The XMLName field records the XML element name. Don't
+ // process it as usual because its name should default to
+ // empty rather than to the field name.
+ finfo.name = tag
+ return finfo, nil
+ }
+
+ if tag == "" {
+ // If the name part of the tag is completely empty, get
+ // default from XMLName of underlying struct if feasible,
+ // or field name otherwise.
+ if xmlname := lookupXMLName(f.Type); xmlname != nil {
+ finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name
+ } else {
+ finfo.name = f.Name
+ }
+ return finfo, nil
+ }
+
+ if finfo.xmlns == "" && finfo.flags&fAttr == 0 {
+ // If it's an element with no namespace specified, get the default
+ // from the XMLName of the enclosing struct if possible.
+ if xmlname := lookupXMLName(typ); xmlname != nil {
+ finfo.xmlns = xmlname.xmlns
+ }
+ }
+
+ // Prepare field name and parents.
+ parents := strings.Split(tag, ">")
+ if parents[0] == "" {
+ parents[0] = f.Name
+ }
+ if parents[len(parents)-1] == "" {
+ return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ)
+ }
+ finfo.name = parents[len(parents)-1]
+ if len(parents) > 1 {
+ if (finfo.flags & fElement) == 0 {
+ return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ","))
+ }
+ finfo.parents = parents[:len(parents)-1]
+ }
+
+ // If the field type has an XMLName field, the names must match
+ // so that the behavior of both marshalling and unmarshalling
+ // is straightforward and unambiguous.
+ if finfo.flags&fElement != 0 {
+ ftyp := f.Type
+ xmlname := lookupXMLName(ftyp)
+ if xmlname != nil && xmlname.name != finfo.name {
+ return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName",
+ finfo.name, typ, f.Name, xmlname.name, ftyp)
+ }
+ }
+ return finfo, nil
+}
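
The tag grammar parsed above is "[namespace ]name[,flag...]". A small sketch of how those pieces look on a struct, using the upstream encoding/xml package; the Atom namespace and field names are illustrative assumptions.

package main

import (
    "encoding/xml"
    "fmt"
)

// Illustrative example of namespace, attribute mode, and omitempty flags.
type AtomLink struct {
    XMLName xml.Name `xml:"http://www.w3.org/2005/Atom link"` // namespace + name
    Href    string   `xml:"href,attr"`                        // attribute mode
    Rel     string   `xml:"rel,attr,omitempty"`               // attribute, dropped when empty
    Title   string   `xml:"title,omitempty"`                  // child element, dropped when empty
}

func main() {
    out, err := xml.Marshal(AtomLink{Href: "https://example.com/feed"})
    if err != nil {
        fmt.Println("marshal:", err)
        return
    }
    // Roughly: <link xmlns="http://www.w3.org/2005/Atom" href="https://example.com/feed"></link>
    fmt.Println(string(out))
}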
+
+// lookupXMLName returns the fieldInfo for typ's XMLName field
+// in case it exists and has a valid xml field tag, otherwise
+// it returns nil.
+func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) {
+ for typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ }
+ if typ.Kind() != reflect.Struct {
+ return nil
+ }
+ for i, n := 0, typ.NumField(); i < n; i++ {
+ f := typ.Field(i)
+ if f.Name != "XMLName" {
+ continue
+ }
+ finfo, err := structFieldInfo(typ, &f)
+ if finfo.name != "" && err == nil {
+ return finfo
+ }
+ // Also consider errors as a non-existent field tag
+ // and let getTypeInfo itself report the error.
+ break
+ }
+ return nil
+}
+
+func min(a, b int) int {
+ if a <= b {
+ return a
+ }
+ return b
+}
+
+// addFieldInfo adds finfo to tinfo.fields if there are no
+// conflicts, or if conflicts arise from previous fields that were
+// obtained from deeper embedded structures than finfo. In the latter
+// case, the conflicting entries are dropped.
+// A conflict occurs when the path (parent + name) to a field is
+// itself a prefix of another path, or when two paths match exactly.
+// It is okay for field paths to share a common, shorter prefix.
+func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error {
+ var conflicts []int
+Loop:
+ // First, figure all conflicts. Most working code will have none.
+ for i := range tinfo.fields {
+ oldf := &tinfo.fields[i]
+ if oldf.flags&fMode != newf.flags&fMode {
+ continue
+ }
+ if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns {
+ continue
+ }
+ minl := min(len(newf.parents), len(oldf.parents))
+ for p := 0; p < minl; p++ {
+ if oldf.parents[p] != newf.parents[p] {
+ continue Loop
+ }
+ }
+ if len(oldf.parents) > len(newf.parents) {
+ if oldf.parents[len(newf.parents)] == newf.name {
+ conflicts = append(conflicts, i)
+ }
+ } else if len(oldf.parents) < len(newf.parents) {
+ if newf.parents[len(oldf.parents)] == oldf.name {
+ conflicts = append(conflicts, i)
+ }
+ } else {
+ if newf.name == oldf.name {
+ conflicts = append(conflicts, i)
+ }
+ }
+ }
+ // Without conflicts, add the new field and return.
+ if conflicts == nil {
+ tinfo.fields = append(tinfo.fields, *newf)
+ return nil
+ }
+
+ // If any conflict is shallower, ignore the new field.
+ // This matches the Go field resolution on embedding.
+ for _, i := range conflicts {
+ if len(tinfo.fields[i].idx) < len(newf.idx) {
+ return nil
+ }
+ }
+
+ // Otherwise, if any of them is at the same depth level, it's an error.
+ for _, i := range conflicts {
+ oldf := &tinfo.fields[i]
+ if len(oldf.idx) == len(newf.idx) {
+ f1 := typ.FieldByIndex(oldf.idx)
+ f2 := typ.FieldByIndex(newf.idx)
+ return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")}
+ }
+ }
+
+ // Otherwise, the new field is shallower, and thus takes precedence,
+ // so drop the conflicting fields from tinfo and append the new one.
+ for c := len(conflicts) - 1; c >= 0; c-- {
+ i := conflicts[c]
+ copy(tinfo.fields[i:], tinfo.fields[i+1:])
+ tinfo.fields = tinfo.fields[:len(tinfo.fields)-1]
+ }
+ tinfo.fields = append(tinfo.fields, *newf)
+ return nil
+}
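
A sketch of the precedence rule implemented above, with made-up types: a field at a shallower embedding depth silently drops a deeper conflicting one, while equal-depth conflicts surface as a *TagPathError (as the BadPathTest cases earlier demonstrate). Shown with the upstream encoding/xml package, which follows the same rule.

package main

import (
    "encoding/xml"
    "fmt"
)

type base struct {
    Name string `xml:"name"`
}

type Outer struct {
    base          // promotes base.Name at a deeper field index
    Name   string `xml:"name"` // shallower, so it wins the conflict
}

func main() {
    var o Outer
    err := xml.Unmarshal([]byte(`<Outer><name>picked</name></Outer>`), &o)
    // Expected: o.Name == "picked", o.base.Name left empty, err == nil
    fmt.Printf("%q %q %v\n", o.Name, o.base.Name, err)
}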
+
+// A TagPathError represents an error in the unmarshalling process
+// caused by the use of field tags with conflicting paths.
+type TagPathError struct {
+ Struct reflect.Type
+ Field1, Tag1 string
+ Field2, Tag2 string
+}
+
+func (e *TagPathError) Error() string {
+ return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2)
+}
+
+// value returns v's field value corresponding to finfo.
+// It's equivalent to v.FieldByIndex(finfo.idx), but initializes
+// and dereferences pointers as necessary.
+func (finfo *fieldInfo) value(v reflect.Value) reflect.Value {
+ for i, x := range finfo.idx {
+ if i > 0 {
+ t := v.Type()
+ if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ }
+ v = v.Field(x)
+ }
+ return v
+}
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/xml.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/xml.go
new file mode 100644
index 000000000..5b79cbecb
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/xml.go
@@ -0,0 +1,1998 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package xml implements a simple XML 1.0 parser that
+// understands XML name spaces.
+package xml
+
+// References:
+// Annotated XML spec: http://www.xml.com/axml/testaxml.htm
+// XML name spaces: http://www.w3.org/TR/REC-xml-names/
+
+// TODO(rsc):
+// Test error handling.
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// A SyntaxError represents a syntax error in the XML input stream.
+type SyntaxError struct {
+ Msg string
+ Line int
+}
+
+func (e *SyntaxError) Error() string {
+ return "XML syntax error on line " + strconv.Itoa(e.Line) + ": " + e.Msg
+}
+
+// A Name represents an XML name (Local) annotated with a name space
+// identifier (Space). In tokens returned by Decoder.Token, the Space
+// identifier is given as a canonical URL, not the short prefix used in
+// the document being parsed.
+//
+// As a special case, XML namespace declarations will use the literal
+// string "xmlns" for the Space field instead of the fully resolved URL.
+// See Encoder.EncodeToken for more information on namespace encoding
+// behaviour.
+type Name struct {
+ Space, Local string
+}
+
+// isNamespace reports whether the name is a namespace-defining name.
+func (name Name) isNamespace() bool {
+ return name.Local == "xmlns" || name.Space == "xmlns"
+}
+
+// An Attr represents an attribute in an XML element (Name=Value).
+type Attr struct {
+ Name Name
+ Value string
+}
+
+// A Token is an interface holding one of the token types:
+// StartElement, EndElement, CharData, Comment, ProcInst, or Directive.
+type Token interface{}
+
+// A StartElement represents an XML start element.
+type StartElement struct {
+ Name Name
+ Attr []Attr
+}
+
+func (e StartElement) Copy() StartElement {
+ attrs := make([]Attr, len(e.Attr))
+ copy(attrs, e.Attr)
+ e.Attr = attrs
+ return e
+}
+
+// End returns the corresponding XML end element.
+func (e StartElement) End() EndElement {
+ return EndElement{e.Name}
+}
+
+// setDefaultNamespace sets the namespace of the element
+// as the default for all elements contained within it.
+func (e *StartElement) setDefaultNamespace() {
+ if e.Name.Space == "" {
+ // If there's no namespace on the element, don't
+ // set the default. Strictly speaking this might be wrong, as
+ // we can't tell if the element had no namespace set
+ // or was just using the default namespace.
+ return
+ }
+ // Don't add a default name space if there's already one set.
+ for _, attr := range e.Attr {
+ if attr.Name.Space == "" && attr.Name.Local == "xmlns" {
+ return
+ }
+ }
+ e.Attr = append(e.Attr, Attr{
+ Name: Name{
+ Local: "xmlns",
+ },
+ Value: e.Name.Space,
+ })
+}
+
+// An EndElement represents an XML end element.
+type EndElement struct {
+ Name Name
+}
+
+// A CharData represents XML character data (raw text),
+// in which XML escape sequences have been replaced by
+// the characters they represent.
+type CharData []byte
+
+func makeCopy(b []byte) []byte {
+ b1 := make([]byte, len(b))
+ copy(b1, b)
+ return b1
+}
+
+func (c CharData) Copy() CharData { return CharData(makeCopy(c)) }
+
+// A Comment represents an XML comment of the form <!--comment-->.
+// The bytes do not include the <!-- and --> comment markers.
+type Comment []byte
+
+func (c Comment) Copy() Comment { return Comment(makeCopy(c)) }
+
+// A ProcInst represents an XML processing instruction of the form <?target inst?>
+type ProcInst struct {
+ Target string
+ Inst []byte
+}
+
+func (p ProcInst) Copy() ProcInst {
+ p.Inst = makeCopy(p.Inst)
+ return p
+}
+
+// A Directive represents an XML directive of the form <!text>.
+// The bytes do not include the <! and > markers.
+type Directive []byte
+
+func (d Directive) Copy() Directive { return Directive(makeCopy(d)) }
+
+// CopyToken returns a copy of a Token.
+func CopyToken(t Token) Token {
+ switch v := t.(type) {
+ case CharData:
+ return v.Copy()
+ case Comment:
+ return v.Copy()
+ case Directive:
+ return v.Copy()
+ case ProcInst:
+ return v.Copy()
+ case StartElement:
+ return v.Copy()
+ }
+ return t
+}
+
+// A Decoder represents an XML parser reading a particular input stream.
+// The parser assumes that its input is encoded in UTF-8.
+type Decoder struct {
+ // Strict defaults to true, enforcing the requirements
+ // of the XML specification.
+ // If set to false, the parser allows input containing common
+ // mistakes:
+ // * If an element is missing an end tag, the parser invents
+ // end tags as necessary to keep the return values from Token
+ // properly balanced.
+ // * In attribute values and character data, unknown or malformed
+ // character entities (sequences beginning with &) are left alone.
+ //
+ // Setting:
+ //
+ // d.Strict = false;
+ // d.AutoClose = HTMLAutoClose;
+ // d.Entity = HTMLEntity
+ //
+ // creates a parser that can handle typical HTML.
+ //
+ // Strict mode does not enforce the requirements of the XML name spaces TR.
+ // In particular it does not reject name space tags using undefined prefixes.
+ // Such tags are recorded with the unknown prefix as the name space URL.
+ Strict bool
+
+ // When Strict == false, AutoClose indicates a set of elements to
+ // consider closed immediately after they are opened, regardless
+ // of whether an end element is present.
+ AutoClose []string
+
+ // Entity can be used to map non-standard entity names to string replacements.
+ // The parser behaves as if these standard mappings are present in the map,
+ // regardless of the actual map content:
+ //
+ // "lt": "<",
+ // "gt": ">",
+ // "amp": "&",
+ // "apos": "'",
+ // "quot": `"`,
+ Entity map[string]string
+
+ // CharsetReader, if non-nil, defines a function to generate
+ // charset-conversion readers, converting from the provided
+ // non-UTF-8 charset into UTF-8. If CharsetReader is nil or
+ // returns an error, parsing stops with an error. One of the
+ // the CharsetReader's result values must be non-nil.
+ CharsetReader func(charset string, input io.Reader) (io.Reader, error)
+
+ // DefaultSpace sets the default name space used for unadorned tags,
+ // as if the entire XML stream were wrapped in an element containing
+ // the attribute xmlns="DefaultSpace".
+ DefaultSpace string
+
+ r io.ByteReader
+ buf bytes.Buffer
+ saved *bytes.Buffer
+ stk *stack
+ free *stack
+ needClose bool
+ toClose Name
+ nextToken Token
+ nextByte int
+ ns map[string]string
+ err error
+ line int
+ offset int64
+ unmarshalDepth int
+}
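
A minimal sketch of the lenient configuration the Strict, AutoClose and Entity comments describe, using the HTMLAutoClose and HTMLEntity tables exported by the upstream encoding/xml package; the input document is invented for illustration.

package main

import (
    "encoding/xml"
    "fmt"
    "strings"
)

func main() {
    const doc = `<p>caf&eacute;<br><p>second`
    d := xml.NewDecoder(strings.NewReader(doc))
    d.Strict = false                // tolerate missing end tags
    d.AutoClose = xml.HTMLAutoClose // treat <br> as self-closing
    d.Entity = xml.HTMLEntity       // resolve HTML entities such as &eacute;
    for {
        tok, err := d.Token()
        if err != nil {
            break
        }
        if cd, ok := tok.(xml.CharData); ok {
            fmt.Printf("%q\n", string(cd)) // "café", then "second"
        }
    }
}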
+
+// NewDecoder creates a new XML parser reading from r.
+// If r does not implement io.ByteReader, NewDecoder will
+// do its own buffering.
+func NewDecoder(r io.Reader) *Decoder {
+ d := &Decoder{
+ ns: make(map[string]string),
+ nextByte: -1,
+ line: 1,
+ Strict: true,
+ }
+ d.switchToReader(r)
+ return d
+}
+
+// Token returns the next XML token in the input stream.
+// At the end of the input stream, Token returns nil, io.EOF.
+//
+// Slices of bytes in the returned token data refer to the
+// parser's internal buffer and remain valid only until the next
+// call to Token. To acquire a copy of the bytes, call CopyToken
+// or the token's Copy method.
+//
+// Token expands self-closing elements such as <br/>
+// into separate start and end elements returned by successive calls.
+//
+// Token guarantees that the StartElement and EndElement
+// tokens it returns are properly nested and matched:
+// if Token encounters an unexpected end element,
+// it will return an error.
+//
+// Token implements XML name spaces as described by
+// http://www.w3.org/TR/REC-xml-names/. Each of the
+// Name structures contained in the Token has the Space
+// set to the URL identifying its name space when known.
+// If Token encounters an unrecognized name space prefix,
+// it uses the prefix as the Space rather than report an error.
+func (d *Decoder) Token() (t Token, err error) {
+ if d.stk != nil && d.stk.kind == stkEOF {
+ err = io.EOF
+ return
+ }
+ if d.nextToken != nil {
+ t = d.nextToken
+ d.nextToken = nil
+ } else if t, err = d.rawToken(); err != nil {
+ return
+ }
+
+ if !d.Strict {
+ if t1, ok := d.autoClose(t); ok {
+ d.nextToken = t
+ t = t1
+ }
+ }
+ switch t1 := t.(type) {
+ case StartElement:
+ // In XML name spaces, the translations listed in the
+ // attributes apply to the element name and
+ // to the other attribute names, so process
+ // the translations first.
+ for _, a := range t1.Attr {
+ if a.Name.Space == "xmlns" {
+ v, ok := d.ns[a.Name.Local]
+ d.pushNs(a.Name.Local, v, ok)
+ d.ns[a.Name.Local] = a.Value
+ }
+ if a.Name.Space == "" && a.Name.Local == "xmlns" {
+ // Default space for untagged names
+ v, ok := d.ns[""]
+ d.pushNs("", v, ok)
+ d.ns[""] = a.Value
+ }
+ }
+
+ d.translate(&t1.Name, true)
+ for i := range t1.Attr {
+ d.translate(&t1.Attr[i].Name, false)
+ }
+ d.pushElement(t1.Name)
+ t = t1
+
+ case EndElement:
+ d.translate(&t1.Name, true)
+ if !d.popElement(&t1) {
+ return nil, d.err
+ }
+ t = t1
+ }
+ return
+}
+
+const xmlURL = "http://www.w3.org/XML/1998/namespace"
+
+// Apply name space translation to name n.
+// The default name space (for Space=="")
+// applies only to element names, not to attribute names.
+func (d *Decoder) translate(n *Name, isElementName bool) {
+ switch {
+ case n.Space == "xmlns":
+ return
+ case n.Space == "" && !isElementName:
+ return
+ case n.Space == "xml":
+ n.Space = xmlURL
+ case n.Space == "" && n.Local == "xmlns":
+ return
+ }
+ if v, ok := d.ns[n.Space]; ok {
+ n.Space = v
+ } else if n.Space == "" {
+ n.Space = d.DefaultSpace
+ }
+}
+
+func (d *Decoder) switchToReader(r io.Reader) {
+ // Get efficient byte at a time reader.
+ // Assume that if reader has its own
+ // ReadByte, it's efficient enough.
+ // Otherwise, use bufio.
+ if rb, ok := r.(io.ByteReader); ok {
+ d.r = rb
+ } else {
+ d.r = bufio.NewReader(r)
+ }
+}
+
+// Parsing state - stack holds old name space translations
+// and the current set of open elements. The translations to pop when
+// ending a given tag are *below* it on the stack, which is
+// more work but forced on us by XML.
+type stack struct {
+ next *stack
+ kind int
+ name Name
+ ok bool
+}
+
+const (
+ stkStart = iota
+ stkNs
+ stkEOF
+)
+
+func (d *Decoder) push(kind int) *stack {
+ s := d.free
+ if s != nil {
+ d.free = s.next
+ } else {
+ s = new(stack)
+ }
+ s.next = d.stk
+ s.kind = kind
+ d.stk = s
+ return s
+}
+
+func (d *Decoder) pop() *stack {
+ s := d.stk
+ if s != nil {
+ d.stk = s.next
+ s.next = d.free
+ d.free = s
+ }
+ return s
+}
+
+// Record that after the current element is finished
+// (that element is already pushed on the stack)
+// Token should return EOF until popEOF is called.
+func (d *Decoder) pushEOF() {
+ // Walk down stack to find Start.
+ // It might not be the top, because there might be stkNs
+ // entries above it.
+ start := d.stk
+ for start.kind != stkStart {
+ start = start.next
+ }
+ // The stkNs entries below a start are associated with that
+ // element too; skip over them.
+ for start.next != nil && start.next.kind == stkNs {
+ start = start.next
+ }
+ s := d.free
+ if s != nil {
+ d.free = s.next
+ } else {
+ s = new(stack)
+ }
+ s.kind = stkEOF
+ s.next = start.next
+ start.next = s
+}
+
+// Undo a pushEOF.
+// The element must have been finished, so the EOF should be at the top of the stack.
+func (d *Decoder) popEOF() bool {
+ if d.stk == nil || d.stk.kind != stkEOF {
+ return false
+ }
+ d.pop()
+ return true
+}
+
+// Record that we are starting an element with the given name.
+func (d *Decoder) pushElement(name Name) {
+ s := d.push(stkStart)
+ s.name = name
+}
+
+// Record that we are changing the value of ns[local].
+// The old value is url, ok.
+func (d *Decoder) pushNs(local string, url string, ok bool) {
+ s := d.push(stkNs)
+ s.name.Local = local
+ s.name.Space = url
+ s.ok = ok
+}
+
+// Creates a SyntaxError with the current line number.
+func (d *Decoder) syntaxError(msg string) error {
+ return &SyntaxError{Msg: msg, Line: d.line}
+}
+
+// Record that we are ending an element with the given name.
+// The name must match the record at the top of the stack,
+// which must be a pushElement record.
+// After popping the element, apply any undo records from
+// the stack to restore the name translations that existed
+// before we saw this element.
+func (d *Decoder) popElement(t *EndElement) bool {
+ s := d.pop()
+ name := t.Name
+ switch {
+ case s == nil || s.kind != stkStart:
+ d.err = d.syntaxError("unexpected end element </" + name.Local + ">")
+ return false
+ case s.name.Local != name.Local:
+ if !d.Strict {
+ d.needClose = true
+ d.toClose = t.Name
+ t.Name = s.name
+ return true
+ }
+ d.err = d.syntaxError("element <" + s.name.Local + "> closed by </" + name.Local + ">")
+ return false
+ case s.name.Space != name.Space:
+ d.err = d.syntaxError("element <" + s.name.Local + "> in space " + s.name.Space +
+ "closed by </" + name.Local + "> in space " + name.Space)
+ return false
+ }
+
+ // Pop stack until a Start or EOF is on the top, undoing the
+ // translations that were associated with the element we just closed.
+ for d.stk != nil && d.stk.kind != stkStart && d.stk.kind != stkEOF {
+ s := d.pop()
+ if s.ok {
+ d.ns[s.name.Local] = s.name.Space
+ } else {
+ delete(d.ns, s.name.Local)
+ }
+ }
+
+ return true
+}
+
+// If the top element on the stack is autoclosing and
+// t is not the end tag, invent the end tag.
+func (d *Decoder) autoClose(t Token) (Token, bool) {
+ if d.stk == nil || d.stk.kind != stkStart {
+ return nil, false
+ }
+ name := strings.ToLower(d.stk.name.Local)
+ for _, s := range d.AutoClose {
+ if strings.ToLower(s) == name {
+ // This one should be auto closed if t doesn't close it.
+ et, ok := t.(EndElement)
+ if !ok || et.Name.Local != name {
+ return EndElement{d.stk.name}, true
+ }
+ break
+ }
+ }
+ return nil, false
+}
+
+var errRawToken = errors.New("xml: cannot use RawToken from UnmarshalXML method")
+
+// RawToken is like Token but does not verify that
+// start and end elements match and does not translate
+// name space prefixes to their corresponding URLs.
+func (d *Decoder) RawToken() (Token, error) {
+ if d.unmarshalDepth > 0 {
+ return nil, errRawToken
+ }
+ return d.rawToken()
+}
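
A short sketch contrasting Token and RawToken (upstream encoding/xml shown; the element and namespace are illustrative): RawToken neither resolves name-space prefixes nor checks that start and end elements match.

package main

import (
    "encoding/xml"
    "fmt"
    "strings"
)

func main() {
    const doc = `<ex:item xmlns:ex="http://example.com/ns"></ex:item>`

    d := xml.NewDecoder(strings.NewReader(doc))
    tok, _ := d.Token() // resolves the prefix to its URL
    fmt.Println(tok.(xml.StartElement).Name) // {http://example.com/ns item}

    r := xml.NewDecoder(strings.NewReader(doc))
    raw, _ := r.RawToken() // keeps the literal prefix
    fmt.Println(raw.(xml.StartElement).Name) // {ex item}
}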
+
+func (d *Decoder) rawToken() (Token, error) {
+ if d.err != nil {
+ return nil, d.err
+ }
+ if d.needClose {
+ // The last element we read was self-closing and
+ // we returned just the StartElement half.
+ // Return the EndElement half now.
+ d.needClose = false
+ return EndElement{d.toClose}, nil
+ }
+
+ b, ok := d.getc()
+ if !ok {
+ return nil, d.err
+ }
+
+ if b != '<' {
+ // Text section.
+ d.ungetc(b)
+ data := d.text(-1, false)
+ if data == nil {
+ return nil, d.err
+ }
+ return CharData(data), nil
+ }
+
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ switch b {
+ case '/':
+ // </: End element
+ var name Name
+ if name, ok = d.nsname(); !ok {
+ if d.err == nil {
+ d.err = d.syntaxError("expected element name after </")
+ }
+ return nil, d.err
+ }
+ d.space()
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b != '>' {
+ d.err = d.syntaxError("invalid characters between </" + name.Local + " and >")
+ return nil, d.err
+ }
+ return EndElement{name}, nil
+
+ case '?':
+ // <?: Processing instruction.
+ var target string
+ if target, ok = d.name(); !ok {
+ if d.err == nil {
+ d.err = d.syntaxError("expected target name after <?")
+ }
+ return nil, d.err
+ }
+ d.space()
+ d.buf.Reset()
+ var b0 byte
+ for {
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ d.buf.WriteByte(b)
+ if b0 == '?' && b == '>' {
+ break
+ }
+ b0 = b
+ }
+ data := d.buf.Bytes()
+ data = data[0 : len(data)-2] // chop ?>
+
+ if target == "xml" {
+ content := string(data)
+ ver := procInst("version", content)
+ if ver != "" && ver != "1.0" {
+ d.err = fmt.Errorf("xml: unsupported version %q; only version 1.0 is supported", ver)
+ return nil, d.err
+ }
+ enc := procInst("encoding", content)
+ if enc != "" && enc != "utf-8" && enc != "UTF-8" {
+ if d.CharsetReader == nil {
+ d.err = fmt.Errorf("xml: encoding %q declared but Decoder.CharsetReader is nil", enc)
+ return nil, d.err
+ }
+ newr, err := d.CharsetReader(enc, d.r.(io.Reader))
+ if err != nil {
+ d.err = fmt.Errorf("xml: opening charset %q: %v", enc, err)
+ return nil, d.err
+ }
+ if newr == nil {
+ panic("CharsetReader returned a nil Reader for charset " + enc)
+ }
+ d.switchToReader(newr)
+ }
+ }
+ return ProcInst{target, data}, nil
+
+ case '!':
+ // <!: Maybe comment, maybe CDATA.
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ switch b {
+ case '-': // <!-
+ // Probably <!-- for a comment.
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b != '-' {
+ d.err = d.syntaxError("invalid sequence <!- not part of <!--")
+ return nil, d.err
+ }
+ // Look for terminator.
+ d.buf.Reset()
+ var b0, b1 byte
+ for {
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ d.buf.WriteByte(b)
+ if b0 == '-' && b1 == '-' && b == '>' {
+ break
+ }
+ b0, b1 = b1, b
+ }
+ data := d.buf.Bytes()
+ data = data[0 : len(data)-3] // chop -->
+ return Comment(data), nil
+
+ case '[': // <![
+ // Probably <![CDATA[.
+ for i := 0; i < 6; i++ {
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b != "CDATA["[i] {
+ d.err = d.syntaxError("invalid <![ sequence")
+ return nil, d.err
+ }
+ }
+ // Have <![CDATA[. Read text until ]]>.
+ data := d.text(-1, true)
+ if data == nil {
+ return nil, d.err
+ }
+ return CharData(data), nil
+ }
+
+ // Probably a directive: <!DOCTYPE ...>, <!ENTITY ...>, etc.
+ // We don't care, but accumulate for caller. Quoted angle
+ // brackets do not count for nesting.
+ d.buf.Reset()
+ d.buf.WriteByte(b)
+ inquote := uint8(0)
+ depth := 0
+ for {
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if inquote == 0 && b == '>' && depth == 0 {
+ break
+ }
+ HandleB:
+ d.buf.WriteByte(b)
+ switch {
+ case b == inquote:
+ inquote = 0
+
+ case inquote != 0:
+ // in quotes, no special action
+
+ case b == '\'' || b == '"':
+ inquote = b
+
+ case b == '>' && inquote == 0:
+ depth--
+
+ case b == '<' && inquote == 0:
+ // Look for <!-- to begin comment.
+ s := "!--"
+ for i := 0; i < len(s); i++ {
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b != s[i] {
+ for j := 0; j < i; j++ {
+ d.buf.WriteByte(s[j])
+ }
+ depth++
+ goto HandleB
+ }
+ }
+
+ // Remove < that was written above.
+ d.buf.Truncate(d.buf.Len() - 1)
+
+ // Look for terminator.
+ var b0, b1 byte
+ for {
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b0 == '-' && b1 == '-' && b == '>' {
+ break
+ }
+ b0, b1 = b1, b
+ }
+ }
+ }
+ return Directive(d.buf.Bytes()), nil
+ }
+
+ // Must be an open element like <a href="foo">
+ d.ungetc(b)
+
+ var (
+ name Name
+ empty bool
+ attr []Attr
+ )
+ if name, ok = d.nsname(); !ok {
+ if d.err == nil {
+ d.err = d.syntaxError("expected element name after <")
+ }
+ return nil, d.err
+ }
+
+ attr = []Attr{}
+ for {
+ d.space()
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b == '/' {
+ empty = true
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b != '>' {
+ d.err = d.syntaxError("expected /> in element")
+ return nil, d.err
+ }
+ break
+ }
+ if b == '>' {
+ break
+ }
+ d.ungetc(b)
+
+ n := len(attr)
+ if n >= cap(attr) {
+ nCap := 2 * cap(attr)
+ if nCap == 0 {
+ nCap = 4
+ }
+ nattr := make([]Attr, n, nCap)
+ copy(nattr, attr)
+ attr = nattr
+ }
+ attr = attr[0 : n+1]
+ a := &attr[n]
+ if a.Name, ok = d.nsname(); !ok {
+ if d.err == nil {
+ d.err = d.syntaxError("expected attribute name in element")
+ }
+ return nil, d.err
+ }
+ d.space()
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b != '=' {
+ if d.Strict {
+ d.err = d.syntaxError("attribute name without = in element")
+ return nil, d.err
+ } else {
+ d.ungetc(b)
+ a.Value = a.Name.Local
+ }
+ } else {
+ d.space()
+ data := d.attrval()
+ if data == nil {
+ return nil, d.err
+ }
+ a.Value = string(data)
+ }
+ }
+ if empty {
+ d.needClose = true
+ d.toClose = name
+ }
+ return StartElement{name, attr}, nil
+}
+
+func (d *Decoder) attrval() []byte {
+ b, ok := d.mustgetc()
+ if !ok {
+ return nil
+ }
+ // Handle quoted attribute values
+ if b == '"' || b == '\'' {
+ return d.text(int(b), false)
+ }
+ // Handle unquoted attribute values for strict parsers
+ if d.Strict {
+ d.err = d.syntaxError("unquoted or missing attribute value in element")
+ return nil
+ }
+ // Handle unquoted attribute values for non-strict parsers
+ d.ungetc(b)
+ d.buf.Reset()
+ for {
+ b, ok = d.mustgetc()
+ if !ok {
+ return nil
+ }
+ // http://www.w3.org/TR/REC-html40/intro/sgmltut.html#h-3.2.2
+ if 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' ||
+ '0' <= b && b <= '9' || b == '_' || b == ':' || b == '-' {
+ d.buf.WriteByte(b)
+ } else {
+ d.ungetc(b)
+ break
+ }
+ }
+ return d.buf.Bytes()
+}
+
+// Skip spaces if any
+func (d *Decoder) space() {
+ for {
+ b, ok := d.getc()
+ if !ok {
+ return
+ }
+ switch b {
+ case ' ', '\r', '\n', '\t':
+ default:
+ d.ungetc(b)
+ return
+ }
+ }
+}
+
+// Read a single byte.
+// If there is no byte to read, return ok==false
+// and leave the error in d.err.
+// Maintain line number.
+func (d *Decoder) getc() (b byte, ok bool) {
+ if d.err != nil {
+ return 0, false
+ }
+ if d.nextByte >= 0 {
+ b = byte(d.nextByte)
+ d.nextByte = -1
+ } else {
+ b, d.err = d.r.ReadByte()
+ if d.err != nil {
+ return 0, false
+ }
+ if d.saved != nil {
+ d.saved.WriteByte(b)
+ }
+ }
+ if b == '\n' {
+ d.line++
+ }
+ d.offset++
+ return b, true
+}
+
+// InputOffset returns the input stream byte offset of the current decoder position.
+// The offset gives the location of the end of the most recently returned token
+// and the beginning of the next token.
+func (d *Decoder) InputOffset() int64 {
+ return d.offset
+}
+
+// Return saved offset.
+// If we did ungetc (nextByte >= 0), have to back up one.
+func (d *Decoder) savedOffset() int {
+ n := d.saved.Len()
+ if d.nextByte >= 0 {
+ n--
+ }
+ return n
+}
+
+// Must read a single byte.
+// If there is no byte to read,
+// set d.err to SyntaxError("unexpected EOF")
+// and return ok==false
+func (d *Decoder) mustgetc() (b byte, ok bool) {
+ if b, ok = d.getc(); !ok {
+ if d.err == io.EOF {
+ d.err = d.syntaxError("unexpected EOF")
+ }
+ }
+ return
+}
+
+// Unread a single byte.
+func (d *Decoder) ungetc(b byte) {
+ if b == '\n' {
+ d.line--
+ }
+ d.nextByte = int(b)
+ d.offset--
+}
+
+var entity = map[string]int{
+ "lt": '<',
+ "gt": '>',
+ "amp": '&',
+ "apos": '\'',
+ "quot": '"',
+}
+
+// Read plain text section (XML calls it character data).
+// If quote >= 0, we are in a quoted string and need to find the matching quote.
+// If cdata == true, we are in a <![CDATA[ section and need to find ]]>.
+// On failure return nil and leave the error in d.err.
+func (d *Decoder) text(quote int, cdata bool) []byte {
+ var b0, b1 byte
+ var trunc int
+ d.buf.Reset()
+Input:
+ for {
+ b, ok := d.getc()
+ if !ok {
+ if cdata {
+ if d.err == io.EOF {
+ d.err = d.syntaxError("unexpected EOF in CDATA section")
+ }
+ return nil
+ }
+ break Input
+ }
+
+ // <![CDATA[ section ends with ]]>.
+ // It is an error for ]]> to appear in ordinary text.
+ if b0 == ']' && b1 == ']' && b == '>' {
+ if cdata {
+ trunc = 2
+ break Input
+ }
+ d.err = d.syntaxError("unescaped ]]> not in CDATA section")
+ return nil
+ }
+
+ // Stop reading text if we see a <.
+ if b == '<' && !cdata {
+ if quote >= 0 {
+ d.err = d.syntaxError("unescaped < inside quoted string")
+ return nil
+ }
+ d.ungetc('<')
+ break Input
+ }
+ if quote >= 0 && b == byte(quote) {
+ break Input
+ }
+ if b == '&' && !cdata {
+ // Read escaped character expression up to semicolon.
+ // XML in all its glory allows a document to define and use
+ // its own character names with <!ENTITY ...> directives.
+ // Parsers are required to recognize lt, gt, amp, apos, and quot
+ // even if they have not been declared.
+ before := d.buf.Len()
+ d.buf.WriteByte('&')
+ var ok bool
+ var text string
+ var haveText bool
+ if b, ok = d.mustgetc(); !ok {
+ return nil
+ }
+ if b == '#' {
+ d.buf.WriteByte(b)
+ if b, ok = d.mustgetc(); !ok {
+ return nil
+ }
+ base := 10
+ if b == 'x' {
+ base = 16
+ d.buf.WriteByte(b)
+ if b, ok = d.mustgetc(); !ok {
+ return nil
+ }
+ }
+ start := d.buf.Len()
+ for '0' <= b && b <= '9' ||
+ base == 16 && 'a' <= b && b <= 'f' ||
+ base == 16 && 'A' <= b && b <= 'F' {
+ d.buf.WriteByte(b)
+ if b, ok = d.mustgetc(); !ok {
+ return nil
+ }
+ }
+ if b != ';' {
+ d.ungetc(b)
+ } else {
+ s := string(d.buf.Bytes()[start:])
+ d.buf.WriteByte(';')
+ n, err := strconv.ParseUint(s, base, 64)
+ if err == nil && n <= unicode.MaxRune {
+ text = string(n)
+ haveText = true
+ }
+ }
+ } else {
+ d.ungetc(b)
+ if !d.readName() {
+ if d.err != nil {
+ return nil
+ }
+ ok = false
+ }
+ if b, ok = d.mustgetc(); !ok {
+ return nil
+ }
+ if b != ';' {
+ d.ungetc(b)
+ } else {
+ name := d.buf.Bytes()[before+1:]
+ d.buf.WriteByte(';')
+ if isName(name) {
+ s := string(name)
+ if r, ok := entity[s]; ok {
+ text = string(r)
+ haveText = true
+ } else if d.Entity != nil {
+ text, haveText = d.Entity[s]
+ }
+ }
+ }
+ }
+
+ if haveText {
+ d.buf.Truncate(before)
+ d.buf.Write([]byte(text))
+ b0, b1 = 0, 0
+ continue Input
+ }
+ if !d.Strict {
+ b0, b1 = 0, 0
+ continue Input
+ }
+ ent := string(d.buf.Bytes()[before:])
+ if ent[len(ent)-1] != ';' {
+ ent += " (no semicolon)"
+ }
+ d.err = d.syntaxError("invalid character entity " + ent)
+ return nil
+ }
+
+ // We must rewrite unescaped \r and \r\n into \n.
+ if b == '\r' {
+ d.buf.WriteByte('\n')
+ } else if b1 == '\r' && b == '\n' {
+ // Skip \r\n--we already wrote \n.
+ } else {
+ d.buf.WriteByte(b)
+ }
+
+ b0, b1 = b1, b
+ }
+ data := d.buf.Bytes()
+ data = data[0 : len(data)-trunc]
+
+ // Inspect each rune for being a disallowed character.
+ buf := data
+ for len(buf) > 0 {
+ r, size := utf8.DecodeRune(buf)
+ if r == utf8.RuneError && size == 1 {
+ d.err = d.syntaxError("invalid UTF-8")
+ return nil
+ }
+ buf = buf[size:]
+ if !isInCharacterRange(r) {
+ d.err = d.syntaxError(fmt.Sprintf("illegal character code %U", r))
+ return nil
+ }
+ }
+
+ return data
+}
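The entity handling above recognizes the five predefined entities, decimal and hexadecimal character references, and any names supplied through Decoder.Entity. A minimal sketch of that behavior via the exported encoding/xml package; the element name, the custom entity and its expansion are invented for illustration.

    package main

    import (
        "encoding/xml"
        "fmt"
        "strings"
    )

    func main() {
        d := xml.NewDecoder(strings.NewReader(`<q>&what; &#x767d;&#40300;</q>`))
        // User-defined entities, as a <!ENTITY ...> declaration would provide.
        d.Entity = map[string]string{"what": "What is"}

        var q struct {
            Text string `xml:",chardata"`
        }
        if err := d.Decode(&q); err != nil {
            fmt.Println("decode error:", err)
            return
        }
        fmt.Println(q.Text) // "What is 白鵬"
    }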
+
+// Decide whether the given rune is in the XML Character Range, per
+// the Char production of http://www.xml.com/axml/testaxml.htm,
+// Section 2.2 Characters.
+func isInCharacterRange(r rune) (inrange bool) {
+ return r == 0x09 ||
+ r == 0x0A ||
+ r == 0x0D ||
+ r >= 0x20 && r <= 0xD7FF ||
+ r >= 0xE000 && r <= 0xFFFD ||
+ r >= 0x10000 && r <= 0x10FFFF
+}
+
+// Get name space name: name with a : stuck in the middle.
+// The part before the : is the name space identifier.
+func (d *Decoder) nsname() (name Name, ok bool) {
+ s, ok := d.name()
+ if !ok {
+ return
+ }
+ i := strings.Index(s, ":")
+ if i < 0 {
+ name.Local = s
+ } else {
+ name.Space = s[0:i]
+ name.Local = s[i+1:]
+ }
+ return name, true
+}
+
+// Get name: /first(first|second)*/
+// Do not set d.err if the name is missing (unless unexpected EOF is received):
+// let the caller provide better context.
+func (d *Decoder) name() (s string, ok bool) {
+ d.buf.Reset()
+ if !d.readName() {
+ return "", false
+ }
+
+ // Now we check the characters.
+ b := d.buf.Bytes()
+ if !isName(b) {
+ d.err = d.syntaxError("invalid XML name: " + string(b))
+ return "", false
+ }
+ return string(b), true
+}
+
+// Read a name and append its bytes to d.buf.
+// The name is delimited by any single-byte character not valid in names.
+// All multi-byte characters are accepted; the caller must check their validity.
+func (d *Decoder) readName() (ok bool) {
+ var b byte
+ if b, ok = d.mustgetc(); !ok {
+ return
+ }
+ if b < utf8.RuneSelf && !isNameByte(b) {
+ d.ungetc(b)
+ return false
+ }
+ d.buf.WriteByte(b)
+
+ for {
+ if b, ok = d.mustgetc(); !ok {
+ return
+ }
+ if b < utf8.RuneSelf && !isNameByte(b) {
+ d.ungetc(b)
+ break
+ }
+ d.buf.WriteByte(b)
+ }
+ return true
+}
+
+func isNameByte(c byte) bool {
+ return 'A' <= c && c <= 'Z' ||
+ 'a' <= c && c <= 'z' ||
+ '0' <= c && c <= '9' ||
+ c == '_' || c == ':' || c == '.' || c == '-'
+}
+
+func isName(s []byte) bool {
+ if len(s) == 0 {
+ return false
+ }
+ c, n := utf8.DecodeRune(s)
+ if c == utf8.RuneError && n == 1 {
+ return false
+ }
+ if !unicode.Is(first, c) {
+ return false
+ }
+ for n < len(s) {
+ s = s[n:]
+ c, n = utf8.DecodeRune(s)
+ if c == utf8.RuneError && n == 1 {
+ return false
+ }
+ if !unicode.Is(first, c) && !unicode.Is(second, c) {
+ return false
+ }
+ }
+ return true
+}
+
+func isNameString(s string) bool {
+ if len(s) == 0 {
+ return false
+ }
+ c, n := utf8.DecodeRuneInString(s)
+ if c == utf8.RuneError && n == 1 {
+ return false
+ }
+ if !unicode.Is(first, c) {
+ return false
+ }
+ for n < len(s) {
+ s = s[n:]
+ c, n = utf8.DecodeRuneInString(s)
+ if c == utf8.RuneError && n == 1 {
+ return false
+ }
+ if !unicode.Is(first, c) && !unicode.Is(second, c) {
+ return false
+ }
+ }
+ return true
+}
+
+// These tables were generated by cut and paste from Appendix B of
+// the XML spec at http://www.xml.com/axml/testaxml.htm
+// and then reformatting. First corresponds to (Letter | '_' | ':')
+// and second corresponds to NameChar.
+
+var first = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ {0x003A, 0x003A, 1},
+ {0x0041, 0x005A, 1},
+ {0x005F, 0x005F, 1},
+ {0x0061, 0x007A, 1},
+ {0x00C0, 0x00D6, 1},
+ {0x00D8, 0x00F6, 1},
+ {0x00F8, 0x00FF, 1},
+ {0x0100, 0x0131, 1},
+ {0x0134, 0x013E, 1},
+ {0x0141, 0x0148, 1},
+ {0x014A, 0x017E, 1},
+ {0x0180, 0x01C3, 1},
+ {0x01CD, 0x01F0, 1},
+ {0x01F4, 0x01F5, 1},
+ {0x01FA, 0x0217, 1},
+ {0x0250, 0x02A8, 1},
+ {0x02BB, 0x02C1, 1},
+ {0x0386, 0x0386, 1},
+ {0x0388, 0x038A, 1},
+ {0x038C, 0x038C, 1},
+ {0x038E, 0x03A1, 1},
+ {0x03A3, 0x03CE, 1},
+ {0x03D0, 0x03D6, 1},
+ {0x03DA, 0x03E0, 2},
+ {0x03E2, 0x03F3, 1},
+ {0x0401, 0x040C, 1},
+ {0x040E, 0x044F, 1},
+ {0x0451, 0x045C, 1},
+ {0x045E, 0x0481, 1},
+ {0x0490, 0x04C4, 1},
+ {0x04C7, 0x04C8, 1},
+ {0x04CB, 0x04CC, 1},
+ {0x04D0, 0x04EB, 1},
+ {0x04EE, 0x04F5, 1},
+ {0x04F8, 0x04F9, 1},
+ {0x0531, 0x0556, 1},
+ {0x0559, 0x0559, 1},
+ {0x0561, 0x0586, 1},
+ {0x05D0, 0x05EA, 1},
+ {0x05F0, 0x05F2, 1},
+ {0x0621, 0x063A, 1},
+ {0x0641, 0x064A, 1},
+ {0x0671, 0x06B7, 1},
+ {0x06BA, 0x06BE, 1},
+ {0x06C0, 0x06CE, 1},
+ {0x06D0, 0x06D3, 1},
+ {0x06D5, 0x06D5, 1},
+ {0x06E5, 0x06E6, 1},
+ {0x0905, 0x0939, 1},
+ {0x093D, 0x093D, 1},
+ {0x0958, 0x0961, 1},
+ {0x0985, 0x098C, 1},
+ {0x098F, 0x0990, 1},
+ {0x0993, 0x09A8, 1},
+ {0x09AA, 0x09B0, 1},
+ {0x09B2, 0x09B2, 1},
+ {0x09B6, 0x09B9, 1},
+ {0x09DC, 0x09DD, 1},
+ {0x09DF, 0x09E1, 1},
+ {0x09F0, 0x09F1, 1},
+ {0x0A05, 0x0A0A, 1},
+ {0x0A0F, 0x0A10, 1},
+ {0x0A13, 0x0A28, 1},
+ {0x0A2A, 0x0A30, 1},
+ {0x0A32, 0x0A33, 1},
+ {0x0A35, 0x0A36, 1},
+ {0x0A38, 0x0A39, 1},
+ {0x0A59, 0x0A5C, 1},
+ {0x0A5E, 0x0A5E, 1},
+ {0x0A72, 0x0A74, 1},
+ {0x0A85, 0x0A8B, 1},
+ {0x0A8D, 0x0A8D, 1},
+ {0x0A8F, 0x0A91, 1},
+ {0x0A93, 0x0AA8, 1},
+ {0x0AAA, 0x0AB0, 1},
+ {0x0AB2, 0x0AB3, 1},
+ {0x0AB5, 0x0AB9, 1},
+ {0x0ABD, 0x0AE0, 0x23},
+ {0x0B05, 0x0B0C, 1},
+ {0x0B0F, 0x0B10, 1},
+ {0x0B13, 0x0B28, 1},
+ {0x0B2A, 0x0B30, 1},
+ {0x0B32, 0x0B33, 1},
+ {0x0B36, 0x0B39, 1},
+ {0x0B3D, 0x0B3D, 1},
+ {0x0B5C, 0x0B5D, 1},
+ {0x0B5F, 0x0B61, 1},
+ {0x0B85, 0x0B8A, 1},
+ {0x0B8E, 0x0B90, 1},
+ {0x0B92, 0x0B95, 1},
+ {0x0B99, 0x0B9A, 1},
+ {0x0B9C, 0x0B9C, 1},
+ {0x0B9E, 0x0B9F, 1},
+ {0x0BA3, 0x0BA4, 1},
+ {0x0BA8, 0x0BAA, 1},
+ {0x0BAE, 0x0BB5, 1},
+ {0x0BB7, 0x0BB9, 1},
+ {0x0C05, 0x0C0C, 1},
+ {0x0C0E, 0x0C10, 1},
+ {0x0C12, 0x0C28, 1},
+ {0x0C2A, 0x0C33, 1},
+ {0x0C35, 0x0C39, 1},
+ {0x0C60, 0x0C61, 1},
+ {0x0C85, 0x0C8C, 1},
+ {0x0C8E, 0x0C90, 1},
+ {0x0C92, 0x0CA8, 1},
+ {0x0CAA, 0x0CB3, 1},
+ {0x0CB5, 0x0CB9, 1},
+ {0x0CDE, 0x0CDE, 1},
+ {0x0CE0, 0x0CE1, 1},
+ {0x0D05, 0x0D0C, 1},
+ {0x0D0E, 0x0D10, 1},
+ {0x0D12, 0x0D28, 1},
+ {0x0D2A, 0x0D39, 1},
+ {0x0D60, 0x0D61, 1},
+ {0x0E01, 0x0E2E, 1},
+ {0x0E30, 0x0E30, 1},
+ {0x0E32, 0x0E33, 1},
+ {0x0E40, 0x0E45, 1},
+ {0x0E81, 0x0E82, 1},
+ {0x0E84, 0x0E84, 1},
+ {0x0E87, 0x0E88, 1},
+ {0x0E8A, 0x0E8D, 3},
+ {0x0E94, 0x0E97, 1},
+ {0x0E99, 0x0E9F, 1},
+ {0x0EA1, 0x0EA3, 1},
+ {0x0EA5, 0x0EA7, 2},
+ {0x0EAA, 0x0EAB, 1},
+ {0x0EAD, 0x0EAE, 1},
+ {0x0EB0, 0x0EB0, 1},
+ {0x0EB2, 0x0EB3, 1},
+ {0x0EBD, 0x0EBD, 1},
+ {0x0EC0, 0x0EC4, 1},
+ {0x0F40, 0x0F47, 1},
+ {0x0F49, 0x0F69, 1},
+ {0x10A0, 0x10C5, 1},
+ {0x10D0, 0x10F6, 1},
+ {0x1100, 0x1100, 1},
+ {0x1102, 0x1103, 1},
+ {0x1105, 0x1107, 1},
+ {0x1109, 0x1109, 1},
+ {0x110B, 0x110C, 1},
+ {0x110E, 0x1112, 1},
+ {0x113C, 0x1140, 2},
+ {0x114C, 0x1150, 2},
+ {0x1154, 0x1155, 1},
+ {0x1159, 0x1159, 1},
+ {0x115F, 0x1161, 1},
+ {0x1163, 0x1169, 2},
+ {0x116D, 0x116E, 1},
+ {0x1172, 0x1173, 1},
+ {0x1175, 0x119E, 0x119E - 0x1175},
+ {0x11A8, 0x11AB, 0x11AB - 0x11A8},
+ {0x11AE, 0x11AF, 1},
+ {0x11B7, 0x11B8, 1},
+ {0x11BA, 0x11BA, 1},
+ {0x11BC, 0x11C2, 1},
+ {0x11EB, 0x11F0, 0x11F0 - 0x11EB},
+ {0x11F9, 0x11F9, 1},
+ {0x1E00, 0x1E9B, 1},
+ {0x1EA0, 0x1EF9, 1},
+ {0x1F00, 0x1F15, 1},
+ {0x1F18, 0x1F1D, 1},
+ {0x1F20, 0x1F45, 1},
+ {0x1F48, 0x1F4D, 1},
+ {0x1F50, 0x1F57, 1},
+ {0x1F59, 0x1F5B, 0x1F5B - 0x1F59},
+ {0x1F5D, 0x1F5D, 1},
+ {0x1F5F, 0x1F7D, 1},
+ {0x1F80, 0x1FB4, 1},
+ {0x1FB6, 0x1FBC, 1},
+ {0x1FBE, 0x1FBE, 1},
+ {0x1FC2, 0x1FC4, 1},
+ {0x1FC6, 0x1FCC, 1},
+ {0x1FD0, 0x1FD3, 1},
+ {0x1FD6, 0x1FDB, 1},
+ {0x1FE0, 0x1FEC, 1},
+ {0x1FF2, 0x1FF4, 1},
+ {0x1FF6, 0x1FFC, 1},
+ {0x2126, 0x2126, 1},
+ {0x212A, 0x212B, 1},
+ {0x212E, 0x212E, 1},
+ {0x2180, 0x2182, 1},
+ {0x3007, 0x3007, 1},
+ {0x3021, 0x3029, 1},
+ {0x3041, 0x3094, 1},
+ {0x30A1, 0x30FA, 1},
+ {0x3105, 0x312C, 1},
+ {0x4E00, 0x9FA5, 1},
+ {0xAC00, 0xD7A3, 1},
+ },
+}
+
+var second = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ {0x002D, 0x002E, 1},
+ {0x0030, 0x0039, 1},
+ {0x00B7, 0x00B7, 1},
+ {0x02D0, 0x02D1, 1},
+ {0x0300, 0x0345, 1},
+ {0x0360, 0x0361, 1},
+ {0x0387, 0x0387, 1},
+ {0x0483, 0x0486, 1},
+ {0x0591, 0x05A1, 1},
+ {0x05A3, 0x05B9, 1},
+ {0x05BB, 0x05BD, 1},
+ {0x05BF, 0x05BF, 1},
+ {0x05C1, 0x05C2, 1},
+ {0x05C4, 0x0640, 0x0640 - 0x05C4},
+ {0x064B, 0x0652, 1},
+ {0x0660, 0x0669, 1},
+ {0x0670, 0x0670, 1},
+ {0x06D6, 0x06DC, 1},
+ {0x06DD, 0x06DF, 1},
+ {0x06E0, 0x06E4, 1},
+ {0x06E7, 0x06E8, 1},
+ {0x06EA, 0x06ED, 1},
+ {0x06F0, 0x06F9, 1},
+ {0x0901, 0x0903, 1},
+ {0x093C, 0x093C, 1},
+ {0x093E, 0x094C, 1},
+ {0x094D, 0x094D, 1},
+ {0x0951, 0x0954, 1},
+ {0x0962, 0x0963, 1},
+ {0x0966, 0x096F, 1},
+ {0x0981, 0x0983, 1},
+ {0x09BC, 0x09BC, 1},
+ {0x09BE, 0x09BF, 1},
+ {0x09C0, 0x09C4, 1},
+ {0x09C7, 0x09C8, 1},
+ {0x09CB, 0x09CD, 1},
+ {0x09D7, 0x09D7, 1},
+ {0x09E2, 0x09E3, 1},
+ {0x09E6, 0x09EF, 1},
+ {0x0A02, 0x0A3C, 0x3A},
+ {0x0A3E, 0x0A3F, 1},
+ {0x0A40, 0x0A42, 1},
+ {0x0A47, 0x0A48, 1},
+ {0x0A4B, 0x0A4D, 1},
+ {0x0A66, 0x0A6F, 1},
+ {0x0A70, 0x0A71, 1},
+ {0x0A81, 0x0A83, 1},
+ {0x0ABC, 0x0ABC, 1},
+ {0x0ABE, 0x0AC5, 1},
+ {0x0AC7, 0x0AC9, 1},
+ {0x0ACB, 0x0ACD, 1},
+ {0x0AE6, 0x0AEF, 1},
+ {0x0B01, 0x0B03, 1},
+ {0x0B3C, 0x0B3C, 1},
+ {0x0B3E, 0x0B43, 1},
+ {0x0B47, 0x0B48, 1},
+ {0x0B4B, 0x0B4D, 1},
+ {0x0B56, 0x0B57, 1},
+ {0x0B66, 0x0B6F, 1},
+ {0x0B82, 0x0B83, 1},
+ {0x0BBE, 0x0BC2, 1},
+ {0x0BC6, 0x0BC8, 1},
+ {0x0BCA, 0x0BCD, 1},
+ {0x0BD7, 0x0BD7, 1},
+ {0x0BE7, 0x0BEF, 1},
+ {0x0C01, 0x0C03, 1},
+ {0x0C3E, 0x0C44, 1},
+ {0x0C46, 0x0C48, 1},
+ {0x0C4A, 0x0C4D, 1},
+ {0x0C55, 0x0C56, 1},
+ {0x0C66, 0x0C6F, 1},
+ {0x0C82, 0x0C83, 1},
+ {0x0CBE, 0x0CC4, 1},
+ {0x0CC6, 0x0CC8, 1},
+ {0x0CCA, 0x0CCD, 1},
+ {0x0CD5, 0x0CD6, 1},
+ {0x0CE6, 0x0CEF, 1},
+ {0x0D02, 0x0D03, 1},
+ {0x0D3E, 0x0D43, 1},
+ {0x0D46, 0x0D48, 1},
+ {0x0D4A, 0x0D4D, 1},
+ {0x0D57, 0x0D57, 1},
+ {0x0D66, 0x0D6F, 1},
+ {0x0E31, 0x0E31, 1},
+ {0x0E34, 0x0E3A, 1},
+ {0x0E46, 0x0E46, 1},
+ {0x0E47, 0x0E4E, 1},
+ {0x0E50, 0x0E59, 1},
+ {0x0EB1, 0x0EB1, 1},
+ {0x0EB4, 0x0EB9, 1},
+ {0x0EBB, 0x0EBC, 1},
+ {0x0EC6, 0x0EC6, 1},
+ {0x0EC8, 0x0ECD, 1},
+ {0x0ED0, 0x0ED9, 1},
+ {0x0F18, 0x0F19, 1},
+ {0x0F20, 0x0F29, 1},
+ {0x0F35, 0x0F39, 2},
+ {0x0F3E, 0x0F3F, 1},
+ {0x0F71, 0x0F84, 1},
+ {0x0F86, 0x0F8B, 1},
+ {0x0F90, 0x0F95, 1},
+ {0x0F97, 0x0F97, 1},
+ {0x0F99, 0x0FAD, 1},
+ {0x0FB1, 0x0FB7, 1},
+ {0x0FB9, 0x0FB9, 1},
+ {0x20D0, 0x20DC, 1},
+ {0x20E1, 0x3005, 0x3005 - 0x20E1},
+ {0x302A, 0x302F, 1},
+ {0x3031, 0x3035, 1},
+ {0x3099, 0x309A, 1},
+ {0x309D, 0x309E, 1},
+ {0x30FC, 0x30FE, 1},
+ },
+}
+
+// HTMLEntity is an entity map containing translations for the
+// standard HTML entity characters.
+var HTMLEntity = htmlEntity
+
+var htmlEntity = map[string]string{
+ /*
+ hget http://www.w3.org/TR/html4/sgml/entities.html |
+ ssam '
+ ,y /\&gt;/ x/\&lt;(.|\n)+/ s/\n/ /g
+ ,x v/^\&lt;!ENTITY/d
+ ,s/\&lt;!ENTITY ([^ ]+) .*U\+([0-9A-F][0-9A-F][0-9A-F][0-9A-F]) .+/ "\1": "\\u\2",/g
+ '
+ */
+ "nbsp": "\u00A0",
+ "iexcl": "\u00A1",
+ "cent": "\u00A2",
+ "pound": "\u00A3",
+ "curren": "\u00A4",
+ "yen": "\u00A5",
+ "brvbar": "\u00A6",
+ "sect": "\u00A7",
+ "uml": "\u00A8",
+ "copy": "\u00A9",
+ "ordf": "\u00AA",
+ "laquo": "\u00AB",
+ "not": "\u00AC",
+ "shy": "\u00AD",
+ "reg": "\u00AE",
+ "macr": "\u00AF",
+ "deg": "\u00B0",
+ "plusmn": "\u00B1",
+ "sup2": "\u00B2",
+ "sup3": "\u00B3",
+ "acute": "\u00B4",
+ "micro": "\u00B5",
+ "para": "\u00B6",
+ "middot": "\u00B7",
+ "cedil": "\u00B8",
+ "sup1": "\u00B9",
+ "ordm": "\u00BA",
+ "raquo": "\u00BB",
+ "frac14": "\u00BC",
+ "frac12": "\u00BD",
+ "frac34": "\u00BE",
+ "iquest": "\u00BF",
+ "Agrave": "\u00C0",
+ "Aacute": "\u00C1",
+ "Acirc": "\u00C2",
+ "Atilde": "\u00C3",
+ "Auml": "\u00C4",
+ "Aring": "\u00C5",
+ "AElig": "\u00C6",
+ "Ccedil": "\u00C7",
+ "Egrave": "\u00C8",
+ "Eacute": "\u00C9",
+ "Ecirc": "\u00CA",
+ "Euml": "\u00CB",
+ "Igrave": "\u00CC",
+ "Iacute": "\u00CD",
+ "Icirc": "\u00CE",
+ "Iuml": "\u00CF",
+ "ETH": "\u00D0",
+ "Ntilde": "\u00D1",
+ "Ograve": "\u00D2",
+ "Oacute": "\u00D3",
+ "Ocirc": "\u00D4",
+ "Otilde": "\u00D5",
+ "Ouml": "\u00D6",
+ "times": "\u00D7",
+ "Oslash": "\u00D8",
+ "Ugrave": "\u00D9",
+ "Uacute": "\u00DA",
+ "Ucirc": "\u00DB",
+ "Uuml": "\u00DC",
+ "Yacute": "\u00DD",
+ "THORN": "\u00DE",
+ "szlig": "\u00DF",
+ "agrave": "\u00E0",
+ "aacute": "\u00E1",
+ "acirc": "\u00E2",
+ "atilde": "\u00E3",
+ "auml": "\u00E4",
+ "aring": "\u00E5",
+ "aelig": "\u00E6",
+ "ccedil": "\u00E7",
+ "egrave": "\u00E8",
+ "eacute": "\u00E9",
+ "ecirc": "\u00EA",
+ "euml": "\u00EB",
+ "igrave": "\u00EC",
+ "iacute": "\u00ED",
+ "icirc": "\u00EE",
+ "iuml": "\u00EF",
+ "eth": "\u00F0",
+ "ntilde": "\u00F1",
+ "ograve": "\u00F2",
+ "oacute": "\u00F3",
+ "ocirc": "\u00F4",
+ "otilde": "\u00F5",
+ "ouml": "\u00F6",
+ "divide": "\u00F7",
+ "oslash": "\u00F8",
+ "ugrave": "\u00F9",
+ "uacute": "\u00FA",
+ "ucirc": "\u00FB",
+ "uuml": "\u00FC",
+ "yacute": "\u00FD",
+ "thorn": "\u00FE",
+ "yuml": "\u00FF",
+ "fnof": "\u0192",
+ "Alpha": "\u0391",
+ "Beta": "\u0392",
+ "Gamma": "\u0393",
+ "Delta": "\u0394",
+ "Epsilon": "\u0395",
+ "Zeta": "\u0396",
+ "Eta": "\u0397",
+ "Theta": "\u0398",
+ "Iota": "\u0399",
+ "Kappa": "\u039A",
+ "Lambda": "\u039B",
+ "Mu": "\u039C",
+ "Nu": "\u039D",
+ "Xi": "\u039E",
+ "Omicron": "\u039F",
+ "Pi": "\u03A0",
+ "Rho": "\u03A1",
+ "Sigma": "\u03A3",
+ "Tau": "\u03A4",
+ "Upsilon": "\u03A5",
+ "Phi": "\u03A6",
+ "Chi": "\u03A7",
+ "Psi": "\u03A8",
+ "Omega": "\u03A9",
+ "alpha": "\u03B1",
+ "beta": "\u03B2",
+ "gamma": "\u03B3",
+ "delta": "\u03B4",
+ "epsilon": "\u03B5",
+ "zeta": "\u03B6",
+ "eta": "\u03B7",
+ "theta": "\u03B8",
+ "iota": "\u03B9",
+ "kappa": "\u03BA",
+ "lambda": "\u03BB",
+ "mu": "\u03BC",
+ "nu": "\u03BD",
+ "xi": "\u03BE",
+ "omicron": "\u03BF",
+ "pi": "\u03C0",
+ "rho": "\u03C1",
+ "sigmaf": "\u03C2",
+ "sigma": "\u03C3",
+ "tau": "\u03C4",
+ "upsilon": "\u03C5",
+ "phi": "\u03C6",
+ "chi": "\u03C7",
+ "psi": "\u03C8",
+ "omega": "\u03C9",
+ "thetasym": "\u03D1",
+ "upsih": "\u03D2",
+ "piv": "\u03D6",
+ "bull": "\u2022",
+ "hellip": "\u2026",
+ "prime": "\u2032",
+ "Prime": "\u2033",
+ "oline": "\u203E",
+ "frasl": "\u2044",
+ "weierp": "\u2118",
+ "image": "\u2111",
+ "real": "\u211C",
+ "trade": "\u2122",
+ "alefsym": "\u2135",
+ "larr": "\u2190",
+ "uarr": "\u2191",
+ "rarr": "\u2192",
+ "darr": "\u2193",
+ "harr": "\u2194",
+ "crarr": "\u21B5",
+ "lArr": "\u21D0",
+ "uArr": "\u21D1",
+ "rArr": "\u21D2",
+ "dArr": "\u21D3",
+ "hArr": "\u21D4",
+ "forall": "\u2200",
+ "part": "\u2202",
+ "exist": "\u2203",
+ "empty": "\u2205",
+ "nabla": "\u2207",
+ "isin": "\u2208",
+ "notin": "\u2209",
+ "ni": "\u220B",
+ "prod": "\u220F",
+ "sum": "\u2211",
+ "minus": "\u2212",
+ "lowast": "\u2217",
+ "radic": "\u221A",
+ "prop": "\u221D",
+ "infin": "\u221E",
+ "ang": "\u2220",
+ "and": "\u2227",
+ "or": "\u2228",
+ "cap": "\u2229",
+ "cup": "\u222A",
+ "int": "\u222B",
+ "there4": "\u2234",
+ "sim": "\u223C",
+ "cong": "\u2245",
+ "asymp": "\u2248",
+ "ne": "\u2260",
+ "equiv": "\u2261",
+ "le": "\u2264",
+ "ge": "\u2265",
+ "sub": "\u2282",
+ "sup": "\u2283",
+ "nsub": "\u2284",
+ "sube": "\u2286",
+ "supe": "\u2287",
+ "oplus": "\u2295",
+ "otimes": "\u2297",
+ "perp": "\u22A5",
+ "sdot": "\u22C5",
+ "lceil": "\u2308",
+ "rceil": "\u2309",
+ "lfloor": "\u230A",
+ "rfloor": "\u230B",
+ "lang": "\u2329",
+ "rang": "\u232A",
+ "loz": "\u25CA",
+ "spades": "\u2660",
+ "clubs": "\u2663",
+ "hearts": "\u2665",
+ "diams": "\u2666",
+ "quot": "\u0022",
+ "amp": "\u0026",
+ "lt": "\u003C",
+ "gt": "\u003E",
+ "OElig": "\u0152",
+ "oelig": "\u0153",
+ "Scaron": "\u0160",
+ "scaron": "\u0161",
+ "Yuml": "\u0178",
+ "circ": "\u02C6",
+ "tilde": "\u02DC",
+ "ensp": "\u2002",
+ "emsp": "\u2003",
+ "thinsp": "\u2009",
+ "zwnj": "\u200C",
+ "zwj": "\u200D",
+ "lrm": "\u200E",
+ "rlm": "\u200F",
+ "ndash": "\u2013",
+ "mdash": "\u2014",
+ "lsquo": "\u2018",
+ "rsquo": "\u2019",
+ "sbquo": "\u201A",
+ "ldquo": "\u201C",
+ "rdquo": "\u201D",
+ "bdquo": "\u201E",
+ "dagger": "\u2020",
+ "Dagger": "\u2021",
+ "permil": "\u2030",
+ "lsaquo": "\u2039",
+ "rsaquo": "\u203A",
+ "euro": "\u20AC",
+}
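Because HTMLEntity is exported (and also available from the standard encoding/xml package), it can be handed directly to a Decoder to resolve the HTML 4 named entities listed above in loosely formed markup. A small hedged example; the snippet text is invented.

    package main

    import (
        "encoding/xml"
        "fmt"
        "strings"
    )

    func main() {
        d := xml.NewDecoder(strings.NewReader(`<p>caf&eacute; &amp; cr&egrave;me</p>`))
        d.Strict = false
        d.Entity = xml.HTMLEntity // resolve the HTML 4 named entities above

        var text strings.Builder
        for {
            tok, err := d.Token()
            if err != nil {
                break
            }
            if cd, ok := tok.(xml.CharData); ok {
                text.Write(cd)
            }
        }
        fmt.Println(text.String()) // "café & crème"
    }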
+
+// HTMLAutoClose is the set of HTML elements that
+// should be considered to close automatically.
+var HTMLAutoClose = htmlAutoClose
+
+var htmlAutoClose = []string{
+ /*
+ hget http://www.w3.org/TR/html4/loose.dtd |
+ 9 sed -n 's/<!ELEMENT ([^ ]*) +- O EMPTY.+/ "\1",/p' | tr A-Z a-z
+ */
+ "basefont",
+ "br",
+ "area",
+ "link",
+ "img",
+ "param",
+ "hr",
+ "input",
+ "col",
+ "frame",
+ "isindex",
+ "base",
+ "meta",
+}
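HTMLAutoClose pairs with Decoder.AutoClose in non-strict mode so that void HTML elements such as <br> produce a synthetic end element. A sketch against the exported encoding/xml package, with an invented fragment.

    package main

    import (
        "encoding/xml"
        "fmt"
        "strings"
    )

    func main() {
        d := xml.NewDecoder(strings.NewReader(`<p>first<br>second</p>`))
        d.Strict = false
        d.AutoClose = xml.HTMLAutoClose // <br> gets a synthetic end element

        for {
            tok, err := d.Token()
            if err != nil {
                break
            }
            switch t := tok.(type) {
            case xml.StartElement:
                fmt.Println("start", t.Name.Local)
            case xml.EndElement:
                fmt.Println("end  ", t.Name.Local)
            }
        }
        // start p, start br, end br, end p
    }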
+
+var (
+ esc_quot = []byte("&#34;") // shorter than "&quot;"
+ esc_apos = []byte("&#39;") // shorter than "&apos;"
+ esc_amp = []byte("&amp;")
+ esc_lt = []byte("&lt;")
+ esc_gt = []byte("&gt;")
+ esc_tab = []byte("&#x9;")
+ esc_nl = []byte("&#xA;")
+ esc_cr = []byte("&#xD;")
+ esc_fffd = []byte("\uFFFD") // Unicode replacement character
+)
+
+// EscapeText writes to w the properly escaped XML equivalent
+// of the plain text data s.
+func EscapeText(w io.Writer, s []byte) error {
+ return escapeText(w, s, true)
+}
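A small usage sketch for EscapeText, again via the exported encoding/xml package; the input string is arbitrary.

    package main

    import (
        "encoding/xml"
        "log"
        "os"
    )

    func main() {
        // Prints: O&#39;Reilly &amp; Sons &lt;info@example.com&gt;
        if err := xml.EscapeText(os.Stdout, []byte(`O'Reilly & Sons <info@example.com>`)); err != nil {
            log.Fatal(err)
        }
    }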
+
+// escapeText writes to w the properly escaped XML equivalent
+// of the plain text data s. If escapeNewline is true, newline
+// characters will be escaped.
+func escapeText(w io.Writer, s []byte, escapeNewline bool) error {
+ var esc []byte
+ last := 0
+ for i := 0; i < len(s); {
+ r, width := utf8.DecodeRune(s[i:])
+ i += width
+ switch r {
+ case '"':
+ esc = esc_quot
+ case '\'':
+ esc = esc_apos
+ case '&':
+ esc = esc_amp
+ case '<':
+ esc = esc_lt
+ case '>':
+ esc = esc_gt
+ case '\t':
+ esc = esc_tab
+ case '\n':
+ if !escapeNewline {
+ continue
+ }
+ esc = esc_nl
+ case '\r':
+ esc = esc_cr
+ default:
+ if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
+ esc = esc_fffd
+ break
+ }
+ continue
+ }
+ if _, err := w.Write(s[last : i-width]); err != nil {
+ return err
+ }
+ if _, err := w.Write(esc); err != nil {
+ return err
+ }
+ last = i
+ }
+ if _, err := w.Write(s[last:]); err != nil {
+ return err
+ }
+ return nil
+}
+
+// EscapeString writes to p the properly escaped XML equivalent
+// of the plain text data s.
+func (p *printer) EscapeString(s string) {
+ var esc []byte
+ last := 0
+ for i := 0; i < len(s); {
+ r, width := utf8.DecodeRuneInString(s[i:])
+ i += width
+ switch r {
+ case '"':
+ esc = esc_quot
+ case '\'':
+ esc = esc_apos
+ case '&':
+ esc = esc_amp
+ case '<':
+ esc = esc_lt
+ case '>':
+ esc = esc_gt
+ case '\t':
+ esc = esc_tab
+ case '\n':
+ esc = esc_nl
+ case '\r':
+ esc = esc_cr
+ default:
+ if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
+ esc = esc_fffd
+ break
+ }
+ continue
+ }
+ p.WriteString(s[last : i-width])
+ p.Write(esc)
+ last = i
+ }
+ p.WriteString(s[last:])
+}
+
+// Escape is like EscapeText but omits the error return value.
+// It is provided for backwards compatibility with Go 1.0.
+// Code targeting Go 1.1 or later should use EscapeText.
+func Escape(w io.Writer, s []byte) {
+ EscapeText(w, s)
+}
+
+// procInst parses the `param="..."` or `param='...'`
+// value out of the provided string, returning "" if not found.
+func procInst(param, s string) string {
+ // TODO: this parsing is somewhat lame and not exact.
+ // It works for all actual cases, though.
+ param = param + "="
+ idx := strings.Index(s, param)
+ if idx == -1 {
+ return ""
+ }
+ v := s[idx+len(param):]
+ if v == "" {
+ return ""
+ }
+ if v[0] != '\'' && v[0] != '"' {
+ return ""
+ }
+ idx = strings.IndexRune(v[1:], rune(v[0]))
+ if idx == -1 {
+ return ""
+ }
+ return v[1 : idx+1]
+}
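The ProcInst handling near the top of this file defers any non-UTF-8 encoding declaration to Decoder.CharsetReader. Below is a hedged sketch of wiring up that hook for ISO-8859-1 input, using the exported encoding/xml package and a hand-rolled byte-to-rune conversion rather than a real charset library; the document and element names are invented.

    package main

    import (
        "encoding/xml"
        "fmt"
        "io"
        "strings"
    )

    func main() {
        // "café" encoded as ISO-8859-1: the é is the single byte 0xE9.
        raw := "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?><v>caf\xe9</v>"

        d := xml.NewDecoder(strings.NewReader(raw))
        d.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) {
            if !strings.EqualFold(charset, "ISO-8859-1") {
                return nil, fmt.Errorf("unsupported charset %q", charset)
            }
            b, err := io.ReadAll(input)
            if err != nil {
                return nil, err
            }
            // Latin-1 bytes map one-to-one onto the first 256 Unicode code points.
            runes := make([]rune, len(b))
            for i, c := range b {
                runes[i] = rune(c)
            }
            return strings.NewReader(string(runes)), nil
        }

        var v struct {
            Text string `xml:",chardata"`
        }
        if err := d.Decode(&v); err != nil {
            fmt.Println("decode error:", err)
            return
        }
        fmt.Println(v.Text) // "café"
    }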
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/xml_test.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/xml_test.go
new file mode 100644
index 000000000..af4cf8ea8
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/internal/xml/xml_test.go
@@ -0,0 +1,752 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "testing"
+ "unicode/utf8"
+)
+
+const testInput = `
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<body xmlns:foo="ns1" xmlns="ns2" xmlns:tag="ns3" ` +
+ "\r\n\t" + ` >
+ <hello lang="en">World &lt;&gt;&apos;&quot; &#x767d;&#40300;翔</hello>
+ <query>&何; &is-it;</query>
+ <goodbye />
+ <outer foo:attr="value" xmlns:tag="ns4">
+ <inner/>
+ </outer>
+ <tag:name>
+ <![CDATA[Some text here.]]>
+ </tag:name>
+</body><!-- missing final newline -->`
+
+var testEntity = map[string]string{"何": "What", "is-it": "is it?"}
+
+var rawTokens = []Token{
+ CharData("\n"),
+ ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)},
+ CharData("\n"),
+ Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`),
+ CharData("\n"),
+ StartElement{Name{"", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}},
+ CharData("\n "),
+ StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}},
+ CharData("World <>'\" 白鵬翔"),
+ EndElement{Name{"", "hello"}},
+ CharData("\n "),
+ StartElement{Name{"", "query"}, []Attr{}},
+ CharData("What is it?"),
+ EndElement{Name{"", "query"}},
+ CharData("\n "),
+ StartElement{Name{"", "goodbye"}, []Attr{}},
+ EndElement{Name{"", "goodbye"}},
+ CharData("\n "),
+ StartElement{Name{"", "outer"}, []Attr{{Name{"foo", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}},
+ CharData("\n "),
+ StartElement{Name{"", "inner"}, []Attr{}},
+ EndElement{Name{"", "inner"}},
+ CharData("\n "),
+ EndElement{Name{"", "outer"}},
+ CharData("\n "),
+ StartElement{Name{"tag", "name"}, []Attr{}},
+ CharData("\n "),
+ CharData("Some text here."),
+ CharData("\n "),
+ EndElement{Name{"tag", "name"}},
+ CharData("\n"),
+ EndElement{Name{"", "body"}},
+ Comment(" missing final newline "),
+}
+
+var cookedTokens = []Token{
+ CharData("\n"),
+ ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)},
+ CharData("\n"),
+ Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`),
+ CharData("\n"),
+ StartElement{Name{"ns2", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}},
+ CharData("\n "),
+ StartElement{Name{"ns2", "hello"}, []Attr{{Name{"", "lang"}, "en"}}},
+ CharData("World <>'\" 白鵬翔"),
+ EndElement{Name{"ns2", "hello"}},
+ CharData("\n "),
+ StartElement{Name{"ns2", "query"}, []Attr{}},
+ CharData("What is it?"),
+ EndElement{Name{"ns2", "query"}},
+ CharData("\n "),
+ StartElement{Name{"ns2", "goodbye"}, []Attr{}},
+ EndElement{Name{"ns2", "goodbye"}},
+ CharData("\n "),
+ StartElement{Name{"ns2", "outer"}, []Attr{{Name{"ns1", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}},
+ CharData("\n "),
+ StartElement{Name{"ns2", "inner"}, []Attr{}},
+ EndElement{Name{"ns2", "inner"}},
+ CharData("\n "),
+ EndElement{Name{"ns2", "outer"}},
+ CharData("\n "),
+ StartElement{Name{"ns3", "name"}, []Attr{}},
+ CharData("\n "),
+ CharData("Some text here."),
+ CharData("\n "),
+ EndElement{Name{"ns3", "name"}},
+ CharData("\n"),
+ EndElement{Name{"ns2", "body"}},
+ Comment(" missing final newline "),
+}
+
+const testInputAltEncoding = `
+<?xml version="1.0" encoding="x-testing-uppercase"?>
+<TAG>VALUE</TAG>`
+
+var rawTokensAltEncoding = []Token{
+ CharData("\n"),
+ ProcInst{"xml", []byte(`version="1.0" encoding="x-testing-uppercase"`)},
+ CharData("\n"),
+ StartElement{Name{"", "tag"}, []Attr{}},
+ CharData("value"),
+ EndElement{Name{"", "tag"}},
+}
+
+var xmlInput = []string{
+ // unexpected EOF cases
+ "<",
+ "<t",
+ "<t ",
+ "<t/",
+ "<!",
+ "<!-",
+ "<!--",
+ "<!--c-",
+ "<!--c--",
+ "<!d",
+ "<t></",
+ "<t></t",
+ "<?",
+ "<?p",
+ "<t a",
+ "<t a=",
+ "<t a='",
+ "<t a=''",
+ "<t/><![",
+ "<t/><![C",
+ "<t/><![CDATA[d",
+ "<t/><![CDATA[d]",
+ "<t/><![CDATA[d]]",
+
+ // other Syntax errors
+ "<>",
+ "<t/a",
+ "<0 />",
+ "<?0 >",
+ // "<!0 >", // let the Token() caller handle
+ "</0>",
+ "<t 0=''>",
+ "<t a='&'>",
+ "<t a='<'>",
+ "<t>&nbspc;</t>",
+ "<t a>",
+ "<t a=>",
+ "<t a=v>",
+ // "<![CDATA[d]]>", // let the Token() caller handle
+ "<t></e>",
+ "<t></>",
+ "<t></t!",
+ "<t>cdata]]></t>",
+}
+
+func TestRawToken(t *testing.T) {
+ d := NewDecoder(strings.NewReader(testInput))
+ d.Entity = testEntity
+ testRawToken(t, d, testInput, rawTokens)
+}
+
+const nonStrictInput = `
+<tag>non&entity</tag>
+<tag>&unknown;entity</tag>
+<tag>&#123</tag>
+<tag>&#zzz;</tag>
+<tag>&なまえ3;</tag>
+<tag>&lt-gt;</tag>
+<tag>&;</tag>
+<tag>&0a;</tag>
+`
+
+var nonStringEntity = map[string]string{"": "oops!", "0a": "oops!"}
+
+var nonStrictTokens = []Token{
+ CharData("\n"),
+ StartElement{Name{"", "tag"}, []Attr{}},
+ CharData("non&entity"),
+ EndElement{Name{"", "tag"}},
+ CharData("\n"),
+ StartElement{Name{"", "tag"}, []Attr{}},
+ CharData("&unknown;entity"),
+ EndElement{Name{"", "tag"}},
+ CharData("\n"),
+ StartElement{Name{"", "tag"}, []Attr{}},
+ CharData("&#123"),
+ EndElement{Name{"", "tag"}},
+ CharData("\n"),
+ StartElement{Name{"", "tag"}, []Attr{}},
+ CharData("&#zzz;"),
+ EndElement{Name{"", "tag"}},
+ CharData("\n"),
+ StartElement{Name{"", "tag"}, []Attr{}},
+ CharData("&なまえ3;"),
+ EndElement{Name{"", "tag"}},
+ CharData("\n"),
+ StartElement{Name{"", "tag"}, []Attr{}},
+ CharData("&lt-gt;"),
+ EndElement{Name{"", "tag"}},
+ CharData("\n"),
+ StartElement{Name{"", "tag"}, []Attr{}},
+ CharData("&;"),
+ EndElement{Name{"", "tag"}},
+ CharData("\n"),
+ StartElement{Name{"", "tag"}, []Attr{}},
+ CharData("&0a;"),
+ EndElement{Name{"", "tag"}},
+ CharData("\n"),
+}
+
+func TestNonStrictRawToken(t *testing.T) {
+ d := NewDecoder(strings.NewReader(nonStrictInput))
+ d.Strict = false
+ testRawToken(t, d, nonStrictInput, nonStrictTokens)
+}
+
+type downCaser struct {
+ t *testing.T
+ r io.ByteReader
+}
+
+func (d *downCaser) ReadByte() (c byte, err error) {
+ c, err = d.r.ReadByte()
+ if c >= 'A' && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ return
+}
+
+func (d *downCaser) Read(p []byte) (int, error) {
+ d.t.Fatalf("unexpected Read call on downCaser reader")
+ panic("unreachable")
+}
+
+func TestRawTokenAltEncoding(t *testing.T) {
+ d := NewDecoder(strings.NewReader(testInputAltEncoding))
+ d.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) {
+ if charset != "x-testing-uppercase" {
+ t.Fatalf("unexpected charset %q", charset)
+ }
+ return &downCaser{t, input.(io.ByteReader)}, nil
+ }
+ testRawToken(t, d, testInputAltEncoding, rawTokensAltEncoding)
+}
+
+func TestRawTokenAltEncodingNoConverter(t *testing.T) {
+ d := NewDecoder(strings.NewReader(testInputAltEncoding))
+ token, err := d.RawToken()
+ if token == nil {
+ t.Fatalf("expected a token on first RawToken call")
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ token, err = d.RawToken()
+ if token != nil {
+ t.Errorf("expected a nil token; got %#v", token)
+ }
+ if err == nil {
+ t.Fatalf("expected an error on second RawToken call")
+ }
+ const encoding = "x-testing-uppercase"
+ if !strings.Contains(err.Error(), encoding) {
+ t.Errorf("expected error to contain %q; got error: %v",
+ encoding, err)
+ }
+}
+
+func testRawToken(t *testing.T, d *Decoder, raw string, rawTokens []Token) {
+ lastEnd := int64(0)
+ for i, want := range rawTokens {
+ start := d.InputOffset()
+ have, err := d.RawToken()
+ end := d.InputOffset()
+ if err != nil {
+ t.Fatalf("token %d: unexpected error: %s", i, err)
+ }
+ if !reflect.DeepEqual(have, want) {
+ var shave, swant string
+ if _, ok := have.(CharData); ok {
+ shave = fmt.Sprintf("CharData(%q)", have)
+ } else {
+ shave = fmt.Sprintf("%#v", have)
+ }
+ if _, ok := want.(CharData); ok {
+ swant = fmt.Sprintf("CharData(%q)", want)
+ } else {
+ swant = fmt.Sprintf("%#v", want)
+ }
+ t.Errorf("token %d = %s, want %s", i, shave, swant)
+ }
+
+ // Check that InputOffset returned actual token.
+ switch {
+ case start < lastEnd:
+ t.Errorf("token %d: position [%d,%d) for %T is before previous token", i, start, end, have)
+ case start >= end:
+ // Special case: EndElement can be synthesized.
+ if start == end && end == lastEnd {
+ break
+ }
+ t.Errorf("token %d: position [%d,%d) for %T is empty", i, start, end, have)
+ case end > int64(len(raw)):
+ t.Errorf("token %d: position [%d,%d) for %T extends beyond input", i, start, end, have)
+ default:
+ text := raw[start:end]
+ if strings.ContainsAny(text, "<>") && (!strings.HasPrefix(text, "<") || !strings.HasSuffix(text, ">")) {
+ t.Errorf("token %d: misaligned raw token %#q for %T", i, text, have)
+ }
+ }
+ lastEnd = end
+ }
+}
+
+// Ensure that directives (specifically !DOCTYPE) include the complete
+// text of any nested directives, noting that < and > do not change
+// nesting depth if they are in single or double quotes.
+
+var nestedDirectivesInput = `
+<!DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]>
+<!DOCTYPE [<!ENTITY xlt ">">]>
+<!DOCTYPE [<!ENTITY xlt "<">]>
+<!DOCTYPE [<!ENTITY xlt '>'>]>
+<!DOCTYPE [<!ENTITY xlt '<'>]>
+<!DOCTYPE [<!ENTITY xlt '">'>]>
+<!DOCTYPE [<!ENTITY xlt "'<">]>
+`
+
+var nestedDirectivesTokens = []Token{
+ CharData("\n"),
+ Directive(`DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]`),
+ CharData("\n"),
+ Directive(`DOCTYPE [<!ENTITY xlt ">">]`),
+ CharData("\n"),
+ Directive(`DOCTYPE [<!ENTITY xlt "<">]`),
+ CharData("\n"),
+ Directive(`DOCTYPE [<!ENTITY xlt '>'>]`),
+ CharData("\n"),
+ Directive(`DOCTYPE [<!ENTITY xlt '<'>]`),
+ CharData("\n"),
+ Directive(`DOCTYPE [<!ENTITY xlt '">'>]`),
+ CharData("\n"),
+ Directive(`DOCTYPE [<!ENTITY xlt "'<">]`),
+ CharData("\n"),
+}
+
+func TestNestedDirectives(t *testing.T) {
+ d := NewDecoder(strings.NewReader(nestedDirectivesInput))
+
+ for i, want := range nestedDirectivesTokens {
+ have, err := d.Token()
+ if err != nil {
+ t.Fatalf("token %d: unexpected error: %s", i, err)
+ }
+ if !reflect.DeepEqual(have, want) {
+ t.Errorf("token %d = %#v want %#v", i, have, want)
+ }
+ }
+}
+
+func TestToken(t *testing.T) {
+ d := NewDecoder(strings.NewReader(testInput))
+ d.Entity = testEntity
+
+ for i, want := range cookedTokens {
+ have, err := d.Token()
+ if err != nil {
+ t.Fatalf("token %d: unexpected error: %s", i, err)
+ }
+ if !reflect.DeepEqual(have, want) {
+ t.Errorf("token %d = %#v want %#v", i, have, want)
+ }
+ }
+}
+
+func TestSyntax(t *testing.T) {
+ for i := range xmlInput {
+ d := NewDecoder(strings.NewReader(xmlInput[i]))
+ var err error
+ for _, err = d.Token(); err == nil; _, err = d.Token() {
+ }
+ if _, ok := err.(*SyntaxError); !ok {
+ t.Fatalf(`xmlInput "%s": expected SyntaxError not received`, xmlInput[i])
+ }
+ }
+}
+
+type allScalars struct {
+ True1 bool
+ True2 bool
+ False1 bool
+ False2 bool
+ Int int
+ Int8 int8
+ Int16 int16
+ Int32 int32
+ Int64 int64
+ Uint int
+ Uint8 uint8
+ Uint16 uint16
+ Uint32 uint32
+ Uint64 uint64
+ Uintptr uintptr
+ Float32 float32
+ Float64 float64
+ String string
+ PtrString *string
+}
+
+var all = allScalars{
+ True1: true,
+ True2: true,
+ False1: false,
+ False2: false,
+ Int: 1,
+ Int8: -2,
+ Int16: 3,
+ Int32: -4,
+ Int64: 5,
+ Uint: 6,
+ Uint8: 7,
+ Uint16: 8,
+ Uint32: 9,
+ Uint64: 10,
+ Uintptr: 11,
+ Float32: 13.0,
+ Float64: 14.0,
+ String: "15",
+ PtrString: &sixteen,
+}
+
+var sixteen = "16"
+
+const testScalarsInput = `<allscalars>
+ <True1>true</True1>
+ <True2>1</True2>
+ <False1>false</False1>
+ <False2>0</False2>
+ <Int>1</Int>
+ <Int8>-2</Int8>
+ <Int16>3</Int16>
+ <Int32>-4</Int32>
+ <Int64>5</Int64>
+ <Uint>6</Uint>
+ <Uint8>7</Uint8>
+ <Uint16>8</Uint16>
+ <Uint32>9</Uint32>
+ <Uint64>10</Uint64>
+ <Uintptr>11</Uintptr>
+ <Float>12.0</Float>
+ <Float32>13.0</Float32>
+ <Float64>14.0</Float64>
+ <String>15</String>
+ <PtrString>16</PtrString>
+</allscalars>`
+
+func TestAllScalars(t *testing.T) {
+ var a allScalars
+ err := Unmarshal([]byte(testScalarsInput), &a)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(a, all) {
+ t.Errorf("have %+v want %+v", a, all)
+ }
+}
+
+type item struct {
+ Field_a string
+}
+
+func TestIssue569(t *testing.T) {
+ data := `<item><Field_a>abcd</Field_a></item>`
+ var i item
+ err := Unmarshal([]byte(data), &i)
+
+ if err != nil || i.Field_a != "abcd" {
+ t.Fatal("Expecting abcd")
+ }
+}
+
+func TestUnquotedAttrs(t *testing.T) {
+ data := "<tag attr=azAZ09:-_\t>"
+ d := NewDecoder(strings.NewReader(data))
+ d.Strict = false
+ token, err := d.Token()
+ if _, ok := err.(*SyntaxError); ok {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ if token.(StartElement).Name.Local != "tag" {
+ t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local)
+ }
+ attr := token.(StartElement).Attr[0]
+ if attr.Value != "azAZ09:-_" {
+ t.Errorf("Unexpected attribute value: %v", attr.Value)
+ }
+ if attr.Name.Local != "attr" {
+ t.Errorf("Unexpected attribute name: %v", attr.Name.Local)
+ }
+}
+
+func TestValuelessAttrs(t *testing.T) {
+ tests := [][3]string{
+ {"<p nowrap>", "p", "nowrap"},
+ {"<p nowrap >", "p", "nowrap"},
+ {"<input checked/>", "input", "checked"},
+ {"<input checked />", "input", "checked"},
+ }
+ for _, test := range tests {
+ d := NewDecoder(strings.NewReader(test[0]))
+ d.Strict = false
+ token, err := d.Token()
+ if _, ok := err.(*SyntaxError); ok {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ if token.(StartElement).Name.Local != test[1] {
+ t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local)
+ }
+ attr := token.(StartElement).Attr[0]
+ if attr.Value != test[2] {
+ t.Errorf("Unexpected attribute value: %v", attr.Value)
+ }
+ if attr.Name.Local != test[2] {
+ t.Errorf("Unexpected attribute name: %v", attr.Name.Local)
+ }
+ }
+}
+
+func TestCopyTokenCharData(t *testing.T) {
+ data := []byte("same data")
+ var tok1 Token = CharData(data)
+ tok2 := CopyToken(tok1)
+ if !reflect.DeepEqual(tok1, tok2) {
+ t.Error("CopyToken(CharData) != CharData")
+ }
+ data[1] = 'o'
+ if reflect.DeepEqual(tok1, tok2) {
+ t.Error("CopyToken(CharData) uses same buffer.")
+ }
+}
+
+func TestCopyTokenStartElement(t *testing.T) {
+ elt := StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}
+ var tok1 Token = elt
+ tok2 := CopyToken(tok1)
+ if tok1.(StartElement).Attr[0].Value != "en" {
+ t.Error("CopyToken overwrote Attr[0]")
+ }
+ if !reflect.DeepEqual(tok1, tok2) {
+ t.Error("CopyToken(StartElement) != StartElement")
+ }
+ tok1.(StartElement).Attr[0] = Attr{Name{"", "lang"}, "de"}
+ if reflect.DeepEqual(tok1, tok2) {
+ t.Error("CopyToken(CharData) uses same buffer.")
+ }
+}
+
+func TestSyntaxErrorLineNum(t *testing.T) {
+ testInput := "<P>Foo<P>\n\n<P>Bar</>\n"
+ d := NewDecoder(strings.NewReader(testInput))
+ var err error
+ for _, err = d.Token(); err == nil; _, err = d.Token() {
+ }
+ synerr, ok := err.(*SyntaxError)
+ if !ok {
+ t.Error("Expected SyntaxError.")
+ }
+ if synerr.Line != 3 {
+ t.Error("SyntaxError didn't have correct line number.")
+ }
+}
+
+func TestTrailingRawToken(t *testing.T) {
+ input := `<FOO></FOO> `
+ d := NewDecoder(strings.NewReader(input))
+ var err error
+ for _, err = d.RawToken(); err == nil; _, err = d.RawToken() {
+ }
+ if err != io.EOF {
+ t.Fatalf("d.RawToken() = _, %v, want _, io.EOF", err)
+ }
+}
+
+func TestTrailingToken(t *testing.T) {
+ input := `<FOO></FOO> `
+ d := NewDecoder(strings.NewReader(input))
+ var err error
+ for _, err = d.Token(); err == nil; _, err = d.Token() {
+ }
+ if err != io.EOF {
+ t.Fatalf("d.Token() = _, %v, want _, io.EOF", err)
+ }
+}
+
+func TestEntityInsideCDATA(t *testing.T) {
+ input := `<test><![CDATA[ &val=foo ]]></test>`
+ d := NewDecoder(strings.NewReader(input))
+ var err error
+ for _, err = d.Token(); err == nil; _, err = d.Token() {
+ }
+ if err != io.EOF {
+ t.Fatalf("d.Token() = _, %v, want _, io.EOF", err)
+ }
+}
+
+var characterTests = []struct {
+ in string
+ err string
+}{
+ {"\x12<doc/>", "illegal character code U+0012"},
+ {"<?xml version=\"1.0\"?>\x0b<doc/>", "illegal character code U+000B"},
+ {"\xef\xbf\xbe<doc/>", "illegal character code U+FFFE"},
+ {"<?xml version=\"1.0\"?><doc>\r\n<hiya/>\x07<toots/></doc>", "illegal character code U+0007"},
+ {"<?xml version=\"1.0\"?><doc \x12='value'>what's up</doc>", "expected attribute name in element"},
+ {"<doc>&abc\x01;</doc>", "invalid character entity &abc (no semicolon)"},
+ {"<doc>&\x01;</doc>", "invalid character entity & (no semicolon)"},
+ {"<doc>&\xef\xbf\xbe;</doc>", "invalid character entity &\uFFFE;"},
+ {"<doc>&hello;</doc>", "invalid character entity &hello;"},
+}
+
+func TestDisallowedCharacters(t *testing.T) {
+
+ for i, tt := range characterTests {
+ d := NewDecoder(strings.NewReader(tt.in))
+ var err error
+
+ for err == nil {
+ _, err = d.Token()
+ }
+ synerr, ok := err.(*SyntaxError)
+ if !ok {
+ t.Fatalf("input %d d.Token() = _, %v, want _, *SyntaxError", i, err)
+ }
+ if synerr.Msg != tt.err {
+ t.Fatalf("input %d synerr.Msg wrong: want %q, got %q", i, tt.err, synerr.Msg)
+ }
+ }
+}
+
+type procInstEncodingTest struct {
+ expect, got string
+}
+
+var procInstTests = []struct {
+ input string
+ expect [2]string
+}{
+ {`version="1.0" encoding="utf-8"`, [2]string{"1.0", "utf-8"}},
+ {`version="1.0" encoding='utf-8'`, [2]string{"1.0", "utf-8"}},
+ {`version="1.0" encoding='utf-8' `, [2]string{"1.0", "utf-8"}},
+ {`version="1.0" encoding=utf-8`, [2]string{"1.0", ""}},
+ {`encoding="FOO" `, [2]string{"", "FOO"}},
+}
+
+func TestProcInstEncoding(t *testing.T) {
+ for _, test := range procInstTests {
+ if got := procInst("version", test.input); got != test.expect[0] {
+ t.Errorf("procInst(version, %q) = %q; want %q", test.input, got, test.expect[0])
+ }
+ if got := procInst("encoding", test.input); got != test.expect[1] {
+ t.Errorf("procInst(encoding, %q) = %q; want %q", test.input, got, test.expect[1])
+ }
+ }
+}
+
+// Ensure that directives with comments include the complete
+// text of any nested directives.
+
+var directivesWithCommentsInput = `
+<!DOCTYPE [<!-- a comment --><!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]>
+<!DOCTYPE [<!ENTITY go "Golang"><!-- a comment-->]>
+<!DOCTYPE <!-> <!> <!----> <!-->--> <!--->--> [<!ENTITY go "Golang"><!-- a comment-->]>
+`
+
+var directivesWithCommentsTokens = []Token{
+ CharData("\n"),
+ Directive(`DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]`),
+ CharData("\n"),
+ Directive(`DOCTYPE [<!ENTITY go "Golang">]`),
+ CharData("\n"),
+ Directive(`DOCTYPE <!-> <!> [<!ENTITY go "Golang">]`),
+ CharData("\n"),
+}
+
+func TestDirectivesWithComments(t *testing.T) {
+ d := NewDecoder(strings.NewReader(directivesWithCommentsInput))
+
+ for i, want := range directivesWithCommentsTokens {
+ have, err := d.Token()
+ if err != nil {
+ t.Fatalf("token %d: unexpected error: %s", i, err)
+ }
+ if !reflect.DeepEqual(have, want) {
+ t.Errorf("token %d = %#v want %#v", i, have, want)
+ }
+ }
+}
+
+// Writer whose Write method always returns an error.
+type errWriter struct{}
+
+func (errWriter) Write(p []byte) (n int, err error) { return 0, fmt.Errorf("unwritable") }
+
+func TestEscapeTextIOErrors(t *testing.T) {
+ expectErr := "unwritable"
+ err := EscapeText(errWriter{}, []byte{'A'})
+
+ if err == nil || err.Error() != expectErr {
+ t.Errorf("have %v, want %v", err, expectErr)
+ }
+}
+
+func TestEscapeTextInvalidChar(t *testing.T) {
+ input := []byte("A \x00 terminated string.")
+ expected := "A \uFFFD terminated string."
+
+ buff := new(bytes.Buffer)
+ if err := EscapeText(buff, input); err != nil {
+ t.Fatalf("have %v, want nil", err)
+ }
+ text := buff.String()
+
+ if text != expected {
+ t.Errorf("have %v, want %v", text, expected)
+ }
+}
+
+func TestIssue5880(t *testing.T) {
+ type T []byte
+ data, err := Marshal(T{192, 168, 0, 1})
+ if err != nil {
+ t.Errorf("Marshal error: %v", err)
+ }
+ if !utf8.Valid(data) {
+ t.Errorf("Marshal generated invalid UTF-8: %x", data)
+ }
+}
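The non-strict attribute tests above (TestUnquotedAttrs, TestValuelessAttrs) correspond to the following kind of caller-side usage; a sketch with an invented HTML fragment, using the exported encoding/xml package.

    package main

    import (
        "encoding/xml"
        "fmt"
        "log"
        "strings"
    )

    func main() {
        d := xml.NewDecoder(strings.NewReader(`<input type=checkbox checked>`))
        d.Strict = false // accept unquoted and valueless attributes, HTML style

        tok, err := d.Token()
        if err != nil {
            log.Fatal(err)
        }
        start := tok.(xml.StartElement)
        for _, a := range start.Attr {
            fmt.Printf("%s=%q\n", a.Name.Local, a.Value)
        }
        // type="checkbox"
        // checked="checked"
    }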
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/litmus_test_server.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/litmus_test_server.go
new file mode 100644
index 000000000..514db5dd1
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/litmus_test_server.go
@@ -0,0 +1,94 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+This program is a server for the WebDAV 'litmus' compliance test at
+http://www.webdav.org/neon/litmus/
+To run the test:
+
+go run litmus_test_server.go
+
+and separately, from the downloaded litmus-xxx directory:
+
+make URL=http://localhost:9999/ check
+*/
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "net/http"
+ "net/url"
+
+ "golang.org/x/net/webdav"
+)
+
+var port = flag.Int("port", 9999, "server port")
+
+func main() {
+ flag.Parse()
+ log.SetFlags(0)
+ h := &webdav.Handler{
+ FileSystem: webdav.NewMemFS(),
+ LockSystem: webdav.NewMemLS(),
+ Logger: func(r *http.Request, err error) {
+ litmus := r.Header.Get("X-Litmus")
+ if len(litmus) > 19 {
+ litmus = litmus[:16] + "..."
+ }
+
+ switch r.Method {
+ case "COPY", "MOVE":
+ dst := ""
+ if u, err := url.Parse(r.Header.Get("Destination")); err == nil {
+ dst = u.Path
+ }
+ o := r.Header.Get("Overwrite")
+ log.Printf("%-20s%-10s%-30s%-30so=%-2s%v", litmus, r.Method, r.URL.Path, dst, o, err)
+ default:
+ log.Printf("%-20s%-10s%-30s%v", litmus, r.Method, r.URL.Path, err)
+ }
+ },
+ }
+
+ // The next line would normally be:
+ // http.Handle("/", h)
+ // but we wrap that HTTP handler h to cater for a special case.
+ //
+ // The propfind_invalid2 litmus test case expects an empty namespace prefix
+ // declaration to be an error. The FAQ in the webdav litmus test says:
+ //
+ // "What does the "propfind_invalid2" test check for?...
+ //
+ // If a request was sent with an XML body which included an empty namespace
+ // prefix declaration (xmlns:ns1=""), then the server must reject that with
+ // a "400 Bad Request" response, as it is invalid according to the XML
+ // Namespace specification."
+ //
+ // On the other hand, the Go standard library's encoding/xml package
+ // accepts an empty xmlns namespace, as per the discussion at
+ // https://github.com/golang/go/issues/8068
+ //
+ // Empty namespaces seem disallowed in the second (2006) edition of the XML
+ // standard, but allowed in a later edition. The grammar differs between
+ // http://www.w3.org/TR/2006/REC-xml-names-20060816/#ns-decl and
+ // http://www.w3.org/TR/REC-xml-names/#dt-prefix
+ //
+ // Thus, we assume that the propfind_invalid2 test is obsolete, and
+ // hard-code the 400 Bad Request response that the test expects.
+ http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("X-Litmus") == "props: 3 (propfind_invalid2)" {
+ http.Error(w, "400 Bad Request", http.StatusBadRequest)
+ return
+ }
+ h.ServeHTTP(w, r)
+ }))
+
+ addr := fmt.Sprintf(":%d", *port)
+ log.Printf("Serving %v", addr)
+ log.Fatal(http.ListenAndServe(addr, nil))
+}
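For comparison with the in-memory litmus setup above, here is a hedged sketch of a more typical deployment that serves an on-disk directory through webdav.Dir under a URL prefix; the port, prefix and directory path are placeholders.

    package main

    import (
        "log"
        "net/http"

        "golang.org/x/net/webdav"
    )

    func main() {
        h := &webdav.Handler{
            Prefix:     "/dav/",
            FileSystem: webdav.Dir("/srv/davroot"), // an existing directory on disk
            LockSystem: webdav.NewMemLS(),
            Logger: func(r *http.Request, err error) {
                if err != nil {
                    log.Printf("%s %s: %v", r.Method, r.URL.Path, err)
                }
            },
        }
        http.Handle("/dav/", h)
        log.Fatal(http.ListenAndServe(":8080", nil))
    }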
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/lock.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/lock.go
new file mode 100644
index 000000000..344ac5cea
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/lock.go
@@ -0,0 +1,445 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+ "container/heap"
+ "errors"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+var (
+ // ErrConfirmationFailed is returned by a LockSystem's Confirm method.
+ ErrConfirmationFailed = errors.New("webdav: confirmation failed")
+ // ErrForbidden is returned by a LockSystem's Unlock method.
+ ErrForbidden = errors.New("webdav: forbidden")
+ // ErrLocked is returned by a LockSystem's Create, Refresh and Unlock methods.
+ ErrLocked = errors.New("webdav: locked")
+ // ErrNoSuchLock is returned by a LockSystem's Refresh and Unlock methods.
+ ErrNoSuchLock = errors.New("webdav: no such lock")
+)
+
+// Condition can match a WebDAV resource, based on a token or ETag.
+// Exactly one of Token and ETag should be non-empty.
+type Condition struct {
+ Not bool
+ Token string
+ ETag string
+}
+
+// LockSystem manages access to a collection of named resources. The elements
+// in a lock name are separated by slash ('/', U+002F) characters, regardless
+// of host operating system convention.
+type LockSystem interface {
+ // Confirm confirms that the caller can claim all of the locks specified by
+ // the given conditions, and that holding the union of all of those locks
+ // gives exclusive access to all of the named resources. Up to two resources
+ // can be named. Empty names are ignored.
+ //
+ // Exactly one of release and err will be non-nil. If release is non-nil,
+ // all of the requested locks are held until release is called. Calling
+ // release does not unlock the lock, in the WebDAV UNLOCK sense, but once
+ // Confirm has confirmed that a lock claim is valid, that lock cannot be
+ // Confirmed again until it has been released.
+ //
+ // If Confirm returns ErrConfirmationFailed then the Handler will continue
+ // to try any other set of locks presented (a WebDAV HTTP request can
+ // present more than one set of locks). If it returns any other non-nil
+ // error, the Handler will write a "500 Internal Server Error" HTTP status.
+ Confirm(now time.Time, name0, name1 string, conditions ...Condition) (release func(), err error)
+
+ // Create creates a lock with the given depth, duration, owner and root
+ // (name). The depth will either be negative (meaning infinite) or zero.
+ //
+ // If Create returns ErrLocked then the Handler will write a "423 Locked"
+ // HTTP status. If it returns any other non-nil error, the Handler will
+ // write a "500 Internal Server Error" HTTP status.
+ //
+ // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for
+ // when to use each error.
+ //
+ // The token returned identifies the created lock. It should be an absolute
+ // URI as defined by RFC 3986, Section 4.3. In particular, it should not
+ // contain whitespace.
+ Create(now time.Time, details LockDetails) (token string, err error)
+
+ // Refresh refreshes the lock with the given token.
+ //
+ // If Refresh returns ErrLocked then the Handler will write a "423 Locked"
+ // HTTP Status. If Refresh returns ErrNoSuchLock then the Handler will write
+ // a "412 Precondition Failed" HTTP Status. If it returns any other non-nil
+ // error, the Handler will write a "500 Internal Server Error" HTTP status.
+ //
+ // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for
+ // when to use each error.
+ Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error)
+
+ // Unlock unlocks the lock with the given token.
+ //
+ // If Unlock returns ErrForbidden then the Handler will write a "403
+ // Forbidden" HTTP Status. If Unlock returns ErrLocked then the Handler
+ // will write a "423 Locked" HTTP status. If Unlock returns ErrNoSuchLock
+ // then the Handler will write a "409 Conflict" HTTP Status. If it returns
+ // any other non-nil error, the Handler will write a "500 Internal Server
+ // Error" HTTP status.
+ //
+ // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.11.1 for
+ // when to use each error.
+ Unlock(now time.Time, token string) error
+}
+
+// LockDetails are a lock's metadata.
+type LockDetails struct {
+ // Root is the root resource name being locked. For a zero-depth lock, the
+ // root is the only resource being locked.
+ Root string
+ // Duration is the lock timeout. A negative duration means infinite.
+ Duration time.Duration
+ // OwnerXML is the verbatim <owner> XML given in a LOCK HTTP request.
+ //
+ // TODO: does the "verbatim" nature play well with XML namespaces?
+ // Does the OwnerXML field need to have more structure? See
+ // https://codereview.appspot.com/175140043/#msg2
+ OwnerXML string
+ // ZeroDepth is whether the lock has zero depth. If it does not have zero
+ // depth, it has infinite depth.
+ ZeroDepth bool
+}
+
+// NewMemLS returns a new in-memory LockSystem.
+func NewMemLS() LockSystem {
+ return &memLS{
+ byName: make(map[string]*memLSNode),
+ byToken: make(map[string]*memLSNode),
+ gen: uint64(time.Now().Unix()),
+ }
+}
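A hedged sketch of driving the in-memory LockSystem directly, exercising Create, Confirm, Refresh and Unlock in the order the interface documentation above describes; the resource path and owner XML are invented.

    package main

    import (
        "fmt"
        "time"

        "golang.org/x/net/webdav"
    )

    func main() {
        ls := webdav.NewMemLS()
        now := time.Now()

        token, err := ls.Create(now, webdav.LockDetails{
            Root:      "/reports/q3.txt", // illustrative resource name
            Duration:  5 * time.Minute,
            OwnerXML:  "<D:owner>editor</D:owner>",
            ZeroDepth: true,
        })
        if err != nil {
            fmt.Println("create:", err)
            return
        }

        // Claiming the resource succeeds only when a condition carries the token.
        release, err := ls.Confirm(now, "/reports/q3.txt", "", webdav.Condition{Token: token})
        if err != nil {
            fmt.Println("confirm:", err)
            return
        }
        release() // the claim must be released before the lock can be confirmed again

        if _, err := ls.Refresh(now, token, 10*time.Minute); err != nil {
            fmt.Println("refresh:", err)
            return
        }
        if err := ls.Unlock(now, token); err != nil {
            fmt.Println("unlock:", err)
        }
    }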
+
+type memLS struct {
+ mu sync.Mutex
+ byName map[string]*memLSNode
+ byToken map[string]*memLSNode
+ gen uint64
+ // byExpiry only contains those nodes whose LockDetails have a finite
+ // Duration and are yet to expire.
+ byExpiry byExpiry
+}
+
+func (m *memLS) nextToken() string {
+ m.gen++
+ return strconv.FormatUint(m.gen, 10)
+}
+
+func (m *memLS) collectExpiredNodes(now time.Time) {
+ for len(m.byExpiry) > 0 {
+ if now.Before(m.byExpiry[0].expiry) {
+ break
+ }
+ m.remove(m.byExpiry[0])
+ }
+}
+
+func (m *memLS) Confirm(now time.Time, name0, name1 string, conditions ...Condition) (func(), error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.collectExpiredNodes(now)
+
+ var n0, n1 *memLSNode
+ if name0 != "" {
+ if n0 = m.lookup(slashClean(name0), conditions...); n0 == nil {
+ return nil, ErrConfirmationFailed
+ }
+ }
+ if name1 != "" {
+ if n1 = m.lookup(slashClean(name1), conditions...); n1 == nil {
+ return nil, ErrConfirmationFailed
+ }
+ }
+
+ // Don't hold the same node twice.
+ if n1 == n0 {
+ n1 = nil
+ }
+
+ if n0 != nil {
+ m.hold(n0)
+ }
+ if n1 != nil {
+ m.hold(n1)
+ }
+ return func() {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if n1 != nil {
+ m.unhold(n1)
+ }
+ if n0 != nil {
+ m.unhold(n0)
+ }
+ }, nil
+}
+
+// lookup returns the node n that locks the named resource, provided that n
+// matches at least one of the given conditions and that lock isn't held by
+// another party. Otherwise, it returns nil.
+//
+// n may be a parent of the named resource, if n is an infinite depth lock.
+func (m *memLS) lookup(name string, conditions ...Condition) (n *memLSNode) {
+ // TODO: support Condition.Not and Condition.ETag.
+ for _, c := range conditions {
+ n = m.byToken[c.Token]
+ if n == nil || n.held {
+ continue
+ }
+ if name == n.details.Root {
+ return n
+ }
+ if n.details.ZeroDepth {
+ continue
+ }
+ if n.details.Root == "/" || strings.HasPrefix(name, n.details.Root+"/") {
+ return n
+ }
+ }
+ return nil
+}
+
+func (m *memLS) hold(n *memLSNode) {
+ if n.held {
+ panic("webdav: memLS inconsistent held state")
+ }
+ n.held = true
+ if n.details.Duration >= 0 && n.byExpiryIndex >= 0 {
+ heap.Remove(&m.byExpiry, n.byExpiryIndex)
+ }
+}
+
+func (m *memLS) unhold(n *memLSNode) {
+ if !n.held {
+ panic("webdav: memLS inconsistent held state")
+ }
+ n.held = false
+ if n.details.Duration >= 0 {
+ heap.Push(&m.byExpiry, n)
+ }
+}
+
+func (m *memLS) Create(now time.Time, details LockDetails) (string, error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.collectExpiredNodes(now)
+ details.Root = slashClean(details.Root)
+
+ if !m.canCreate(details.Root, details.ZeroDepth) {
+ return "", ErrLocked
+ }
+ n := m.create(details.Root)
+ n.token = m.nextToken()
+ m.byToken[n.token] = n
+ n.details = details
+ if n.details.Duration >= 0 {
+ n.expiry = now.Add(n.details.Duration)
+ heap.Push(&m.byExpiry, n)
+ }
+ return n.token, nil
+}
+
+func (m *memLS) Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.collectExpiredNodes(now)
+
+ n := m.byToken[token]
+ if n == nil {
+ return LockDetails{}, ErrNoSuchLock
+ }
+ if n.held {
+ return LockDetails{}, ErrLocked
+ }
+ if n.byExpiryIndex >= 0 {
+ heap.Remove(&m.byExpiry, n.byExpiryIndex)
+ }
+ n.details.Duration = duration
+ if n.details.Duration >= 0 {
+ n.expiry = now.Add(n.details.Duration)
+ heap.Push(&m.byExpiry, n)
+ }
+ return n.details, nil
+}
+
+func (m *memLS) Unlock(now time.Time, token string) error {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.collectExpiredNodes(now)
+
+ n := m.byToken[token]
+ if n == nil {
+ return ErrNoSuchLock
+ }
+ if n.held {
+ return ErrLocked
+ }
+ m.remove(n)
+ return nil
+}
+
+func (m *memLS) canCreate(name string, zeroDepth bool) bool {
+ return walkToRoot(name, func(name0 string, first bool) bool {
+ n := m.byName[name0]
+ if n == nil {
+ return true
+ }
+ if first {
+ if n.token != "" {
+ // The target node is already locked.
+ return false
+ }
+ if !zeroDepth {
+ // The requested lock depth is infinite, and the fact that n exists
+ // (n != nil) means that a descendent of the target node is locked.
+ return false
+ }
+ } else if n.token != "" && !n.details.ZeroDepth {
+ // An ancestor of the target node is locked with infinite depth.
+ return false
+ }
+ return true
+ })
+}
+
+func (m *memLS) create(name string) (ret *memLSNode) {
+ walkToRoot(name, func(name0 string, first bool) bool {
+ n := m.byName[name0]
+ if n == nil {
+ n = &memLSNode{
+ details: LockDetails{
+ Root: name0,
+ },
+ byExpiryIndex: -1,
+ }
+ m.byName[name0] = n
+ }
+ n.refCount++
+ if first {
+ ret = n
+ }
+ return true
+ })
+ return ret
+}
+
+func (m *memLS) remove(n *memLSNode) {
+ delete(m.byToken, n.token)
+ n.token = ""
+ walkToRoot(n.details.Root, func(name0 string, first bool) bool {
+ x := m.byName[name0]
+ x.refCount--
+ if x.refCount == 0 {
+ delete(m.byName, name0)
+ }
+ return true
+ })
+ if n.byExpiryIndex >= 0 {
+ heap.Remove(&m.byExpiry, n.byExpiryIndex)
+ }
+}
+
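+// walkToRoot calls f for name and then for each of its ancestors, up to and
+// including "/". For example, walkToRoot("/a/b", f) calls f("/a/b", true),
+// then f("/a", false), then f("/", false). It stops early and returns false
+// if any call to f returns false; otherwise it returns true.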
+func walkToRoot(name string, f func(name0 string, first bool) bool) bool {
+ for first := true; ; first = false {
+ if !f(name, first) {
+ return false
+ }
+ if name == "/" {
+ break
+ }
+ name = name[:strings.LastIndex(name, "/")]
+ if name == "" {
+ name = "/"
+ }
+ }
+ return true
+}
+
+type memLSNode struct {
+ // details are the lock metadata. Even if this node's name is not explicitly locked,
+ // details.Root will still equal the node's name.
+ details LockDetails
+ // token is the unique identifier for this node's lock. An empty token means that
+ // this node is not explicitly locked.
+ token string
+ // refCount is the number of self-or-descendent nodes that are explicitly locked.
+ refCount int
+ // expiry is when this node's lock expires.
+ expiry time.Time
+ // byExpiryIndex is the index of this node in memLS.byExpiry. It is -1
+ // if this node does not expire, or has expired.
+ byExpiryIndex int
+ // held is whether this node's lock is actively held by a Confirm call.
+ held bool
+}
+
+type byExpiry []*memLSNode
+
+func (b *byExpiry) Len() int {
+ return len(*b)
+}
+
+func (b *byExpiry) Less(i, j int) bool {
+ return (*b)[i].expiry.Before((*b)[j].expiry)
+}
+
+func (b *byExpiry) Swap(i, j int) {
+ (*b)[i], (*b)[j] = (*b)[j], (*b)[i]
+ (*b)[i].byExpiryIndex = i
+ (*b)[j].byExpiryIndex = j
+}
+
+func (b *byExpiry) Push(x interface{}) {
+ n := x.(*memLSNode)
+ n.byExpiryIndex = len(*b)
+ *b = append(*b, n)
+}
+
+func (b *byExpiry) Pop() interface{} {
+ i := len(*b) - 1
+ n := (*b)[i]
+ (*b)[i] = nil
+ n.byExpiryIndex = -1
+ *b = (*b)[:i]
+ return n
+}
+
+const infiniteTimeout = -1
+
+// parseTimeout parses the Timeout HTTP header, as per section 10.7. If s is
+// empty, an infiniteTimeout is returned.
+func parseTimeout(s string) (time.Duration, error) {
+ if s == "" {
+ return infiniteTimeout, nil
+ }
+ if i := strings.IndexByte(s, ','); i >= 0 {
+ s = s[:i]
+ }
+ s = strings.TrimSpace(s)
+ if s == "Infinite" {
+ return infiniteTimeout, nil
+ }
+ const pre = "Second-"
+ if !strings.HasPrefix(s, pre) {
+ return 0, errInvalidTimeout
+ }
+ s = s[len(pre):]
+ if s == "" || s[0] < '0' || '9' < s[0] {
+ return 0, errInvalidTimeout
+ }
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil || 1<<32-1 < n {
+ return 0, errInvalidTimeout
+ }
+ return time.Duration(n) * time.Second, nil
+}
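+
+// A few illustrative parses, derived from the grammar above (see also the
+// table-driven cases in lock_test.go):
+//
+//    parseTimeout("")                            // infiniteTimeout, nil
+//    parseTimeout("Infinite")                    // infiniteTimeout, nil
+//    parseTimeout("Second-3600")                 // 3600 * time.Second, nil
+//    parseTimeout("Infinite, Second-4100000000") // infiniteTimeout, nil (only the first clause is used)
+//    parseTimeout("second-3600")                 // 0, errInvalidTimeout (the "Second-" prefix is case-sensitive)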
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/lock_test.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/lock_test.go
new file mode 100644
index 000000000..5cf14cda4
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/lock_test.go
@@ -0,0 +1,731 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+ "fmt"
+ "math/rand"
+ "path"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestWalkToRoot(t *testing.T) {
+ testCases := []struct {
+ name string
+ want []string
+ }{{
+ "/a/b/c/d",
+ []string{
+ "/a/b/c/d",
+ "/a/b/c",
+ "/a/b",
+ "/a",
+ "/",
+ },
+ }, {
+ "/a",
+ []string{
+ "/a",
+ "/",
+ },
+ }, {
+ "/",
+ []string{
+ "/",
+ },
+ }}
+
+ for _, tc := range testCases {
+ var got []string
+ if !walkToRoot(tc.name, func(name0 string, first bool) bool {
+ if first != (len(got) == 0) {
+ t.Errorf("name=%q: first=%t but len(got)==%d", tc.name, first, len(got))
+ return false
+ }
+ got = append(got, name0)
+ return true
+ }) {
+ continue
+ }
+ if !reflect.DeepEqual(got, tc.want) {
+ t.Errorf("name=%q:\ngot %q\nwant %q", tc.name, got, tc.want)
+ }
+ }
+}
+
+var lockTestDurations = []time.Duration{
+ infiniteTimeout, // infiniteTimeout means to never expire.
+ 0, // A zero duration means to expire immediately.
+ 100 * time.Hour, // A very large duration will not expire in these tests.
+}
+
+// lockTestNames are the names of a set of mutually compatible locks. For each
+// name fragment:
+// - _ means no explicit lock.
+// - i means an infinite-depth lock.
+// - z means a zero-depth lock.
+var lockTestNames = []string{
+ "/_/_/_/_/z",
+ "/_/_/i",
+ "/_/z",
+ "/_/z/i",
+ "/_/z/z",
+ "/_/z/_/i",
+ "/_/z/_/z",
+ "/i",
+ "/z",
+ "/z/_/i",
+ "/z/_/z",
+}
+
+func lockTestZeroDepth(name string) bool {
+ switch name[len(name)-1] {
+ case 'i':
+ return false
+ case 'z':
+ return true
+ }
+ panic(fmt.Sprintf("lock name %q did not end with 'i' or 'z'", name))
+}
+
+func TestMemLSCanCreate(t *testing.T) {
+ now := time.Unix(0, 0)
+ m := NewMemLS().(*memLS)
+
+ for _, name := range lockTestNames {
+ _, err := m.Create(now, LockDetails{
+ Root: name,
+ Duration: infiniteTimeout,
+ ZeroDepth: lockTestZeroDepth(name),
+ })
+ if err != nil {
+ t.Fatalf("creating lock for %q: %v", name, err)
+ }
+ }
+
+ wantCanCreate := func(name string, zeroDepth bool) bool {
+ for _, n := range lockTestNames {
+ switch {
+ case n == name:
+ // An existing lock has the same name as the proposed lock.
+ return false
+ case strings.HasPrefix(n, name):
+ // An existing lock would be a child of the proposed lock,
+ // which conflicts if the proposed lock has infinite depth.
+ if !zeroDepth {
+ return false
+ }
+ case strings.HasPrefix(name, n):
+ // An existing lock would be an ancestor of the proposed lock,
+ // which conflicts if the ancestor has infinite depth.
+ if n[len(n)-1] == 'i' {
+ return false
+ }
+ }
+ }
+ return true
+ }
+
+ var check func(int, string)
+ check = func(recursion int, name string) {
+ for _, zeroDepth := range []bool{false, true} {
+ got := m.canCreate(name, zeroDepth)
+ want := wantCanCreate(name, zeroDepth)
+ if got != want {
+ t.Errorf("canCreate name=%q zeroDepth=%t: got %t, want %t", name, zeroDepth, got, want)
+ }
+ }
+ if recursion == 6 {
+ return
+ }
+ if name != "/" {
+ name += "/"
+ }
+ for _, c := range "_iz" {
+ check(recursion+1, name+string(c))
+ }
+ }
+ check(0, "/")
+}
+
+func TestMemLSLookup(t *testing.T) {
+ now := time.Unix(0, 0)
+ m := NewMemLS().(*memLS)
+
+ badToken := m.nextToken()
+ t.Logf("badToken=%q", badToken)
+
+ for _, name := range lockTestNames {
+ token, err := m.Create(now, LockDetails{
+ Root: name,
+ Duration: infiniteTimeout,
+ ZeroDepth: lockTestZeroDepth(name),
+ })
+ if err != nil {
+ t.Fatalf("creating lock for %q: %v", name, err)
+ }
+ t.Logf("%-15q -> node=%p token=%q", name, m.byName[name], token)
+ }
+
+ baseNames := append([]string{"/a", "/b/c"}, lockTestNames...)
+ for _, baseName := range baseNames {
+ for _, suffix := range []string{"", "/0", "/1/2/3"} {
+ name := baseName + suffix
+
+ goodToken := ""
+ base := m.byName[baseName]
+ if base != nil && (suffix == "" || !lockTestZeroDepth(baseName)) {
+ goodToken = base.token
+ }
+
+ for _, token := range []string{badToken, goodToken} {
+ if token == "" {
+ continue
+ }
+
+ got := m.lookup(name, Condition{Token: token})
+ want := base
+ if token == badToken {
+ want = nil
+ }
+ if got != want {
+ t.Errorf("name=%-20qtoken=%q (bad=%t): got %p, want %p",
+ name, token, token == badToken, got, want)
+ }
+ }
+ }
+ }
+}
+
+func TestMemLSConfirm(t *testing.T) {
+ now := time.Unix(0, 0)
+ m := NewMemLS().(*memLS)
+ alice, err := m.Create(now, LockDetails{
+ Root: "/alice",
+ Duration: infiniteTimeout,
+ ZeroDepth: false,
+ })
+ if err != nil {
+ t.Fatalf("Create: %v", err)
+ }
+ tweedle, err := m.Create(now, LockDetails{
+ Root: "/tweedle",
+ Duration: infiniteTimeout,
+ ZeroDepth: false,
+ })
+ if err != nil {
+ t.Fatalf("Create: %v", err)
+ }
+ if err := m.consistent(); err != nil {
+ t.Fatalf("Create: inconsistent state: %v", err)
+ }
+
+ // Test a mismatch between name and condition.
+ _, err = m.Confirm(now, "/tweedle/dee", "", Condition{Token: alice})
+ if err != ErrConfirmationFailed {
+ t.Fatalf("Confirm (mismatch): got %v, want ErrConfirmationFailed", err)
+ }
+ if err := m.consistent(); err != nil {
+ t.Fatalf("Confirm (mismatch): inconsistent state: %v", err)
+ }
+
+ // Test two names (that fall under the same lock) in the one Confirm call.
+ release, err := m.Confirm(now, "/tweedle/dee", "/tweedle/dum", Condition{Token: tweedle})
+ if err != nil {
+ t.Fatalf("Confirm (twins): %v", err)
+ }
+ if err := m.consistent(); err != nil {
+ t.Fatalf("Confirm (twins): inconsistent state: %v", err)
+ }
+ release()
+ if err := m.consistent(); err != nil {
+ t.Fatalf("release (twins): inconsistent state: %v", err)
+ }
+
+ // Test the same two names in overlapping Confirm / release calls.
+ releaseDee, err := m.Confirm(now, "/tweedle/dee", "", Condition{Token: tweedle})
+ if err != nil {
+ t.Fatalf("Confirm (sequence #0): %v", err)
+ }
+ if err := m.consistent(); err != nil {
+ t.Fatalf("Confirm (sequence #0): inconsistent state: %v", err)
+ }
+
+ _, err = m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle})
+ if err != ErrConfirmationFailed {
+ t.Fatalf("Confirm (sequence #1): got %v, want ErrConfirmationFailed", err)
+ }
+ if err := m.consistent(); err != nil {
+ t.Fatalf("Confirm (sequence #1): inconsistent state: %v", err)
+ }
+
+ releaseDee()
+ if err := m.consistent(); err != nil {
+ t.Fatalf("release (sequence #2): inconsistent state: %v", err)
+ }
+
+ releaseDum, err := m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle})
+ if err != nil {
+ t.Fatalf("Confirm (sequence #3): %v", err)
+ }
+ if err := m.consistent(); err != nil {
+ t.Fatalf("Confirm (sequence #3): inconsistent state: %v", err)
+ }
+
+ // Test that you can't unlock a held lock.
+ err = m.Unlock(now, tweedle)
+ if err != ErrLocked {
+ t.Fatalf("Unlock (sequence #4): got %v, want ErrLocked", err)
+ }
+
+ releaseDum()
+ if err := m.consistent(); err != nil {
+ t.Fatalf("release (sequence #5): inconsistent state: %v", err)
+ }
+
+ err = m.Unlock(now, tweedle)
+ if err != nil {
+ t.Fatalf("Unlock (sequence #6): %v", err)
+ }
+ if err := m.consistent(); err != nil {
+ t.Fatalf("Unlock (sequence #6): inconsistent state: %v", err)
+ }
+}
+
+func TestMemLSNonCanonicalRoot(t *testing.T) {
+ now := time.Unix(0, 0)
+ m := NewMemLS().(*memLS)
+ token, err := m.Create(now, LockDetails{
+ Root: "/foo/./bar//",
+ Duration: 1 * time.Second,
+ })
+ if err != nil {
+ t.Fatalf("Create: %v", err)
+ }
+ if err := m.consistent(); err != nil {
+ t.Fatalf("Create: inconsistent state: %v", err)
+ }
+ if err := m.Unlock(now, token); err != nil {
+ t.Fatalf("Unlock: %v", err)
+ }
+ if err := m.consistent(); err != nil {
+ t.Fatalf("Unlock: inconsistent state: %v", err)
+ }
+}
+
+func TestMemLSExpiry(t *testing.T) {
+ m := NewMemLS().(*memLS)
+ testCases := []string{
+ "setNow 0",
+ "create /a.5",
+ "want /a.5",
+ "create /c.6",
+ "want /a.5 /c.6",
+ "create /a/b.7",
+ "want /a.5 /a/b.7 /c.6",
+ "setNow 4",
+ "want /a.5 /a/b.7 /c.6",
+ "setNow 5",
+ "want /a/b.7 /c.6",
+ "setNow 6",
+ "want /a/b.7",
+ "setNow 7",
+ "want ",
+ "setNow 8",
+ "want ",
+ "create /a.12",
+ "create /b.13",
+ "create /c.15",
+ "create /a/d.16",
+ "want /a.12 /a/d.16 /b.13 /c.15",
+ "refresh /a.14",
+ "want /a.14 /a/d.16 /b.13 /c.15",
+ "setNow 12",
+ "want /a.14 /a/d.16 /b.13 /c.15",
+ "setNow 13",
+ "want /a.14 /a/d.16 /c.15",
+ "setNow 14",
+ "want /a/d.16 /c.15",
+ "refresh /a/d.20",
+ "refresh /c.20",
+ "want /a/d.20 /c.20",
+ "setNow 20",
+ "want ",
+ }
+
+ tokens := map[string]string{}
+ zTime := time.Unix(0, 0)
+ now := zTime
+ for i, tc := range testCases {
+ j := strings.IndexByte(tc, ' ')
+ if j < 0 {
+ t.Fatalf("test case #%d %q: invalid command", i, tc)
+ }
+ op, arg := tc[:j], tc[j+1:]
+ switch op {
+ default:
+ t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op)
+
+ case "create", "refresh":
+ parts := strings.Split(arg, ".")
+ if len(parts) != 2 {
+ t.Fatalf("test case #%d %q: invalid create", i, tc)
+ }
+ root := parts[0]
+ d, err := strconv.Atoi(parts[1])
+ if err != nil {
+ t.Fatalf("test case #%d %q: invalid duration", i, tc)
+ }
+ dur := time.Unix(0, 0).Add(time.Duration(d) * time.Second).Sub(now)
+
+ switch op {
+ case "create":
+ token, err := m.Create(now, LockDetails{
+ Root: root,
+ Duration: dur,
+ ZeroDepth: true,
+ })
+ if err != nil {
+ t.Fatalf("test case #%d %q: Create: %v", i, tc, err)
+ }
+ tokens[root] = token
+
+ case "refresh":
+ token := tokens[root]
+ if token == "" {
+ t.Fatalf("test case #%d %q: no token for %q", i, tc, root)
+ }
+ got, err := m.Refresh(now, token, dur)
+ if err != nil {
+ t.Fatalf("test case #%d %q: Refresh: %v", i, tc, err)
+ }
+ want := LockDetails{
+ Root: root,
+ Duration: dur,
+ ZeroDepth: true,
+ }
+ if got != want {
+ t.Fatalf("test case #%d %q:\ngot %v\nwant %v", i, tc, got, want)
+ }
+ }
+
+ case "setNow":
+ d, err := strconv.Atoi(arg)
+ if err != nil {
+ t.Fatalf("test case #%d %q: invalid duration", i, tc)
+ }
+ now = time.Unix(0, 0).Add(time.Duration(d) * time.Second)
+
+ case "want":
+ m.mu.Lock()
+ m.collectExpiredNodes(now)
+ got := make([]string, 0, len(m.byToken))
+ for _, n := range m.byToken {
+ got = append(got, fmt.Sprintf("%s.%d",
+ n.details.Root, n.expiry.Sub(zTime)/time.Second))
+ }
+ m.mu.Unlock()
+ sort.Strings(got)
+ want := []string{}
+ if arg != "" {
+ want = strings.Split(arg, " ")
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, want)
+ }
+ }
+
+ if err := m.consistent(); err != nil {
+ t.Fatalf("test case #%d %q: inconsistent state: %v", i, tc, err)
+ }
+ }
+}
+
+func TestMemLS(t *testing.T) {
+ now := time.Unix(0, 0)
+ m := NewMemLS().(*memLS)
+ rng := rand.New(rand.NewSource(0))
+ tokens := map[string]string{}
+ nConfirm, nCreate, nRefresh, nUnlock := 0, 0, 0, 0
+ const N = 2000
+
+ for i := 0; i < N; i++ {
+ name := lockTestNames[rng.Intn(len(lockTestNames))]
+ duration := lockTestDurations[rng.Intn(len(lockTestDurations))]
+ confirmed, unlocked := false, false
+
+ // If the name was already locked, we randomly confirm/release, refresh
+ // or unlock it. Otherwise, we create a lock.
+ token := tokens[name]
+ if token != "" {
+ switch rng.Intn(3) {
+ case 0:
+ confirmed = true
+ nConfirm++
+ release, err := m.Confirm(now, name, "", Condition{Token: token})
+ if err != nil {
+ t.Fatalf("iteration #%d: Confirm %q: %v", i, name, err)
+ }
+ if err := m.consistent(); err != nil {
+ t.Fatalf("iteration #%d: inconsistent state: %v", i, err)
+ }
+ release()
+
+ case 1:
+ nRefresh++
+ if _, err := m.Refresh(now, token, duration); err != nil {
+ t.Fatalf("iteration #%d: Refresh %q: %v", i, name, err)
+ }
+
+ case 2:
+ unlocked = true
+ nUnlock++
+ if err := m.Unlock(now, token); err != nil {
+ t.Fatalf("iteration #%d: Unlock %q: %v", i, name, err)
+ }
+ }
+
+ } else {
+ nCreate++
+ var err error
+ token, err = m.Create(now, LockDetails{
+ Root: name,
+ Duration: duration,
+ ZeroDepth: lockTestZeroDepth(name),
+ })
+ if err != nil {
+ t.Fatalf("iteration #%d: Create %q: %v", i, name, err)
+ }
+ }
+
+ if !confirmed {
+ if duration == 0 || unlocked {
+ // A zero-duration lock should expire immediately and is
+ // effectively equivalent to being unlocked.
+ tokens[name] = ""
+ } else {
+ tokens[name] = token
+ }
+ }
+
+ if err := m.consistent(); err != nil {
+ t.Fatalf("iteration #%d: inconsistent state: %v", i, err)
+ }
+ }
+
+ if nConfirm < N/10 {
+ t.Fatalf("too few Confirm calls: got %d, want >= %d", nConfirm, N/10)
+ }
+ if nCreate < N/10 {
+ t.Fatalf("too few Create calls: got %d, want >= %d", nCreate, N/10)
+ }
+ if nRefresh < N/10 {
+ t.Fatalf("too few Refresh calls: got %d, want >= %d", nRefresh, N/10)
+ }
+ if nUnlock < N/10 {
+ t.Fatalf("too few Unlock calls: got %d, want >= %d", nUnlock, N/10)
+ }
+}
+
+func (m *memLS) consistent() error {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ // If m.byName is non-empty, then it must contain an entry for the root "/",
+ // and its refCount should equal the number of locked nodes.
+ if len(m.byName) > 0 {
+ n := m.byName["/"]
+ if n == nil {
+ return fmt.Errorf(`non-empty m.byName does not contain the root "/"`)
+ }
+ if n.refCount != len(m.byToken) {
+ return fmt.Errorf("root node refCount=%d, differs from len(m.byToken)=%d", n.refCount, len(m.byToken))
+ }
+ }
+
+ for name, n := range m.byName {
+ // The map keys should be consistent with the node's copy of the key.
+ if n.details.Root != name {
+ return fmt.Errorf("node name %q != byName map key %q", n.details.Root, name)
+ }
+
+ // A name must be clean, and start with a "/".
+ if len(name) == 0 || name[0] != '/' {
+ return fmt.Errorf(`node name %q does not start with "/"`, name)
+ }
+ if name != path.Clean(name) {
+ return fmt.Errorf(`node name %q is not clean`, name)
+ }
+
+ // A node's refCount should be positive.
+ if n.refCount <= 0 {
+ return fmt.Errorf("non-positive refCount for node at name %q", name)
+ }
+
+ // A node's refCount should be the number of self-or-descendents that
+ // are locked (i.e. have a non-empty token).
+ var list []string
+ for name0, n0 := range m.byName {
+ // All of lockTestNames' name fragments are one byte long: '_', 'i' or 'z',
+ // so strings.HasPrefix is equivalent to self-or-descendent name match.
+ // We don't have to worry about "/foo/bar" being a false positive match
+ // for "/foo/b".
+ if strings.HasPrefix(name0, name) && n0.token != "" {
+ list = append(list, name0)
+ }
+ }
+ if n.refCount != len(list) {
+ sort.Strings(list)
+ return fmt.Errorf("node at name %q has refCount %d but locked self-or-descendents are %q (len=%d)",
+ name, n.refCount, list, len(list))
+ }
+
+ // A node n is in m.byToken if it has a non-empty token.
+ if n.token != "" {
+ if _, ok := m.byToken[n.token]; !ok {
+ return fmt.Errorf("node at name %q has token %q but not in m.byToken", name, n.token)
+ }
+ }
+
+ // A node n is in m.byExpiry if it has a non-negative byExpiryIndex.
+ if n.byExpiryIndex >= 0 {
+ if n.byExpiryIndex >= len(m.byExpiry) {
+ return fmt.Errorf("node at name %q has byExpiryIndex %d but m.byExpiry has length %d", name, n.byExpiryIndex, len(m.byExpiry))
+ }
+ if n != m.byExpiry[n.byExpiryIndex] {
+ return fmt.Errorf("node at name %q has byExpiryIndex %d but that indexes a different node", name, n.byExpiryIndex)
+ }
+ }
+ }
+
+ for token, n := range m.byToken {
+ // The map keys should be consistent with the node's copy of the key.
+ if n.token != token {
+ return fmt.Errorf("node token %q != byToken map key %q", n.token, token)
+ }
+
+ // Every node in m.byToken is in m.byName.
+ if _, ok := m.byName[n.details.Root]; !ok {
+ return fmt.Errorf("node at name %q in m.byToken but not in m.byName", n.details.Root)
+ }
+ }
+
+ for i, n := range m.byExpiry {
+ // The slice indices should be consistent with the node's copy of the index.
+ if n.byExpiryIndex != i {
+ return fmt.Errorf("node byExpiryIndex %d != byExpiry slice index %d", n.byExpiryIndex, i)
+ }
+
+ // Every node in m.byExpiry is in m.byName.
+ if _, ok := m.byName[n.details.Root]; !ok {
+ return fmt.Errorf("node at name %q in m.byExpiry but not in m.byName", n.details.Root)
+ }
+
+ // No node in m.byExpiry should be held.
+ if n.held {
+ return fmt.Errorf("node at name %q in m.byExpiry is held", n.details.Root)
+ }
+ }
+ return nil
+}
+
+func TestParseTimeout(t *testing.T) {
+ testCases := []struct {
+ s string
+ want time.Duration
+ wantErr error
+ }{{
+ "",
+ infiniteTimeout,
+ nil,
+ }, {
+ "Infinite",
+ infiniteTimeout,
+ nil,
+ }, {
+ "Infinitesimal",
+ 0,
+ errInvalidTimeout,
+ }, {
+ "infinite",
+ 0,
+ errInvalidTimeout,
+ }, {
+ "Second-0",
+ 0 * time.Second,
+ nil,
+ }, {
+ "Second-123",
+ 123 * time.Second,
+ nil,
+ }, {
+ " Second-456 ",
+ 456 * time.Second,
+ nil,
+ }, {
+ "Second-4100000000",
+ 4100000000 * time.Second,
+ nil,
+ }, {
+ "junk",
+ 0,
+ errInvalidTimeout,
+ }, {
+ "Second-",
+ 0,
+ errInvalidTimeout,
+ }, {
+ "Second--1",
+ 0,
+ errInvalidTimeout,
+ }, {
+ "Second--123",
+ 0,
+ errInvalidTimeout,
+ }, {
+ "Second-+123",
+ 0,
+ errInvalidTimeout,
+ }, {
+ "Second-0x123",
+ 0,
+ errInvalidTimeout,
+ }, {
+ "second-123",
+ 0,
+ errInvalidTimeout,
+ }, {
+ "Second-4294967295",
+ 4294967295 * time.Second,
+ nil,
+ }, {
+ // Section 10.7 says that "The timeout value for TimeType "Second"
+ // must not be greater than 2^32-1."
+ "Second-4294967296",
+ 0,
+ errInvalidTimeout,
+ }, {
+ // This test case comes from section 9.10.9 of the spec. It says,
+ //
+ // "In this request, the client has specified that it desires an
+ // infinite-length lock, if available, otherwise a timeout of 4.1
+ // billion seconds, if available."
+ //
+ // The Go WebDAV package always supports infinite length locks,
+ // and ignores the fallback after the comma.
+ "Infinite, Second-4100000000",
+ infiniteTimeout,
+ nil,
+ }}
+
+ for _, tc := range testCases {
+ got, gotErr := parseTimeout(tc.s)
+ if got != tc.want || gotErr != tc.wantErr {
+ t.Errorf("parsing %q:\ngot %v, %v\nwant %v, %v", tc.s, got, gotErr, tc.want, tc.wantErr)
+ }
+ }
+}
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/prop.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/prop.go
new file mode 100644
index 000000000..e36a3b31d
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/prop.go
@@ -0,0 +1,418 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "mime"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strconv"
+
+ "golang.org/x/net/context"
+)
+
+// Proppatch describes a property update instruction as defined in RFC 4918.
+// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH
+type Proppatch struct {
+ // Remove specifies whether this patch removes properties. If it does not
+ // remove them, it sets them.
+ Remove bool
+ // Props contains the properties to be set or removed.
+ Props []Property
+}
+
+// Propstat describes an XML propstat element as defined in RFC 4918.
+// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat
+type Propstat struct {
+ // Props contains the properties for which Status applies.
+ Props []Property
+
+ // Status defines the HTTP status code of the properties in Props.
+ // Allowed values include, but are not limited to, the WebDAV status
+ // code extensions for HTTP/1.1.
+ // http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11
+ Status int
+
+ // XMLError contains the XML representation of the optional error element.
+ // XML content within this field must not rely on any predefined
+ // namespace declarations or prefixes. If empty, the XML error element
+ // is omitted.
+ XMLError string
+
+ // ResponseDescription contains the contents of the optional
+ // responsedescription field. If empty, the XML element is omitted.
+ ResponseDescription string
+}
+
+// makePropstats returns a slice containing those of x and y whose Props slice
+// is non-empty. If both are empty, it returns a slice containing an otherwise
+// zero Propstat whose HTTP status code is 200 OK.
+func makePropstats(x, y Propstat) []Propstat {
+ pstats := make([]Propstat, 0, 2)
+ if len(x.Props) != 0 {
+ pstats = append(pstats, x)
+ }
+ if len(y.Props) != 0 {
+ pstats = append(pstats, y)
+ }
+ if len(pstats) == 0 {
+ pstats = append(pstats, Propstat{
+ Status: http.StatusOK,
+ })
+ }
+ return pstats
+}
+
+// DeadPropsHolder holds the dead properties of a resource.
+//
+// Dead properties are those properties that are explicitly defined. In
+// comparison, live properties, such as DAV:getcontentlength, are implicitly
+// defined by the underlying resource, and cannot be explicitly overridden or
+// removed. See the Terminology section of
+// http://www.webdav.org/specs/rfc4918.html#rfc.section.3
+//
+// There is a whitelist of the names of live properties. This package handles
+// all live properties, and will only pass non-whitelisted names to the Patch
+// method of DeadPropsHolder implementations.
+type DeadPropsHolder interface {
+ // DeadProps returns a copy of the dead properties held.
+ DeadProps() (map[xml.Name]Property, error)
+
+ // Patch patches the dead properties held.
+ //
+ // Patching is atomic; either all or no patches succeed. It returns (nil,
+ // non-nil) if an internal server error occurred, otherwise the Propstats
+ // collectively contain one Property for each proposed patch Property. If
+ // all patches succeed, Patch returns a slice of length one and a Propstat
+ // element with a 200 OK HTTP status code. If none succeed, for reasons
+ // other than an internal server error, no Propstat has status 200 OK.
+ //
+ // For more details on when various HTTP status codes apply, see
+ // http://www.webdav.org/specs/rfc4918.html#PROPPATCH-status
+ Patch([]Proppatch) ([]Propstat, error)
+}
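+
+// A minimal sketch of a DeadPropsHolder implementation (hypothetical; the
+// propFile type below is illustrative and not part of this package). It keeps
+// dead properties in a map and, per the contract above, reports one Property
+// name per proposed patch. A real implementation would also persist the map
+// and report per-property failures where appropriate.
+//
+//    type propFile struct {
+//        File
+//        props map[xml.Name]Property
+//    }
+//
+//    func (f *propFile) DeadProps() (map[xml.Name]Property, error) {
+//        m := make(map[xml.Name]Property, len(f.props))
+//        for k, v := range f.props {
+//            m[k] = v
+//        }
+//        return m, nil
+//    }
+//
+//    func (f *propFile) Patch(patches []Proppatch) ([]Propstat, error) {
+//        pstat := Propstat{Status: http.StatusOK}
+//        for _, patch := range patches {
+//            for _, p := range patch.Props {
+//                pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
+//                if patch.Remove {
+//                    delete(f.props, p.XMLName)
+//                } else {
+//                    f.props[p.XMLName] = p
+//                }
+//            }
+//        }
+//        return []Propstat{pstat}, nil
+//    }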
+
+// liveProps contains all supported, protected DAV: properties.
+var liveProps = map[xml.Name]struct {
+ // findFn implements the propfind function of this property. If nil,
+ // it indicates a hidden property.
+ findFn func(context.Context, FileSystem, LockSystem, string, os.FileInfo) (string, error)
+ // dir is true if the property applies to directories.
+ dir bool
+}{
+ {Space: "DAV:", Local: "resourcetype"}: {
+ findFn: findResourceType,
+ dir: true,
+ },
+ {Space: "DAV:", Local: "displayname"}: {
+ findFn: findDisplayName,
+ dir: true,
+ },
+ {Space: "DAV:", Local: "getcontentlength"}: {
+ findFn: findContentLength,
+ dir: false,
+ },
+ {Space: "DAV:", Local: "getlastmodified"}: {
+ findFn: findLastModified,
+ // http://webdav.org/specs/rfc4918.html#PROPERTY_getlastmodified
+ // suggests that getlastmodified should only apply to GETable
+ // resources, and this package does not support GET on directories.
+ //
+ // Nonetheless, some WebDAV clients expect child directories to be
+ // sortable by getlastmodified date, so this value is true, not false.
+ // See golang.org/issue/15334.
+ dir: true,
+ },
+ {Space: "DAV:", Local: "creationdate"}: {
+ findFn: nil,
+ dir: false,
+ },
+ {Space: "DAV:", Local: "getcontentlanguage"}: {
+ findFn: nil,
+ dir: false,
+ },
+ {Space: "DAV:", Local: "getcontenttype"}: {
+ findFn: findContentType,
+ dir: false,
+ },
+ {Space: "DAV:", Local: "getetag"}: {
+ findFn: findETag,
+ // findETag implements ETag as the concatenated hex values of a file's
+ // modification time and size. This is not a reliable synchronization
+ // mechanism for directories, so we do not advertise getetag for DAV
+ // collections.
+ dir: false,
+ },
+
+ // TODO: The lockdiscovery property requires LockSystem to list the
+ // active locks on a resource.
+ {Space: "DAV:", Local: "lockdiscovery"}: {},
+ {Space: "DAV:", Local: "supportedlock"}: {
+ findFn: findSupportedLock,
+ dir: true,
+ },
+}
+
+// TODO(nigeltao) merge props and allprop?
+
+// Props returns the status of the properties named pnames for resource name.
+//
+// Each Propstat has a unique status and each property name will only be part
+// of one Propstat element.
+func props(ctx context.Context, fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) {
+ f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ fi, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ isDir := fi.IsDir()
+
+ var deadProps map[xml.Name]Property
+ if dph, ok := f.(DeadPropsHolder); ok {
+ deadProps, err = dph.DeadProps()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ pstatOK := Propstat{Status: http.StatusOK}
+ pstatNotFound := Propstat{Status: http.StatusNotFound}
+ for _, pn := range pnames {
+ // If this file has dead properties, check if they contain pn.
+ if dp, ok := deadProps[pn]; ok {
+ pstatOK.Props = append(pstatOK.Props, dp)
+ continue
+ }
+ // Otherwise, it must either be a live property or we don't know it.
+ if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) {
+ innerXML, err := prop.findFn(ctx, fs, ls, name, fi)
+ if err != nil {
+ return nil, err
+ }
+ pstatOK.Props = append(pstatOK.Props, Property{
+ XMLName: pn,
+ InnerXML: []byte(innerXML),
+ })
+ } else {
+ pstatNotFound.Props = append(pstatNotFound.Props, Property{
+ XMLName: pn,
+ })
+ }
+ }
+ return makePropstats(pstatOK, pstatNotFound), nil
+}
+
+// Propnames returns the property names defined for resource name.
+func propnames(ctx context.Context, fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) {
+ f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ fi, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ isDir := fi.IsDir()
+
+ var deadProps map[xml.Name]Property
+ if dph, ok := f.(DeadPropsHolder); ok {
+ deadProps, err = dph.DeadProps()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ pnames := make([]xml.Name, 0, len(liveProps)+len(deadProps))
+ for pn, prop := range liveProps {
+ if prop.findFn != nil && (prop.dir || !isDir) {
+ pnames = append(pnames, pn)
+ }
+ }
+ for pn := range deadProps {
+ pnames = append(pnames, pn)
+ }
+ return pnames, nil
+}
+
+// Allprop returns the properties defined for resource name and the properties
+// named in include.
+//
+// Note that RFC 4918 defines 'allprop' to return the DAV: properties defined
+// within the RFC plus dead properties. Other live properties should only be
+// returned if they are named in 'include'.
+//
+// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
+func allprop(ctx context.Context, fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) {
+ pnames, err := propnames(ctx, fs, ls, name)
+ if err != nil {
+ return nil, err
+ }
+ // Add names from include if they are not already covered in pnames.
+ nameset := make(map[xml.Name]bool)
+ for _, pn := range pnames {
+ nameset[pn] = true
+ }
+ for _, pn := range include {
+ if !nameset[pn] {
+ pnames = append(pnames, pn)
+ }
+ }
+ return props(ctx, fs, ls, name, pnames)
+}
+
+// Patch patches the properties of resource name. The return values are
+// constrained in the same manner as DeadPropsHolder.Patch.
+func patch(ctx context.Context, fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) {
+ conflict := false
+loop:
+ for _, patch := range patches {
+ for _, p := range patch.Props {
+ if _, ok := liveProps[p.XMLName]; ok {
+ conflict = true
+ break loop
+ }
+ }
+ }
+ if conflict {
+ pstatForbidden := Propstat{
+ Status: http.StatusForbidden,
+ XMLError: `<D:cannot-modify-protected-property xmlns:D="DAV:"/>`,
+ }
+ pstatFailedDep := Propstat{
+ Status: StatusFailedDependency,
+ }
+ for _, patch := range patches {
+ for _, p := range patch.Props {
+ if _, ok := liveProps[p.XMLName]; ok {
+ pstatForbidden.Props = append(pstatForbidden.Props, Property{XMLName: p.XMLName})
+ } else {
+ pstatFailedDep.Props = append(pstatFailedDep.Props, Property{XMLName: p.XMLName})
+ }
+ }
+ }
+ return makePropstats(pstatForbidden, pstatFailedDep), nil
+ }
+
+ f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ if dph, ok := f.(DeadPropsHolder); ok {
+ ret, err := dph.Patch(patches)
+ if err != nil {
+ return nil, err
+ }
+ // http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat says that
+ // "The contents of the prop XML element must only list the names of
+ // properties to which the result in the status element applies."
+ for _, pstat := range ret {
+ for i, p := range pstat.Props {
+ pstat.Props[i] = Property{XMLName: p.XMLName}
+ }
+ }
+ return ret, nil
+ }
+ // The file doesn't implement the optional DeadPropsHolder interface, so
+ // all patches are forbidden.
+ pstat := Propstat{Status: http.StatusForbidden}
+ for _, patch := range patches {
+ for _, p := range patch.Props {
+ pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
+ }
+ }
+ return []Propstat{pstat}, nil
+}
+
+func escapeXML(s string) string {
+ for i := 0; i < len(s); i++ {
+ // As an optimization, if s contains only ASCII letters, digits or a
+ // few special characters, the escaped value is s itself and we don't
+ // need to allocate a buffer and convert between string and []byte.
+ switch c := s[i]; {
+ case c == ' ' || c == '_' ||
+ ('+' <= c && c <= '9') || // Digits as well as + , - . and /
+ ('A' <= c && c <= 'Z') ||
+ ('a' <= c && c <= 'z'):
+ continue
+ }
+ // Otherwise, go through the full escaping process.
+ var buf bytes.Buffer
+ xml.EscapeText(&buf, []byte(s))
+ return buf.String()
+ }
+ return s
+}
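+
+// For illustration, the fast path and the full escaping path behave as
+// follows (full-path output follows xml.EscapeText):
+//
+//    escapeXML("hello_world-1.txt") // "hello_world-1.txt" (ASCII-safe, returned as-is)
+//    escapeXML("a&b <c>")           // "a&amp;b &lt;c&gt;"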
+
+func findResourceType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+ if fi.IsDir() {
+ return `<D:collection xmlns:D="DAV:"/>`, nil
+ }
+ return "", nil
+}
+
+func findDisplayName(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+ if slashClean(name) == "/" {
+ // Hide the real name of a possibly prefixed root directory.
+ return "", nil
+ }
+ return escapeXML(fi.Name()), nil
+}
+
+func findContentLength(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+ return strconv.FormatInt(fi.Size(), 10), nil
+}
+
+func findLastModified(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+ return fi.ModTime().Format(http.TimeFormat), nil
+}
+
+func findContentType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+ f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+ // This implementation is based on serveContent's code in the standard net/http package.
+ ctype := mime.TypeByExtension(filepath.Ext(name))
+ if ctype != "" {
+ return ctype, nil
+ }
+ // Read a chunk to decide between utf-8 text and binary.
+ var buf [512]byte
+ n, err := io.ReadFull(f, buf[:])
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ return "", err
+ }
+ ctype = http.DetectContentType(buf[:n])
+ // Rewind file.
+ _, err = f.Seek(0, os.SEEK_SET)
+ return ctype, err
+}
+
+func findETag(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+ // The Apache httpd 2.4 web server by default concatenates the
+ // modification time and size of a file. We replicate the heuristic
+ // with nanosecond granularity.
+ return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size()), nil
+}
+
+func findSupportedLock(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+ return `` +
+ `<D:lockentry xmlns:D="DAV:">` +
+ `<D:lockscope><D:exclusive/></D:lockscope>` +
+ `<D:locktype><D:write/></D:locktype>` +
+ `</D:lockentry>`, nil
+}
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/prop_test.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/prop_test.go
new file mode 100644
index 000000000..57d0e826f
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/prop_test.go
@@ -0,0 +1,613 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+ "encoding/xml"
+ "fmt"
+ "net/http"
+ "os"
+ "reflect"
+ "sort"
+ "testing"
+
+ "golang.org/x/net/context"
+)
+
+func TestMemPS(t *testing.T) {
+ ctx := context.Background()
+ // calcProps calculates the getlastmodified and getetag DAV: property
+ // values in pstats for resource name in file-system fs.
+ calcProps := func(name string, fs FileSystem, ls LockSystem, pstats []Propstat) error {
+ fi, err := fs.Stat(ctx, name)
+ if err != nil {
+ return err
+ }
+ for _, pst := range pstats {
+ for i, p := range pst.Props {
+ switch p.XMLName {
+ case xml.Name{Space: "DAV:", Local: "getlastmodified"}:
+ p.InnerXML = []byte(fi.ModTime().Format(http.TimeFormat))
+ pst.Props[i] = p
+ case xml.Name{Space: "DAV:", Local: "getetag"}:
+ if fi.IsDir() {
+ continue
+ }
+ etag, err := findETag(ctx, fs, ls, name, fi)
+ if err != nil {
+ return err
+ }
+ p.InnerXML = []byte(etag)
+ pst.Props[i] = p
+ }
+ }
+ }
+ return nil
+ }
+
+ const (
+ lockEntry = `` +
+ `<D:lockentry xmlns:D="DAV:">` +
+ `<D:lockscope><D:exclusive/></D:lockscope>` +
+ `<D:locktype><D:write/></D:locktype>` +
+ `</D:lockentry>`
+ statForbiddenError = `<D:cannot-modify-protected-property xmlns:D="DAV:"/>`
+ )
+
+ type propOp struct {
+ op string
+ name string
+ pnames []xml.Name
+ patches []Proppatch
+ wantPnames []xml.Name
+ wantPropstats []Propstat
+ }
+
+ testCases := []struct {
+ desc string
+ noDeadProps bool
+ buildfs []string
+ propOp []propOp
+ }{{
+ desc: "propname",
+ buildfs: []string{"mkdir /dir", "touch /file"},
+ propOp: []propOp{{
+ op: "propname",
+ name: "/dir",
+ wantPnames: []xml.Name{
+ {Space: "DAV:", Local: "resourcetype"},
+ {Space: "DAV:", Local: "displayname"},
+ {Space: "DAV:", Local: "supportedlock"},
+ {Space: "DAV:", Local: "getlastmodified"},
+ },
+ }, {
+ op: "propname",
+ name: "/file",
+ wantPnames: []xml.Name{
+ {Space: "DAV:", Local: "resourcetype"},
+ {Space: "DAV:", Local: "displayname"},
+ {Space: "DAV:", Local: "getcontentlength"},
+ {Space: "DAV:", Local: "getlastmodified"},
+ {Space: "DAV:", Local: "getcontenttype"},
+ {Space: "DAV:", Local: "getetag"},
+ {Space: "DAV:", Local: "supportedlock"},
+ },
+ }},
+ }, {
+ desc: "allprop dir and file",
+ buildfs: []string{"mkdir /dir", "write /file foobarbaz"},
+ propOp: []propOp{{
+ op: "allprop",
+ name: "/dir",
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"},
+ InnerXML: []byte(`<D:collection xmlns:D="DAV:"/>`),
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "displayname"},
+ InnerXML: []byte("dir"),
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"},
+ InnerXML: nil, // Calculated during test.
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"},
+ InnerXML: []byte(lockEntry),
+ }},
+ }},
+ }, {
+ op: "allprop",
+ name: "/file",
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"},
+ InnerXML: []byte(""),
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "displayname"},
+ InnerXML: []byte("file"),
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "getcontentlength"},
+ InnerXML: []byte("9"),
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"},
+ InnerXML: nil, // Calculated during test.
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "getcontenttype"},
+ InnerXML: []byte("text/plain; charset=utf-8"),
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
+ InnerXML: nil, // Calculated during test.
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"},
+ InnerXML: []byte(lockEntry),
+ }},
+ }},
+ }, {
+ op: "allprop",
+ name: "/file",
+ pnames: []xml.Name{
+ {"DAV:", "resourcetype"},
+ {"foo", "bar"},
+ },
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"},
+ InnerXML: []byte(""),
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "displayname"},
+ InnerXML: []byte("file"),
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "getcontentlength"},
+ InnerXML: []byte("9"),
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"},
+ InnerXML: nil, // Calculated during test.
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "getcontenttype"},
+ InnerXML: []byte("text/plain; charset=utf-8"),
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
+ InnerXML: nil, // Calculated during test.
+ }, {
+ XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"},
+ InnerXML: []byte(lockEntry),
+ }}}, {
+ Status: http.StatusNotFound,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }}},
+ },
+ }},
+ }, {
+ desc: "propfind DAV:resourcetype",
+ buildfs: []string{"mkdir /dir", "touch /file"},
+ propOp: []propOp{{
+ op: "propfind",
+ name: "/dir",
+ pnames: []xml.Name{{"DAV:", "resourcetype"}},
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"},
+ InnerXML: []byte(`<D:collection xmlns:D="DAV:"/>`),
+ }},
+ }},
+ }, {
+ op: "propfind",
+ name: "/file",
+ pnames: []xml.Name{{"DAV:", "resourcetype"}},
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"},
+ InnerXML: []byte(""),
+ }},
+ }},
+ }},
+ }, {
+ desc: "propfind unsupported DAV properties",
+ buildfs: []string{"mkdir /dir"},
+ propOp: []propOp{{
+ op: "propfind",
+ name: "/dir",
+ pnames: []xml.Name{{"DAV:", "getcontentlanguage"}},
+ wantPropstats: []Propstat{{
+ Status: http.StatusNotFound,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "getcontentlanguage"},
+ }},
+ }},
+ }, {
+ op: "propfind",
+ name: "/dir",
+ pnames: []xml.Name{{"DAV:", "creationdate"}},
+ wantPropstats: []Propstat{{
+ Status: http.StatusNotFound,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "creationdate"},
+ }},
+ }},
+ }},
+ }, {
+ desc: "propfind getetag for files but not for directories",
+ buildfs: []string{"mkdir /dir", "touch /file"},
+ propOp: []propOp{{
+ op: "propfind",
+ name: "/dir",
+ pnames: []xml.Name{{"DAV:", "getetag"}},
+ wantPropstats: []Propstat{{
+ Status: http.StatusNotFound,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
+ }},
+ }},
+ }, {
+ op: "propfind",
+ name: "/file",
+ pnames: []xml.Name{{"DAV:", "getetag"}},
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
+ InnerXML: nil, // Calculated during test.
+ }},
+ }},
+ }},
+ }, {
+ desc: "proppatch property on no-dead-properties file system",
+ buildfs: []string{"mkdir /dir"},
+ noDeadProps: true,
+ propOp: []propOp{{
+ op: "proppatch",
+ name: "/dir",
+ patches: []Proppatch{{
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }},
+ wantPropstats: []Propstat{{
+ Status: http.StatusForbidden,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }},
+ }, {
+ op: "proppatch",
+ name: "/dir",
+ patches: []Proppatch{{
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
+ }},
+ }},
+ wantPropstats: []Propstat{{
+ Status: http.StatusForbidden,
+ XMLError: statForbiddenError,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
+ }},
+ }},
+ }},
+ }, {
+ desc: "proppatch dead property",
+ buildfs: []string{"mkdir /dir"},
+ propOp: []propOp{{
+ op: "proppatch",
+ name: "/dir",
+ patches: []Proppatch{{
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ InnerXML: []byte("baz"),
+ }},
+ }},
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }},
+ }, {
+ op: "propfind",
+ name: "/dir",
+ pnames: []xml.Name{{Space: "foo", Local: "bar"}},
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ InnerXML: []byte("baz"),
+ }},
+ }},
+ }},
+ }, {
+ desc: "proppatch dead property with failed dependency",
+ buildfs: []string{"mkdir /dir"},
+ propOp: []propOp{{
+ op: "proppatch",
+ name: "/dir",
+ patches: []Proppatch{{
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ InnerXML: []byte("baz"),
+ }},
+ }, {
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "displayname"},
+ InnerXML: []byte("xxx"),
+ }},
+ }},
+ wantPropstats: []Propstat{{
+ Status: http.StatusForbidden,
+ XMLError: statForbiddenError,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "DAV:", Local: "displayname"},
+ }},
+ }, {
+ Status: StatusFailedDependency,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }},
+ }, {
+ op: "propfind",
+ name: "/dir",
+ pnames: []xml.Name{{Space: "foo", Local: "bar"}},
+ wantPropstats: []Propstat{{
+ Status: http.StatusNotFound,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }},
+ }},
+ }, {
+ desc: "proppatch remove dead property",
+ buildfs: []string{"mkdir /dir"},
+ propOp: []propOp{{
+ op: "proppatch",
+ name: "/dir",
+ patches: []Proppatch{{
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ InnerXML: []byte("baz"),
+ }, {
+ XMLName: xml.Name{Space: "spam", Local: "ham"},
+ InnerXML: []byte("eggs"),
+ }},
+ }},
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }, {
+ XMLName: xml.Name{Space: "spam", Local: "ham"},
+ }},
+ }},
+ }, {
+ op: "propfind",
+ name: "/dir",
+ pnames: []xml.Name{
+ {Space: "foo", Local: "bar"},
+ {Space: "spam", Local: "ham"},
+ },
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ InnerXML: []byte("baz"),
+ }, {
+ XMLName: xml.Name{Space: "spam", Local: "ham"},
+ InnerXML: []byte("eggs"),
+ }},
+ }},
+ }, {
+ op: "proppatch",
+ name: "/dir",
+ patches: []Proppatch{{
+ Remove: true,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }},
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }},
+ }, {
+ op: "propfind",
+ name: "/dir",
+ pnames: []xml.Name{
+ {Space: "foo", Local: "bar"},
+ {Space: "spam", Local: "ham"},
+ },
+ wantPropstats: []Propstat{{
+ Status: http.StatusNotFound,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }, {
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "spam", Local: "ham"},
+ InnerXML: []byte("eggs"),
+ }},
+ }},
+ }},
+ }, {
+ desc: "propname with dead property",
+ buildfs: []string{"touch /file"},
+ propOp: []propOp{{
+ op: "proppatch",
+ name: "/file",
+ patches: []Proppatch{{
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ InnerXML: []byte("baz"),
+ }},
+ }},
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }},
+ }, {
+ op: "propname",
+ name: "/file",
+ wantPnames: []xml.Name{
+ {Space: "DAV:", Local: "resourcetype"},
+ {Space: "DAV:", Local: "displayname"},
+ {Space: "DAV:", Local: "getcontentlength"},
+ {Space: "DAV:", Local: "getlastmodified"},
+ {Space: "DAV:", Local: "getcontenttype"},
+ {Space: "DAV:", Local: "getetag"},
+ {Space: "DAV:", Local: "supportedlock"},
+ {Space: "foo", Local: "bar"},
+ },
+ }},
+ }, {
+ desc: "proppatch remove unknown dead property",
+ buildfs: []string{"mkdir /dir"},
+ propOp: []propOp{{
+ op: "proppatch",
+ name: "/dir",
+ patches: []Proppatch{{
+ Remove: true,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }},
+ wantPropstats: []Propstat{{
+ Status: http.StatusOK,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo", Local: "bar"},
+ }},
+ }},
+ }},
+ }, {
+ desc: "bad: propfind unknown property",
+ buildfs: []string{"mkdir /dir"},
+ propOp: []propOp{{
+ op: "propfind",
+ name: "/dir",
+ pnames: []xml.Name{{"foo:", "bar"}},
+ wantPropstats: []Propstat{{
+ Status: http.StatusNotFound,
+ Props: []Property{{
+ XMLName: xml.Name{Space: "foo:", Local: "bar"},
+ }},
+ }},
+ }},
+ }}
+
+ for _, tc := range testCases {
+ fs, err := buildTestFS(tc.buildfs)
+ if err != nil {
+ t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err)
+ }
+ if tc.noDeadProps {
+ fs = noDeadPropsFS{fs}
+ }
+ ls := NewMemLS()
+ for _, op := range tc.propOp {
+ desc := fmt.Sprintf("%s: %s %s", tc.desc, op.op, op.name)
+ if err = calcProps(op.name, fs, ls, op.wantPropstats); err != nil {
+ t.Fatalf("%s: calcProps: %v", desc, err)
+ }
+
+ // Call property system.
+ var propstats []Propstat
+ switch op.op {
+ case "propname":
+ pnames, err := propnames(ctx, fs, ls, op.name)
+ if err != nil {
+ t.Errorf("%s: got error %v, want nil", desc, err)
+ continue
+ }
+ sort.Sort(byXMLName(pnames))
+ sort.Sort(byXMLName(op.wantPnames))
+ if !reflect.DeepEqual(pnames, op.wantPnames) {
+ t.Errorf("%s: pnames\ngot %q\nwant %q", desc, pnames, op.wantPnames)
+ }
+ continue
+ case "allprop":
+ propstats, err = allprop(ctx, fs, ls, op.name, op.pnames)
+ case "propfind":
+ propstats, err = props(ctx, fs, ls, op.name, op.pnames)
+ case "proppatch":
+ propstats, err = patch(ctx, fs, ls, op.name, op.patches)
+ default:
+ t.Fatalf("%s: %s not implemented", desc, op.op)
+ }
+ if err != nil {
+ t.Errorf("%s: got error %v, want nil", desc, err)
+ continue
+ }
+ // Compare return values from allprop, propfind or proppatch.
+ for _, pst := range propstats {
+ sort.Sort(byPropname(pst.Props))
+ }
+ for _, pst := range op.wantPropstats {
+ sort.Sort(byPropname(pst.Props))
+ }
+ sort.Sort(byStatus(propstats))
+ sort.Sort(byStatus(op.wantPropstats))
+ if !reflect.DeepEqual(propstats, op.wantPropstats) {
+ t.Errorf("%s: propstat\ngot %q\nwant %q", desc, propstats, op.wantPropstats)
+ }
+ }
+ }
+}
+
+func cmpXMLName(a, b xml.Name) bool {
+ if a.Space != b.Space {
+ return a.Space < b.Space
+ }
+ return a.Local < b.Local
+}
+
+type byXMLName []xml.Name
+
+func (b byXMLName) Len() int { return len(b) }
+func (b byXMLName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b byXMLName) Less(i, j int) bool { return cmpXMLName(b[i], b[j]) }
+
+type byPropname []Property
+
+func (b byPropname) Len() int { return len(b) }
+func (b byPropname) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b byPropname) Less(i, j int) bool { return cmpXMLName(b[i].XMLName, b[j].XMLName) }
+
+type byStatus []Propstat
+
+func (b byStatus) Len() int { return len(b) }
+func (b byStatus) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b byStatus) Less(i, j int) bool { return b[i].Status < b[j].Status }
+
+type noDeadPropsFS struct {
+ FileSystem
+}
+
+func (fs noDeadPropsFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) {
+ f, err := fs.FileSystem.OpenFile(ctx, name, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ return noDeadPropsFile{f}, nil
+}
+
+// noDeadPropsFile wraps a File but strips any optional DeadPropsHolder methods
+// provided by the underlying File implementation.
+type noDeadPropsFile struct {
+ f File
+}
+
+func (f noDeadPropsFile) Close() error { return f.f.Close() }
+func (f noDeadPropsFile) Read(p []byte) (int, error) { return f.f.Read(p) }
+func (f noDeadPropsFile) Readdir(count int) ([]os.FileInfo, error) { return f.f.Readdir(count) }
+func (f noDeadPropsFile) Seek(off int64, whence int) (int64, error) { return f.f.Seek(off, whence) }
+func (f noDeadPropsFile) Stat() (os.FileInfo, error) { return f.f.Stat() }
+func (f noDeadPropsFile) Write(p []byte) (int, error) { return f.f.Write(p) }
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/webdav.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/webdav.go
new file mode 100644
index 000000000..7b56687fc
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/webdav.go
@@ -0,0 +1,702 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package webdav provides a WebDAV server implementation.
+package webdav // import "golang.org/x/net/webdav"
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "strings"
+ "time"
+)
+
+type Handler struct {
+ // Prefix is the URL path prefix to strip from WebDAV resource paths.
+ Prefix string
+ // FileSystem is the virtual file system.
+ FileSystem FileSystem
+ // LockSystem is the lock management system.
+ LockSystem LockSystem
+ // Logger is an optional error logger. If non-nil, it will be called
+ // for all HTTP requests.
+ Logger func(*http.Request, error)
+}
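+
+// A minimal wiring sketch (illustrative only; the "/webdav" prefix, the Dir
+// root and the logging format are assumptions, not requirements):
+//
+//    h := &webdav.Handler{
+//        Prefix:     "/webdav",
+//        FileSystem: webdav.Dir("/srv/davroot"),
+//        LockSystem: webdav.NewMemLS(),
+//        Logger: func(r *http.Request, err error) {
+//            if err != nil {
+//                log.Printf("webdav: %s %s: %v", r.Method, r.URL.Path, err)
+//            }
+//        },
+//    }
+//    http.Handle("/webdav/", h)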
+
+func (h *Handler) stripPrefix(p string) (string, int, error) {
+ if h.Prefix == "" {
+ return p, http.StatusOK, nil
+ }
+ if r := strings.TrimPrefix(p, h.Prefix); len(r) < len(p) {
+ return r, http.StatusOK, nil
+ }
+ return p, http.StatusNotFound, errPrefixMismatch
+}
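+
+// For example, with Prefix set to "/dav", a request for "/dav/a/b" maps to
+// "/a/b", while a request for "/other/a" is rejected with a 404 and
+// errPrefixMismatch. An empty Prefix leaves paths untouched.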
+
+func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ status, err := http.StatusBadRequest, errUnsupportedMethod
+ if h.FileSystem == nil {
+ status, err = http.StatusInternalServerError, errNoFileSystem
+ } else if h.LockSystem == nil {
+ status, err = http.StatusInternalServerError, errNoLockSystem
+ } else {
+ switch r.Method {
+ case "OPTIONS":
+ status, err = h.handleOptions(w, r)
+ case "GET", "HEAD", "POST":
+ status, err = h.handleGetHeadPost(w, r)
+ case "DELETE":
+ status, err = h.handleDelete(w, r)
+ case "PUT":
+ status, err = h.handlePut(w, r)
+ case "MKCOL":
+ status, err = h.handleMkcol(w, r)
+ case "COPY", "MOVE":
+ status, err = h.handleCopyMove(w, r)
+ case "LOCK":
+ status, err = h.handleLock(w, r)
+ case "UNLOCK":
+ status, err = h.handleUnlock(w, r)
+ case "PROPFIND":
+ status, err = h.handlePropfind(w, r)
+ case "PROPPATCH":
+ status, err = h.handleProppatch(w, r)
+ }
+ }
+
+ if status != 0 {
+ w.WriteHeader(status)
+ if status != http.StatusNoContent {
+ w.Write([]byte(StatusText(status)))
+ }
+ }
+ if h.Logger != nil {
+ h.Logger(r, err)
+ }
+}
+
+func (h *Handler) lock(now time.Time, root string) (token string, status int, err error) {
+ token, err = h.LockSystem.Create(now, LockDetails{
+ Root: root,
+ Duration: infiniteTimeout,
+ ZeroDepth: true,
+ })
+ if err != nil {
+ if err == ErrLocked {
+ return "", StatusLocked, err
+ }
+ return "", http.StatusInternalServerError, err
+ }
+ return token, 0, nil
+}
+
+func (h *Handler) confirmLocks(r *http.Request, src, dst string) (release func(), status int, err error) {
+ hdr := r.Header.Get("If")
+ if hdr == "" {
+ // An empty If header means that the client hasn't previously created locks.
+ // Even if this client doesn't care about locks, we still need to check that
+ // the resources aren't locked by another client, so we create temporary
+ // locks that would conflict with another client's locks. These temporary
+ // locks are unlocked at the end of the HTTP request.
+ now, srcToken, dstToken := time.Now(), "", ""
+ if src != "" {
+ srcToken, status, err = h.lock(now, src)
+ if err != nil {
+ return nil, status, err
+ }
+ }
+ if dst != "" {
+ dstToken, status, err = h.lock(now, dst)
+ if err != nil {
+ if srcToken != "" {
+ h.LockSystem.Unlock(now, srcToken)
+ }
+ return nil, status, err
+ }
+ }
+
+ return func() {
+ if dstToken != "" {
+ h.LockSystem.Unlock(now, dstToken)
+ }
+ if srcToken != "" {
+ h.LockSystem.Unlock(now, srcToken)
+ }
+ }, 0, nil
+ }
+
+ ih, ok := parseIfHeader(hdr)
+ if !ok {
+ return nil, http.StatusBadRequest, errInvalidIfHeader
+ }
+ // ih is a disjunction (OR) of ifLists, so any ifList will do.
+ for _, l := range ih.lists {
+ lsrc := l.resourceTag
+ if lsrc == "" {
+ lsrc = src
+ } else {
+ u, err := url.Parse(lsrc)
+ if err != nil {
+ continue
+ }
+ if u.Host != r.Host {
+ continue
+ }
+ lsrc, status, err = h.stripPrefix(u.Path)
+ if err != nil {
+ return nil, status, err
+ }
+ }
+ release, err = h.LockSystem.Confirm(time.Now(), lsrc, dst, l.conditions...)
+ if err == ErrConfirmationFailed {
+ continue
+ }
+ if err != nil {
+ return nil, http.StatusInternalServerError, err
+ }
+ return release, 0, nil
+ }
+ // Section 10.4.1 says that "If this header is evaluated and all state lists
+ // fail, then the request must fail with a 412 (Precondition Failed) status."
+ // We follow the spec even though the cond_put_corrupt_token test case from
+ // the litmus test warns on seeing a 412 instead of a 423 (Locked).
+ return nil, http.StatusPreconditionFailed, ErrLocked
+}
+
+func (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ ctx := getContext(r)
+ allow := "OPTIONS, LOCK, PUT, MKCOL"
+ if fi, err := h.FileSystem.Stat(ctx, reqPath); err == nil {
+ if fi.IsDir() {
+ allow = "OPTIONS, LOCK, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND"
+ } else {
+ allow = "OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND, PUT"
+ }
+ }
+ w.Header().Set("Allow", allow)
+ // http://www.webdav.org/specs/rfc4918.html#dav.compliance.classes
+ w.Header().Set("DAV", "1, 2")
+ // http://msdn.microsoft.com/en-au/library/cc250217.aspx
+ w.Header().Set("MS-Author-Via", "DAV")
+ return 0, nil
+}
+
+func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ // TODO: check locks for read-only access??
+ ctx := getContext(r)
+ f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDONLY, 0)
+ if err != nil {
+ return http.StatusNotFound, err
+ }
+ defer f.Close()
+ fi, err := f.Stat()
+ if err != nil {
+ return http.StatusNotFound, err
+ }
+ if fi.IsDir() {
+ return http.StatusMethodNotAllowed, nil
+ }
+ etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi)
+ if err != nil {
+ return http.StatusInternalServerError, err
+ }
+ w.Header().Set("ETag", etag)
+ // Let ServeContent determine the Content-Type header.
+ http.ServeContent(w, r, reqPath, fi.ModTime(), f)
+ return 0, nil
+}
+
+func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ release, status, err := h.confirmLocks(r, reqPath, "")
+ if err != nil {
+ return status, err
+ }
+ defer release()
+
+ ctx := getContext(r)
+
+ // TODO: return MultiStatus where appropriate.
+
+ // "godoc os RemoveAll" says that "If the path does not exist, RemoveAll
+ // returns nil (no error)." WebDAV semantics are that it should return a
+ // "404 Not Found". We therefore have to Stat before we RemoveAll.
+ if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusNotFound, err
+ }
+ return http.StatusMethodNotAllowed, err
+ }
+ if err := h.FileSystem.RemoveAll(ctx, reqPath); err != nil {
+ return http.StatusMethodNotAllowed, err
+ }
+ return http.StatusNoContent, nil
+}
+
+func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ release, status, err := h.confirmLocks(r, reqPath, "")
+ if err != nil {
+ return status, err
+ }
+ defer release()
+ // TODO(rost): Support the If-Match, If-None-Match headers? See bradfitz'
+ // comments in http.checkEtag.
+ ctx := getContext(r)
+
+ f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ return http.StatusNotFound, err
+ }
+ _, copyErr := io.Copy(f, r.Body)
+ fi, statErr := f.Stat()
+ closeErr := f.Close()
+ // TODO(rost): Returning 405 Method Not Allowed might not be appropriate.
+ if copyErr != nil {
+ return http.StatusMethodNotAllowed, copyErr
+ }
+ if statErr != nil {
+ return http.StatusMethodNotAllowed, statErr
+ }
+ if closeErr != nil {
+ return http.StatusMethodNotAllowed, closeErr
+ }
+ etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi)
+ if err != nil {
+ return http.StatusInternalServerError, err
+ }
+ w.Header().Set("ETag", etag)
+ return http.StatusCreated, nil
+}
+
+func (h *Handler) handleMkcol(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ release, status, err := h.confirmLocks(r, reqPath, "")
+ if err != nil {
+ return status, err
+ }
+ defer release()
+
+ ctx := getContext(r)
+
+ if r.ContentLength > 0 {
+ return http.StatusUnsupportedMediaType, nil
+ }
+ if err := h.FileSystem.Mkdir(ctx, reqPath, 0777); err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusConflict, err
+ }
+ return http.StatusMethodNotAllowed, err
+ }
+ return http.StatusCreated, nil
+}
+
+func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ hdr := r.Header.Get("Destination")
+ if hdr == "" {
+ return http.StatusBadRequest, errInvalidDestination
+ }
+ u, err := url.Parse(hdr)
+ if err != nil {
+ return http.StatusBadRequest, errInvalidDestination
+ }
+ if u.Host != r.Host {
+ return http.StatusBadGateway, errInvalidDestination
+ }
+
+ src, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+
+ dst, status, err := h.stripPrefix(u.Path)
+ if err != nil {
+ return status, err
+ }
+
+ if dst == "" {
+ return http.StatusBadGateway, errInvalidDestination
+ }
+ if dst == src {
+ return http.StatusForbidden, errDestinationEqualsSource
+ }
+
+ ctx := getContext(r)
+
+ if r.Method == "COPY" {
+ // Section 7.5.1 says that a COPY only needs to lock the destination,
+ // not both destination and source. Strictly speaking, this is racy,
+ // even though a COPY doesn't modify the source, if a concurrent
+ // operation modifies the source. However, the litmus test explicitly
+ // checks that COPYing a locked-by-another source is OK.
+ release, status, err := h.confirmLocks(r, "", dst)
+ if err != nil {
+ return status, err
+ }
+ defer release()
+
+ // Section 9.8.3 says that "The COPY method on a collection without a Depth
+ // header must act as if a Depth header with value "infinity" was included".
+ depth := infiniteDepth
+ if hdr := r.Header.Get("Depth"); hdr != "" {
+ depth = parseDepth(hdr)
+ if depth != 0 && depth != infiniteDepth {
+ // Section 9.8.3 says that "A client may submit a Depth header on a
+ // COPY on a collection with a value of "0" or "infinity"."
+ return http.StatusBadRequest, errInvalidDepth
+ }
+ }
+ return copyFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") != "F", depth, 0)
+ }
+
+ release, status, err := h.confirmLocks(r, src, dst)
+ if err != nil {
+ return status, err
+ }
+ defer release()
+
+ // Section 9.9.2 says that "The MOVE method on a collection must act as if
+ // a "Depth: infinity" header was used on it. A client must not submit a
+ // Depth header on a MOVE on a collection with any value but "infinity"."
+ if hdr := r.Header.Get("Depth"); hdr != "" {
+ if parseDepth(hdr) != infiniteDepth {
+ return http.StatusBadRequest, errInvalidDepth
+ }
+ }
+ return moveFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") == "T")
+}
+
+func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus int, retErr error) {
+ duration, err := parseTimeout(r.Header.Get("Timeout"))
+ if err != nil {
+ return http.StatusBadRequest, err
+ }
+ li, status, err := readLockInfo(r.Body)
+ if err != nil {
+ return status, err
+ }
+
+ ctx := getContext(r)
+ token, ld, now, created := "", LockDetails{}, time.Now(), false
+ if li == (lockInfo{}) {
+ // An empty lockInfo means to refresh the lock.
+ ih, ok := parseIfHeader(r.Header.Get("If"))
+ if !ok {
+ return http.StatusBadRequest, errInvalidIfHeader
+ }
+ if len(ih.lists) == 1 && len(ih.lists[0].conditions) == 1 {
+ token = ih.lists[0].conditions[0].Token
+ }
+ if token == "" {
+ return http.StatusBadRequest, errInvalidLockToken
+ }
+ ld, err = h.LockSystem.Refresh(now, token, duration)
+ if err != nil {
+ if err == ErrNoSuchLock {
+ return http.StatusPreconditionFailed, err
+ }
+ return http.StatusInternalServerError, err
+ }
+
+ } else {
+ // Section 9.10.3 says that "If no Depth header is submitted on a LOCK request,
+ // then the request MUST act as if a "Depth:infinity" had been submitted."
+ depth := infiniteDepth
+ if hdr := r.Header.Get("Depth"); hdr != "" {
+ depth = parseDepth(hdr)
+ if depth != 0 && depth != infiniteDepth {
+ // Section 9.10.3 says that "Values other than 0 or infinity must not be
+ // used with the Depth header on a LOCK method".
+ return http.StatusBadRequest, errInvalidDepth
+ }
+ }
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ ld = LockDetails{
+ Root: reqPath,
+ Duration: duration,
+ OwnerXML: li.Owner.InnerXML,
+ ZeroDepth: depth == 0,
+ }
+ token, err = h.LockSystem.Create(now, ld)
+ if err != nil {
+ if err == ErrLocked {
+ return StatusLocked, err
+ }
+ return http.StatusInternalServerError, err
+ }
+ defer func() {
+ if retErr != nil {
+ h.LockSystem.Unlock(now, token)
+ }
+ }()
+
+ // Create the resource if it didn't previously exist.
+ if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
+ f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ // TODO: detect missing intermediate dirs and return http.StatusConflict?
+ return http.StatusInternalServerError, err
+ }
+ f.Close()
+ created = true
+ }
+
+ // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the
+ // Lock-Token value is a Coded-URL. We add angle brackets.
+ w.Header().Set("Lock-Token", "<"+token+">")
+ }
+
+ w.Header().Set("Content-Type", "application/xml; charset=utf-8")
+ if created {
+ // This is "w.WriteHeader(http.StatusCreated)" and not "return
+ // http.StatusCreated, nil" because we write our own (XML) response to w
+ // and Handler.ServeHTTP would otherwise write "Created".
+ w.WriteHeader(http.StatusCreated)
+ }
+ writeLockInfo(w, token, ld)
+ return 0, nil
+}
+
+func (h *Handler) handleUnlock(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the
+ // Lock-Token value is a Coded-URL. We strip its angle brackets.
+ t := r.Header.Get("Lock-Token")
+ if len(t) < 2 || t[0] != '<' || t[len(t)-1] != '>' {
+ return http.StatusBadRequest, errInvalidLockToken
+ }
+ t = t[1 : len(t)-1]
+
+ switch err = h.LockSystem.Unlock(time.Now(), t); err {
+ case nil:
+ return http.StatusNoContent, err
+ case ErrForbidden:
+ return http.StatusForbidden, err
+ case ErrLocked:
+ return StatusLocked, err
+ case ErrNoSuchLock:
+ return http.StatusConflict, err
+ default:
+ return http.StatusInternalServerError, err
+ }
+}
+
+func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ ctx := getContext(r)
+ fi, err := h.FileSystem.Stat(ctx, reqPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusNotFound, err
+ }
+ return http.StatusMethodNotAllowed, err
+ }
+ depth := infiniteDepth
+ if hdr := r.Header.Get("Depth"); hdr != "" {
+ depth = parseDepth(hdr)
+ if depth == invalidDepth {
+ return http.StatusBadRequest, errInvalidDepth
+ }
+ }
+ pf, status, err := readPropfind(r.Body)
+ if err != nil {
+ return status, err
+ }
+
+ mw := multistatusWriter{w: w}
+
+ walkFn := func(reqPath string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ var pstats []Propstat
+ if pf.Propname != nil {
+ pnames, err := propnames(ctx, h.FileSystem, h.LockSystem, reqPath)
+ if err != nil {
+ return err
+ }
+ pstat := Propstat{Status: http.StatusOK}
+ for _, xmlname := range pnames {
+ pstat.Props = append(pstat.Props, Property{XMLName: xmlname})
+ }
+ pstats = append(pstats, pstat)
+ } else if pf.Allprop != nil {
+ pstats, err = allprop(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop)
+ } else {
+ pstats, err = props(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop)
+ }
+ if err != nil {
+ return err
+ }
+ return mw.write(makePropstatResponse(path.Join(h.Prefix, reqPath), pstats))
+ }
+
+ walkErr := walkFS(ctx, h.FileSystem, depth, reqPath, fi, walkFn)
+ closeErr := mw.close()
+ if walkErr != nil {
+ return http.StatusInternalServerError, walkErr
+ }
+ if closeErr != nil {
+ return http.StatusInternalServerError, closeErr
+ }
+ return 0, nil
+}
+
+func (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ release, status, err := h.confirmLocks(r, reqPath, "")
+ if err != nil {
+ return status, err
+ }
+ defer release()
+
+ ctx := getContext(r)
+
+ if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusNotFound, err
+ }
+ return http.StatusMethodNotAllowed, err
+ }
+ patches, status, err := readProppatch(r.Body)
+ if err != nil {
+ return status, err
+ }
+ pstats, err := patch(ctx, h.FileSystem, h.LockSystem, reqPath, patches)
+ if err != nil {
+ return http.StatusInternalServerError, err
+ }
+ mw := multistatusWriter{w: w}
+ writeErr := mw.write(makePropstatResponse(r.URL.Path, pstats))
+ closeErr := mw.close()
+ if writeErr != nil {
+ return http.StatusInternalServerError, writeErr
+ }
+ if closeErr != nil {
+ return http.StatusInternalServerError, closeErr
+ }
+ return 0, nil
+}
+
+func makePropstatResponse(href string, pstats []Propstat) *response {
+ resp := response{
+ Href: []string{(&url.URL{Path: href}).EscapedPath()},
+ Propstat: make([]propstat, 0, len(pstats)),
+ }
+ for _, p := range pstats {
+ var xmlErr *xmlError
+ if p.XMLError != "" {
+ xmlErr = &xmlError{InnerXML: []byte(p.XMLError)}
+ }
+ resp.Propstat = append(resp.Propstat, propstat{
+ Status: fmt.Sprintf("HTTP/1.1 %d %s", p.Status, StatusText(p.Status)),
+ Prop: p.Props,
+ ResponseDescription: p.ResponseDescription,
+ Error: xmlErr,
+ })
+ }
+ return &resp
+}
+
+const (
+ infiniteDepth = -1
+ invalidDepth = -2
+)
+
+// parseDepth maps the strings "0", "1" and "infinity" to 0, 1 and
+// infiniteDepth. Parsing any other string returns invalidDepth.
+//
+// Different WebDAV methods have further constraints on valid depths:
+// - PROPFIND has no further restrictions, as per section 9.1.
+// - COPY accepts only "0" or "infinity", as per section 9.8.3.
+// - MOVE accepts only "infinity", as per section 9.9.2.
+// - LOCK accepts only "0" or "infinity", as per section 9.10.3.
+// These constraints are enforced by the handleXxx methods.
+func parseDepth(s string) int {
+ switch s {
+ case "0":
+ return 0
+ case "1":
+ return 1
+ case "infinity":
+ return infiniteDepth
+ }
+ return invalidDepth
+}
+
+// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11
+const (
+ StatusMulti = 207
+ StatusUnprocessableEntity = 422
+ StatusLocked = 423
+ StatusFailedDependency = 424
+ StatusInsufficientStorage = 507
+)
+
+func StatusText(code int) string {
+ switch code {
+ case StatusMulti:
+ return "Multi-Status"
+ case StatusUnprocessableEntity:
+ return "Unprocessable Entity"
+ case StatusLocked:
+ return "Locked"
+ case StatusFailedDependency:
+ return "Failed Dependency"
+ case StatusInsufficientStorage:
+ return "Insufficient Storage"
+ }
+ return http.StatusText(code)
+}
+
+var (
+ errDestinationEqualsSource = errors.New("webdav: destination equals source")
+ errDirectoryNotEmpty = errors.New("webdav: directory not empty")
+ errInvalidDepth = errors.New("webdav: invalid depth")
+ errInvalidDestination = errors.New("webdav: invalid destination")
+ errInvalidIfHeader = errors.New("webdav: invalid If header")
+ errInvalidLockInfo = errors.New("webdav: invalid lock info")
+ errInvalidLockToken = errors.New("webdav: invalid lock token")
+ errInvalidPropfind = errors.New("webdav: invalid propfind")
+ errInvalidProppatch = errors.New("webdav: invalid proppatch")
+ errInvalidResponse = errors.New("webdav: invalid response")
+ errInvalidTimeout = errors.New("webdav: invalid timeout")
+ errNoFileSystem = errors.New("webdav: no file system")
+ errNoLockSystem = errors.New("webdav: no lock system")
+ errNotADirectory = errors.New("webdav: not a directory")
+ errPrefixMismatch = errors.New("webdav: prefix mismatch")
+ errRecursionTooDeep = errors.New("webdav: recursion too deep")
+ errUnsupportedLockInfo = errors.New("webdav: unsupported lock info")
+ errUnsupportedMethod = errors.New("webdav: unsupported method")
+)
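
For reference, a minimal sketch of wiring up the Handler defined above (illustrative only, not part of the vendored diff; it assumes the package's NewMemFS and NewMemLS constructors exercised by the tests below, plus a hypothetical /dav mount point and port):

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/webdav"
)

func main() {
	// Serve an in-memory WebDAV tree under the hypothetical /dav prefix.
	// Handler.Prefix must match the mount point so stripPrefix succeeds.
	h := &webdav.Handler{
		Prefix:     "/dav",
		FileSystem: webdav.NewMemFS(),
		LockSystem: webdav.NewMemLS(),
		Logger: func(r *http.Request, err error) {
			if err != nil {
				log.Printf("webdav: %s %s: %v", r.Method, r.URL.Path, err)
			}
		},
	}
	http.Handle("/dav/", h)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
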
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/webdav_test.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/webdav_test.go
new file mode 100644
index 000000000..25e0d5421
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/webdav_test.go
@@ -0,0 +1,344 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "reflect"
+ "regexp"
+ "sort"
+ "strings"
+ "testing"
+
+ "golang.org/x/net/context"
+)
+
+// TODO: add tests to check XML responses with the expected prefix path
+func TestPrefix(t *testing.T) {
+ const dst, blah = "Destination", "blah blah blah"
+
+ // createLockBody comes from the example in Section 9.10.7.
+ const createLockBody = `<?xml version="1.0" encoding="utf-8" ?>
+ <D:lockinfo xmlns:D='DAV:'>
+ <D:lockscope><D:exclusive/></D:lockscope>
+ <D:locktype><D:write/></D:locktype>
+ <D:owner>
+ <D:href>http://example.org/~ejw/contact.html</D:href>
+ </D:owner>
+ </D:lockinfo>
+ `
+
+ do := func(method, urlStr string, body string, wantStatusCode int, headers ...string) (http.Header, error) {
+ var bodyReader io.Reader
+ if body != "" {
+ bodyReader = strings.NewReader(body)
+ }
+ req, err := http.NewRequest(method, urlStr, bodyReader)
+ if err != nil {
+ return nil, err
+ }
+ for len(headers) >= 2 {
+ req.Header.Add(headers[0], headers[1])
+ headers = headers[2:]
+ }
+ res, err := http.DefaultTransport.RoundTrip(req)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != wantStatusCode {
+ return nil, fmt.Errorf("got status code %d, want %d", res.StatusCode, wantStatusCode)
+ }
+ return res.Header, nil
+ }
+
+ prefixes := []string{
+ "/",
+ "/a/",
+ "/a/b/",
+ "/a/b/c/",
+ }
+ ctx := context.Background()
+ for _, prefix := range prefixes {
+ fs := NewMemFS()
+ h := &Handler{
+ FileSystem: fs,
+ LockSystem: NewMemLS(),
+ }
+ mux := http.NewServeMux()
+ if prefix != "/" {
+ h.Prefix = prefix
+ }
+ mux.Handle(prefix, h)
+ srv := httptest.NewServer(mux)
+ defer srv.Close()
+
+ // The script is:
+ // MKCOL /a
+ // MKCOL /a/b
+ // PUT /a/b/c
+ // COPY /a/b/c /a/b/d
+ // MKCOL /a/b/e
+ // MOVE /a/b/d /a/b/e/f
+ // LOCK /a/b/e/g
+ // PUT /a/b/e/g
+ // which should yield the (possibly stripped) filenames /a/b/c,
+ // /a/b/e/f and /a/b/e/g, plus their parent directories.
+
+ wantA := map[string]int{
+ "/": http.StatusCreated,
+ "/a/": http.StatusMovedPermanently,
+ "/a/b/": http.StatusNotFound,
+ "/a/b/c/": http.StatusNotFound,
+ }[prefix]
+ if _, err := do("MKCOL", srv.URL+"/a", "", wantA); err != nil {
+ t.Errorf("prefix=%-9q MKCOL /a: %v", prefix, err)
+ continue
+ }
+
+ wantB := map[string]int{
+ "/": http.StatusCreated,
+ "/a/": http.StatusCreated,
+ "/a/b/": http.StatusMovedPermanently,
+ "/a/b/c/": http.StatusNotFound,
+ }[prefix]
+ if _, err := do("MKCOL", srv.URL+"/a/b", "", wantB); err != nil {
+ t.Errorf("prefix=%-9q MKCOL /a/b: %v", prefix, err)
+ continue
+ }
+
+ wantC := map[string]int{
+ "/": http.StatusCreated,
+ "/a/": http.StatusCreated,
+ "/a/b/": http.StatusCreated,
+ "/a/b/c/": http.StatusMovedPermanently,
+ }[prefix]
+ if _, err := do("PUT", srv.URL+"/a/b/c", blah, wantC); err != nil {
+ t.Errorf("prefix=%-9q PUT /a/b/c: %v", prefix, err)
+ continue
+ }
+
+ wantD := map[string]int{
+ "/": http.StatusCreated,
+ "/a/": http.StatusCreated,
+ "/a/b/": http.StatusCreated,
+ "/a/b/c/": http.StatusMovedPermanently,
+ }[prefix]
+ if _, err := do("COPY", srv.URL+"/a/b/c", "", wantD, dst, srv.URL+"/a/b/d"); err != nil {
+ t.Errorf("prefix=%-9q COPY /a/b/c /a/b/d: %v", prefix, err)
+ continue
+ }
+
+ wantE := map[string]int{
+ "/": http.StatusCreated,
+ "/a/": http.StatusCreated,
+ "/a/b/": http.StatusCreated,
+ "/a/b/c/": http.StatusNotFound,
+ }[prefix]
+ if _, err := do("MKCOL", srv.URL+"/a/b/e", "", wantE); err != nil {
+ t.Errorf("prefix=%-9q MKCOL /a/b/e: %v", prefix, err)
+ continue
+ }
+
+ wantF := map[string]int{
+ "/": http.StatusCreated,
+ "/a/": http.StatusCreated,
+ "/a/b/": http.StatusCreated,
+ "/a/b/c/": http.StatusNotFound,
+ }[prefix]
+ if _, err := do("MOVE", srv.URL+"/a/b/d", "", wantF, dst, srv.URL+"/a/b/e/f"); err != nil {
+ t.Errorf("prefix=%-9q MOVE /a/b/d /a/b/e/f: %v", prefix, err)
+ continue
+ }
+
+ var lockToken string
+ wantG := map[string]int{
+ "/": http.StatusCreated,
+ "/a/": http.StatusCreated,
+ "/a/b/": http.StatusCreated,
+ "/a/b/c/": http.StatusNotFound,
+ }[prefix]
+ if h, err := do("LOCK", srv.URL+"/a/b/e/g", createLockBody, wantG); err != nil {
+ t.Errorf("prefix=%-9q LOCK /a/b/e/g: %v", prefix, err)
+ continue
+ } else {
+ lockToken = h.Get("Lock-Token")
+ }
+
+ ifHeader := fmt.Sprintf("<%s/a/b/e/g> (%s)", srv.URL, lockToken)
+ wantH := map[string]int{
+ "/": http.StatusCreated,
+ "/a/": http.StatusCreated,
+ "/a/b/": http.StatusCreated,
+ "/a/b/c/": http.StatusNotFound,
+ }[prefix]
+ if _, err := do("PUT", srv.URL+"/a/b/e/g", blah, wantH, "If", ifHeader); err != nil {
+ t.Errorf("prefix=%-9q PUT /a/b/e/g: %v", prefix, err)
+ continue
+ }
+
+ got, err := find(ctx, nil, fs, "/")
+ if err != nil {
+ t.Errorf("prefix=%-9q find: %v", prefix, err)
+ continue
+ }
+ sort.Strings(got)
+ want := map[string][]string{
+ "/": {"/", "/a", "/a/b", "/a/b/c", "/a/b/e", "/a/b/e/f", "/a/b/e/g"},
+ "/a/": {"/", "/b", "/b/c", "/b/e", "/b/e/f", "/b/e/g"},
+ "/a/b/": {"/", "/c", "/e", "/e/f", "/e/g"},
+ "/a/b/c/": {"/"},
+ }[prefix]
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("prefix=%-9q find:\ngot %v\nwant %v", prefix, got, want)
+ continue
+ }
+ }
+}
+
+func TestEscapeXML(t *testing.T) {
+ // These test cases aren't exhaustive, and there is more than one way to
+ // escape e.g. a quot (as "&#34;" or "&quot;") or an apos. We presume that
+ // the encoding/xml package tests xml.EscapeText more thoroughly. This test
+ // here is just a sanity check for this package's escapeXML function, and
+ // its attempt to provide a fast path (and avoid a bytes.Buffer allocation)
+ // when escaping filenames is obviously a no-op.
+ testCases := map[string]string{
+ "": "",
+ " ": " ",
+ "&": "&amp;",
+ "*": "*",
+ "+": "+",
+ ",": ",",
+ "-": "-",
+ ".": ".",
+ "/": "/",
+ "0": "0",
+ "9": "9",
+ ":": ":",
+ "<": "&lt;",
+ ">": "&gt;",
+ "A": "A",
+ "_": "_",
+ "a": "a",
+ "~": "~",
+ "\u0201": "\u0201",
+ "&amp;": "&amp;amp;",
+ "foo&<b/ar>baz": "foo&amp;&lt;b/ar&gt;baz",
+ }
+
+ for in, want := range testCases {
+ if got := escapeXML(in); got != want {
+ t.Errorf("in=%q: got %q, want %q", in, got, want)
+ }
+ }
+}
+
+func TestFilenameEscape(t *testing.T) {
+ hrefRe := regexp.MustCompile(`<D:href>([^<]*)</D:href>`)
+ displayNameRe := regexp.MustCompile(`<D:displayname>([^<]*)</D:displayname>`)
+ do := func(method, urlStr string) (string, string, error) {
+ req, err := http.NewRequest(method, urlStr, nil)
+ if err != nil {
+ return "", "", err
+ }
+ res, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return "", "", err
+ }
+ defer res.Body.Close()
+
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return "", "", err
+ }
+ hrefMatch := hrefRe.FindStringSubmatch(string(b))
+ if len(hrefMatch) != 2 {
+ return "", "", errors.New("D:href not found")
+ }
+ displayNameMatch := displayNameRe.FindStringSubmatch(string(b))
+ if len(displayNameMatch) != 2 {
+ return "", "", errors.New("D:displayname not found")
+ }
+
+ return hrefMatch[1], displayNameMatch[1], nil
+ }
+
+ testCases := []struct {
+ name, wantHref, wantDisplayName string
+ }{{
+ name: `/foo%bar`,
+ wantHref: `/foo%25bar`,
+ wantDisplayName: `foo%bar`,
+ }, {
+ name: `/こんにちわ世界`,
+ wantHref: `/%E3%81%93%E3%82%93%E3%81%AB%E3%81%A1%E3%82%8F%E4%B8%96%E7%95%8C`,
+ wantDisplayName: `こんにちわ世界`,
+ }, {
+ name: `/Program Files/`,
+ wantHref: `/Program%20Files`,
+ wantDisplayName: `Program Files`,
+ }, {
+ name: `/go+lang`,
+ wantHref: `/go+lang`,
+ wantDisplayName: `go+lang`,
+ }, {
+ name: `/go&lang`,
+ wantHref: `/go&amp;lang`,
+ wantDisplayName: `go&amp;lang`,
+ }, {
+ name: `/go<lang`,
+ wantHref: `/go%3Clang`,
+ wantDisplayName: `go&lt;lang`,
+ }}
+ ctx := context.Background()
+ fs := NewMemFS()
+ for _, tc := range testCases {
+ if strings.HasSuffix(tc.name, "/") {
+ if err := fs.Mkdir(ctx, tc.name, 0755); err != nil {
+ t.Fatalf("name=%q: Mkdir: %v", tc.name, err)
+ }
+ } else {
+ f, err := fs.OpenFile(ctx, tc.name, os.O_CREATE, 0644)
+ if err != nil {
+ t.Fatalf("name=%q: OpenFile: %v", tc.name, err)
+ }
+ f.Close()
+ }
+ }
+
+ srv := httptest.NewServer(&Handler{
+ FileSystem: fs,
+ LockSystem: NewMemLS(),
+ })
+ defer srv.Close()
+
+ u, err := url.Parse(srv.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, tc := range testCases {
+ u.Path = tc.name
+ gotHref, gotDisplayName, err := do("PROPFIND", u.String())
+ if err != nil {
+ t.Errorf("name=%q: PROPFIND: %v", tc.name, err)
+ continue
+ }
+ if gotHref != tc.wantHref {
+ t.Errorf("name=%q: got href %q, want %q", tc.name, gotHref, tc.wantHref)
+ }
+ if gotDisplayName != tc.wantDisplayName {
+ t.Errorf("name=%q: got dispayname %q, want %q", tc.name, gotDisplayName, tc.wantDisplayName)
+ }
+ }
+}
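
The PROPFIND requests issued by the tests above can also be driven by hand. A rough client-side sketch (assuming a server already listening at a placeholder address) of how the Depth header and an allprop body map onto handlePropfind and readPropfind:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

// propfindAllprop issues a PROPFIND with Depth: 1 and an allprop body.
// srvURL is a placeholder for a real WebDAV server address.
func propfindAllprop(srvURL, name string) (string, error) {
	body := strings.NewReader(`<?xml version="1.0" encoding="utf-8"?>` +
		`<D:propfind xmlns:D="DAV:"><D:allprop/></D:propfind>`)
	req, err := http.NewRequest("PROPFIND", srvURL+name, body)
	if err != nil {
		return "", err
	}
	// parseDepth accepts "0", "1" or "infinity"; anything else is rejected.
	req.Header.Set("Depth", "1")
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	b, err := ioutil.ReadAll(res.Body)
	return string(b), err // on success this is a 207 Multi-Status XML document
}

func main() {
	ms, err := propfindAllprop("http://localhost:8080/dav", "/")
	if err != nil {
		fmt.Println("propfind:", err)
		return
	}
	fmt.Println(ms)
}
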
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/xml.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/xml.go
new file mode 100644
index 000000000..790dc8169
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/xml.go
@@ -0,0 +1,519 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+// The XML encoding is covered by Section 14.
+// http://www.webdav.org/specs/rfc4918.html#xml.element.definitions
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+
+ // As of https://go-review.googlesource.com/#/c/12772/ which was submitted
+ // in July 2015, this package uses an internal fork of the standard
+ // library's encoding/xml package, due to changes in the way namespaces
+ // were encoded. Such changes were introduced in the Go 1.5 cycle, but were
+ // rolled back in response to https://github.com/golang/go/issues/11841
+ //
+ // However, this package's exported API, specifically the Property and
+ // DeadPropsHolder types, need to refer to the standard library's version
+ // of the xml.Name type, as code that imports this package cannot refer to
+ // the internal version.
+ //
+ // This file therefore imports both the internal and external versions, as
+ // ixml and xml, and converts between them.
+ //
+ // In the long term, this package should use the standard library's version
+ // only, and the internal fork deleted, once
+ // https://github.com/golang/go/issues/13400 is resolved.
+ ixml "golang.org/x/net/webdav/internal/xml"
+)
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_lockinfo
+type lockInfo struct {
+ XMLName ixml.Name `xml:"lockinfo"`
+ Exclusive *struct{} `xml:"lockscope>exclusive"`
+ Shared *struct{} `xml:"lockscope>shared"`
+ Write *struct{} `xml:"locktype>write"`
+ Owner owner `xml:"owner"`
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_owner
+type owner struct {
+ InnerXML string `xml:",innerxml"`
+}
+
+func readLockInfo(r io.Reader) (li lockInfo, status int, err error) {
+ c := &countingReader{r: r}
+ if err = ixml.NewDecoder(c).Decode(&li); err != nil {
+ if err == io.EOF {
+ if c.n == 0 {
+ // An empty body means to refresh the lock.
+ // http://www.webdav.org/specs/rfc4918.html#refreshing-locks
+ return lockInfo{}, 0, nil
+ }
+ err = errInvalidLockInfo
+ }
+ return lockInfo{}, http.StatusBadRequest, err
+ }
+ // We only support exclusive (non-shared) write locks. In practice, these are
+ // the only types of locks that seem to matter.
+ if li.Exclusive == nil || li.Shared != nil || li.Write == nil {
+ return lockInfo{}, http.StatusNotImplemented, errUnsupportedLockInfo
+ }
+ return li, 0, nil
+}
+
+type countingReader struct {
+ n int
+ r io.Reader
+}
+
+func (c *countingReader) Read(p []byte) (int, error) {
+ n, err := c.r.Read(p)
+ c.n += n
+ return n, err
+}
+
+func writeLockInfo(w io.Writer, token string, ld LockDetails) (int, error) {
+ depth := "infinity"
+ if ld.ZeroDepth {
+ depth = "0"
+ }
+ timeout := ld.Duration / time.Second
+ return fmt.Fprintf(w, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n"+
+ "<D:prop xmlns:D=\"DAV:\"><D:lockdiscovery><D:activelock>\n"+
+ " <D:locktype><D:write/></D:locktype>\n"+
+ " <D:lockscope><D:exclusive/></D:lockscope>\n"+
+ " <D:depth>%s</D:depth>\n"+
+ " <D:owner>%s</D:owner>\n"+
+ " <D:timeout>Second-%d</D:timeout>\n"+
+ " <D:locktoken><D:href>%s</D:href></D:locktoken>\n"+
+ " <D:lockroot><D:href>%s</D:href></D:lockroot>\n"+
+ "</D:activelock></D:lockdiscovery></D:prop>",
+ depth, ld.OwnerXML, timeout, escape(token), escape(ld.Root),
+ )
+}
+
+func escape(s string) string {
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '"', '&', '\'', '<', '>':
+ b := bytes.NewBuffer(nil)
+ ixml.EscapeText(b, []byte(s))
+ return b.String()
+ }
+ }
+ return s
+}
+
+// next returns the next token, if any, in the XML stream of d.
+// RFC 4918 requires that comments, processing instructions and
+// directives be ignored.
+// http://www.webdav.org/specs/rfc4918.html#property_values
+// http://www.webdav.org/specs/rfc4918.html#xml-extensibility
+func next(d *ixml.Decoder) (ixml.Token, error) {
+ for {
+ t, err := d.Token()
+ if err != nil {
+ return t, err
+ }
+ switch t.(type) {
+ case ixml.Comment, ixml.Directive, ixml.ProcInst:
+ continue
+ default:
+ return t, nil
+ }
+ }
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for propfind)
+type propfindProps []xml.Name
+
+// UnmarshalXML appends the property names enclosed within start to pn.
+//
+// It returns an error if start does not contain any properties or if
+// properties contain values. Character data between properties is ignored.
+func (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
+ for {
+ t, err := next(d)
+ if err != nil {
+ return err
+ }
+ switch t.(type) {
+ case ixml.EndElement:
+ if len(*pn) == 0 {
+ return fmt.Errorf("%s must not be empty", start.Name.Local)
+ }
+ return nil
+ case ixml.StartElement:
+ name := t.(ixml.StartElement).Name
+ t, err = next(d)
+ if err != nil {
+ return err
+ }
+ if _, ok := t.(ixml.EndElement); !ok {
+ return fmt.Errorf("unexpected token %T", t)
+ }
+ *pn = append(*pn, xml.Name(name))
+ }
+ }
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind
+type propfind struct {
+ XMLName ixml.Name `xml:"DAV: propfind"`
+ Allprop *struct{} `xml:"DAV: allprop"`
+ Propname *struct{} `xml:"DAV: propname"`
+ Prop propfindProps `xml:"DAV: prop"`
+ Include propfindProps `xml:"DAV: include"`
+}
+
+func readPropfind(r io.Reader) (pf propfind, status int, err error) {
+ c := countingReader{r: r}
+ if err = ixml.NewDecoder(&c).Decode(&pf); err != nil {
+ if err == io.EOF {
+ if c.n == 0 {
+ // An empty body means to propfind allprop.
+ // http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
+ return propfind{Allprop: new(struct{})}, 0, nil
+ }
+ err = errInvalidPropfind
+ }
+ return propfind{}, http.StatusBadRequest, err
+ }
+
+ if pf.Allprop == nil && pf.Include != nil {
+ return propfind{}, http.StatusBadRequest, errInvalidPropfind
+ }
+ if pf.Allprop != nil && (pf.Prop != nil || pf.Propname != nil) {
+ return propfind{}, http.StatusBadRequest, errInvalidPropfind
+ }
+ if pf.Prop != nil && pf.Propname != nil {
+ return propfind{}, http.StatusBadRequest, errInvalidPropfind
+ }
+ if pf.Propname == nil && pf.Allprop == nil && pf.Prop == nil {
+ return propfind{}, http.StatusBadRequest, errInvalidPropfind
+ }
+ return pf, 0, nil
+}
+
+// Property represents a single DAV resource property as defined in RFC 4918.
+// See http://www.webdav.org/specs/rfc4918.html#data.model.for.resource.properties
+type Property struct {
+ // XMLName is the fully qualified name that identifies this property.
+ XMLName xml.Name
+
+ // Lang is an optional xml:lang attribute.
+ Lang string `xml:"xml:lang,attr,omitempty"`
+
+ // InnerXML contains the XML representation of the property value.
+ // See http://www.webdav.org/specs/rfc4918.html#property_values
+ //
+ // Property values of complex type or mixed-content must have fully
+ // expanded XML namespaces or be self-contained with the corresponding
+ // XML namespace declarations. They must not rely on any XML
+ // namespace declarations within the scope of the XML document,
+ // even including the DAV: namespace.
+ InnerXML []byte `xml:",innerxml"`
+}
+
+// ixmlProperty is the same as the Property type except it holds an ixml.Name
+// instead of an xml.Name.
+type ixmlProperty struct {
+ XMLName ixml.Name
+ Lang string `xml:"xml:lang,attr,omitempty"`
+ InnerXML []byte `xml:",innerxml"`
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_error
+// See multistatusWriter for the "D:" namespace prefix.
+type xmlError struct {
+ XMLName ixml.Name `xml:"D:error"`
+ InnerXML []byte `xml:",innerxml"`
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat
+// See multistatusWriter for the "D:" namespace prefix.
+type propstat struct {
+ Prop []Property `xml:"D:prop>_ignored_"`
+ Status string `xml:"D:status"`
+ Error *xmlError `xml:"D:error"`
+ ResponseDescription string `xml:"D:responsedescription,omitempty"`
+}
+
+// ixmlPropstat is the same as the propstat type except it holds an ixml.Name
+// instead of an xml.Name.
+type ixmlPropstat struct {
+ Prop []ixmlProperty `xml:"D:prop>_ignored_"`
+ Status string `xml:"D:status"`
+ Error *xmlError `xml:"D:error"`
+ ResponseDescription string `xml:"D:responsedescription,omitempty"`
+}
+
+// MarshalXML prepends the "D:" namespace prefix on properties in the DAV: namespace
+// before encoding. See multistatusWriter.
+func (ps propstat) MarshalXML(e *ixml.Encoder, start ixml.StartElement) error {
+ // Convert from a propstat to an ixmlPropstat.
+ ixmlPs := ixmlPropstat{
+ Prop: make([]ixmlProperty, len(ps.Prop)),
+ Status: ps.Status,
+ Error: ps.Error,
+ ResponseDescription: ps.ResponseDescription,
+ }
+ for k, prop := range ps.Prop {
+ ixmlPs.Prop[k] = ixmlProperty{
+ XMLName: ixml.Name(prop.XMLName),
+ Lang: prop.Lang,
+ InnerXML: prop.InnerXML,
+ }
+ }
+
+ for k, prop := range ixmlPs.Prop {
+ if prop.XMLName.Space == "DAV:" {
+ prop.XMLName = ixml.Name{Space: "", Local: "D:" + prop.XMLName.Local}
+ ixmlPs.Prop[k] = prop
+ }
+ }
+ // Distinct type to avoid infinite recursion of MarshalXML.
+ type newpropstat ixmlPropstat
+ return e.EncodeElement(newpropstat(ixmlPs), start)
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_response
+// See multistatusWriter for the "D:" namespace prefix.
+type response struct {
+ XMLName ixml.Name `xml:"D:response"`
+ Href []string `xml:"D:href"`
+ Propstat []propstat `xml:"D:propstat"`
+ Status string `xml:"D:status,omitempty"`
+ Error *xmlError `xml:"D:error"`
+ ResponseDescription string `xml:"D:responsedescription,omitempty"`
+}
+
+// multistatusWriter marshals one or more responses into an XML
+// multistatus response.
+// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_multistatus
+// TODO(rsto, mpl): As a workaround, the "D:" namespace prefix, defined as
+// "DAV:" on this element, is prepended on the nested response, as well as on all
+// its nested elements. All property names in the DAV: namespace are prefixed as
+// well. This is because some versions of Mini-Redirector (on windows 7) ignore
+// elements with a default namespace (no prefixed namespace). A less intrusive fix
+// should be possible after golang.org/cl/11074. See https://golang.org/issue/11177
+type multistatusWriter struct {
+ // ResponseDescription contains the optional responsedescription
+ // of the multistatus XML element. Only the latest content before
+ // close will be emitted. Empty response descriptions are not
+ // written.
+ responseDescription string
+
+ w http.ResponseWriter
+ enc *ixml.Encoder
+}
+
+// Write validates and emits a DAV response as part of a multistatus response
+// element.
+//
+// It sets the HTTP status code of its underlying http.ResponseWriter to 207
+// (Multi-Status) and populates the Content-Type header. If r is the
+// first valid response to be written, Write prepends the XML representation
+// of r with a multistatus tag. Callers must call close after the last response
+// has been written.
+func (w *multistatusWriter) write(r *response) error {
+ switch len(r.Href) {
+ case 0:
+ return errInvalidResponse
+ case 1:
+ if len(r.Propstat) > 0 != (r.Status == "") {
+ return errInvalidResponse
+ }
+ default:
+ if len(r.Propstat) > 0 || r.Status == "" {
+ return errInvalidResponse
+ }
+ }
+ err := w.writeHeader()
+ if err != nil {
+ return err
+ }
+ return w.enc.Encode(r)
+}
+
+// writeHeader writes an XML multistatus start element on w's underlying
+// http.ResponseWriter and returns the result of the write operation.
+// After the first write attempt, writeHeader becomes a no-op.
+func (w *multistatusWriter) writeHeader() error {
+ if w.enc != nil {
+ return nil
+ }
+ w.w.Header().Add("Content-Type", "text/xml; charset=utf-8")
+ w.w.WriteHeader(StatusMulti)
+ _, err := fmt.Fprintf(w.w, `<?xml version="1.0" encoding="UTF-8"?>`)
+ if err != nil {
+ return err
+ }
+ w.enc = ixml.NewEncoder(w.w)
+ return w.enc.EncodeToken(ixml.StartElement{
+ Name: ixml.Name{
+ Space: "DAV:",
+ Local: "multistatus",
+ },
+ Attr: []ixml.Attr{{
+ Name: ixml.Name{Space: "xmlns", Local: "D"},
+ Value: "DAV:",
+ }},
+ })
+}
+
+// Close completes the marshalling of the multistatus response. It returns
+// an error if the multistatus response could not be completed. If both the
+// return value and field enc of w are nil, then no multistatus response has
+// been written.
+func (w *multistatusWriter) close() error {
+ if w.enc == nil {
+ return nil
+ }
+ var end []ixml.Token
+ if w.responseDescription != "" {
+ name := ixml.Name{Space: "DAV:", Local: "responsedescription"}
+ end = append(end,
+ ixml.StartElement{Name: name},
+ ixml.CharData(w.responseDescription),
+ ixml.EndElement{Name: name},
+ )
+ }
+ end = append(end, ixml.EndElement{
+ Name: ixml.Name{Space: "DAV:", Local: "multistatus"},
+ })
+ for _, t := range end {
+ err := w.enc.EncodeToken(t)
+ if err != nil {
+ return err
+ }
+ }
+ return w.enc.Flush()
+}
+
+var xmlLangName = ixml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"}
+
+func xmlLang(s ixml.StartElement, d string) string {
+ for _, attr := range s.Attr {
+ if attr.Name == xmlLangName {
+ return attr.Value
+ }
+ }
+ return d
+}
+
+type xmlValue []byte
+
+func (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
+ // The XML value of a property can be arbitrary, mixed-content XML.
+ // To make sure that the unmarshalled value contains all required
+ // namespaces, we encode all the property value XML tokens into a
+ // buffer. This forces the encoder to redeclare any used namespaces.
+ var b bytes.Buffer
+ e := ixml.NewEncoder(&b)
+ for {
+ t, err := next(d)
+ if err != nil {
+ return err
+ }
+ if e, ok := t.(ixml.EndElement); ok && e.Name == start.Name {
+ break
+ }
+ if err = e.EncodeToken(t); err != nil {
+ return err
+ }
+ }
+ err := e.Flush()
+ if err != nil {
+ return err
+ }
+ *v = b.Bytes()
+ return nil
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch)
+type proppatchProps []Property
+
+// UnmarshalXML appends the property names and values enclosed within start
+// to ps.
+//
+// An xml:lang attribute that is defined either on the DAV:prop or property
+// name XML element is propagated to the property's Lang field.
+//
+// UnmarshalXML returns an error if start does not contain any properties or if
+// property values contain syntactically incorrect XML.
+func (ps *proppatchProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
+ lang := xmlLang(start, "")
+ for {
+ t, err := next(d)
+ if err != nil {
+ return err
+ }
+ switch elem := t.(type) {
+ case ixml.EndElement:
+ if len(*ps) == 0 {
+ return fmt.Errorf("%s must not be empty", start.Name.Local)
+ }
+ return nil
+ case ixml.StartElement:
+ p := Property{
+ XMLName: xml.Name(t.(ixml.StartElement).Name),
+ Lang: xmlLang(t.(ixml.StartElement), lang),
+ }
+ err = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem)
+ if err != nil {
+ return err
+ }
+ *ps = append(*ps, p)
+ }
+ }
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_set
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove
+type setRemove struct {
+ XMLName ixml.Name
+ Lang string `xml:"xml:lang,attr,omitempty"`
+ Prop proppatchProps `xml:"DAV: prop"`
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate
+type propertyupdate struct {
+ XMLName ixml.Name `xml:"DAV: propertyupdate"`
+ Lang string `xml:"xml:lang,attr,omitempty"`
+ SetRemove []setRemove `xml:",any"`
+}
+
+func readProppatch(r io.Reader) (patches []Proppatch, status int, err error) {
+ var pu propertyupdate
+ if err = ixml.NewDecoder(r).Decode(&pu); err != nil {
+ return nil, http.StatusBadRequest, err
+ }
+ for _, op := range pu.SetRemove {
+ remove := false
+ switch op.XMLName {
+ case ixml.Name{Space: "DAV:", Local: "set"}:
+ // No-op.
+ case ixml.Name{Space: "DAV:", Local: "remove"}:
+ for _, p := range op.Prop {
+ if len(p.InnerXML) > 0 {
+ return nil, http.StatusBadRequest, errInvalidProppatch
+ }
+ }
+ remove = true
+ default:
+ return nil, http.StatusBadRequest, errInvalidProppatch
+ }
+ patches = append(patches, Proppatch{Remove: remove, Props: op.Prop})
+ }
+ return patches, 0, nil
+}
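
As a companion to the Property documentation above, a small sketch (the http://example.com/ns/ namespace and the values are made up) of building a namespace self-contained dead property for a PROPPATCH, using the exported Property and Proppatch types:

package main

import (
	"encoding/xml"
	"fmt"

	"golang.org/x/net/webdav"
)

func main() {
	// The property value is a child element that declares its own namespace,
	// so it does not depend on declarations elsewhere in the XML document.
	p := webdav.Property{
		XMLName:  xml.Name{Space: "http://example.com/ns/", Local: "color"},
		InnerXML: []byte(`<shade xmlns="http://example.com/ns/">dark red</shade>`),
	}
	patch := webdav.Proppatch{Props: []webdav.Property{p}}
	fmt.Printf("patch sets %d property: %s\n", len(patch.Props), p.XMLName.Local)
}
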
diff --git a/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/xml_test.go b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/xml_test.go
new file mode 100644
index 000000000..a3d9e1ed8
--- /dev/null
+++ b/vendor/github.com/miekg/dns/vendor/golang.org/x/net/webdav/xml_test.go
@@ -0,0 +1,906 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+
+ ixml "golang.org/x/net/webdav/internal/xml"
+)
+
+func TestReadLockInfo(t *testing.T) {
+ // The "section x.y.z" test cases come from section x.y.z of the spec at
+ // http://www.webdav.org/specs/rfc4918.html
+ testCases := []struct {
+ desc string
+ input string
+ wantLI lockInfo
+ wantStatus int
+ }{{
+ "bad: junk",
+ "xxx",
+ lockInfo{},
+ http.StatusBadRequest,
+ }, {
+ "bad: invalid owner XML",
+ "" +
+ "<D:lockinfo xmlns:D='DAV:'>\n" +
+ " <D:lockscope><D:exclusive/></D:lockscope>\n" +
+ " <D:locktype><D:write/></D:locktype>\n" +
+ " <D:owner>\n" +
+ " <D:href> no end tag \n" +
+ " </D:owner>\n" +
+ "</D:lockinfo>",
+ lockInfo{},
+ http.StatusBadRequest,
+ }, {
+ "bad: invalid UTF-8",
+ "" +
+ "<D:lockinfo xmlns:D='DAV:'>\n" +
+ " <D:lockscope><D:exclusive/></D:lockscope>\n" +
+ " <D:locktype><D:write/></D:locktype>\n" +
+ " <D:owner>\n" +
+ " <D:href> \xff </D:href>\n" +
+ " </D:owner>\n" +
+ "</D:lockinfo>",
+ lockInfo{},
+ http.StatusBadRequest,
+ }, {
+ "bad: unfinished XML #1",
+ "" +
+ "<D:lockinfo xmlns:D='DAV:'>\n" +
+ " <D:lockscope><D:exclusive/></D:lockscope>\n" +
+ " <D:locktype><D:write/></D:locktype>\n",
+ lockInfo{},
+ http.StatusBadRequest,
+ }, {
+ "bad: unfinished XML #2",
+ "" +
+ "<D:lockinfo xmlns:D='DAV:'>\n" +
+ " <D:lockscope><D:exclusive/></D:lockscope>\n" +
+ " <D:locktype><D:write/></D:locktype>\n" +
+ " <D:owner>\n",
+ lockInfo{},
+ http.StatusBadRequest,
+ }, {
+ "good: empty",
+ "",
+ lockInfo{},
+ 0,
+ }, {
+ "good: plain-text owner",
+ "" +
+ "<D:lockinfo xmlns:D='DAV:'>\n" +
+ " <D:lockscope><D:exclusive/></D:lockscope>\n" +
+ " <D:locktype><D:write/></D:locktype>\n" +
+ " <D:owner>gopher</D:owner>\n" +
+ "</D:lockinfo>",
+ lockInfo{
+ XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"},
+ Exclusive: new(struct{}),
+ Write: new(struct{}),
+ Owner: owner{
+ InnerXML: "gopher",
+ },
+ },
+ 0,
+ }, {
+ "section 9.10.7",
+ "" +
+ "<D:lockinfo xmlns:D='DAV:'>\n" +
+ " <D:lockscope><D:exclusive/></D:lockscope>\n" +
+ " <D:locktype><D:write/></D:locktype>\n" +
+ " <D:owner>\n" +
+ " <D:href>http://example.org/~ejw/contact.html</D:href>\n" +
+ " </D:owner>\n" +
+ "</D:lockinfo>",
+ lockInfo{
+ XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"},
+ Exclusive: new(struct{}),
+ Write: new(struct{}),
+ Owner: owner{
+ InnerXML: "\n <D:href>http://example.org/~ejw/contact.html</D:href>\n ",
+ },
+ },
+ 0,
+ }}
+
+ for _, tc := range testCases {
+ li, status, err := readLockInfo(strings.NewReader(tc.input))
+ if tc.wantStatus != 0 {
+ if err == nil {
+ t.Errorf("%s: got nil error, want non-nil", tc.desc)
+ continue
+ }
+ } else if err != nil {
+ t.Errorf("%s: %v", tc.desc, err)
+ continue
+ }
+ if !reflect.DeepEqual(li, tc.wantLI) || status != tc.wantStatus {
+ t.Errorf("%s:\ngot lockInfo=%v, status=%v\nwant lockInfo=%v, status=%v",
+ tc.desc, li, status, tc.wantLI, tc.wantStatus)
+ continue
+ }
+ }
+}
+
+func TestReadPropfind(t *testing.T) {
+ testCases := []struct {
+ desc string
+ input string
+ wantPF propfind
+ wantStatus int
+ }{{
+ desc: "propfind: propname",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:propname/>\n" +
+ "</A:propfind>",
+ wantPF: propfind{
+ XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
+ Propname: new(struct{}),
+ },
+ }, {
+ desc: "propfind: empty body means allprop",
+ input: "",
+ wantPF: propfind{
+ Allprop: new(struct{}),
+ },
+ }, {
+ desc: "propfind: allprop",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:allprop/>\n" +
+ "</A:propfind>",
+ wantPF: propfind{
+ XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
+ Allprop: new(struct{}),
+ },
+ }, {
+ desc: "propfind: allprop followed by include",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:allprop/>\n" +
+ " <A:include><A:displayname/></A:include>\n" +
+ "</A:propfind>",
+ wantPF: propfind{
+ XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
+ Allprop: new(struct{}),
+ Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
+ },
+ }, {
+ desc: "propfind: include followed by allprop",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:include><A:displayname/></A:include>\n" +
+ " <A:allprop/>\n" +
+ "</A:propfind>",
+ wantPF: propfind{
+ XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
+ Allprop: new(struct{}),
+ Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
+ },
+ }, {
+ desc: "propfind: propfind",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop><A:displayname/></A:prop>\n" +
+ "</A:propfind>",
+ wantPF: propfind{
+ XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
+ Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
+ },
+ }, {
+ desc: "propfind: prop with ignored comments",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop>\n" +
+ " <!-- ignore -->\n" +
+ " <A:displayname><!-- ignore --></A:displayname>\n" +
+ " </A:prop>\n" +
+ "</A:propfind>",
+ wantPF: propfind{
+ XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
+ Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
+ },
+ }, {
+ desc: "propfind: propfind with ignored whitespace",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop> <A:displayname/></A:prop>\n" +
+ "</A:propfind>",
+ wantPF: propfind{
+ XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
+ Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
+ },
+ }, {
+ desc: "propfind: propfind with ignored mixed-content",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop>foo<A:displayname/>bar</A:prop>\n" +
+ "</A:propfind>",
+ wantPF: propfind{
+ XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
+ Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
+ },
+ }, {
+ desc: "propfind: propname with ignored element (section A.4)",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:propname/>\n" +
+ " <E:leave-out xmlns:E='E:'>*boss*</E:leave-out>\n" +
+ "</A:propfind>",
+ wantPF: propfind{
+ XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
+ Propname: new(struct{}),
+ },
+ }, {
+ desc: "propfind: bad: junk",
+ input: "xxx",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "propfind: bad: propname and allprop (section A.3)",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:propname/>" +
+ " <A:allprop/>" +
+ "</A:propfind>",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "propfind: bad: propname and prop",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop><A:displayname/></A:prop>\n" +
+ " <A:propname/>\n" +
+ "</A:propfind>",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "propfind: bad: allprop and prop",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:allprop/>\n" +
+ " <A:prop><A:foo/><A:/prop>\n" +
+ "</A:propfind>",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "propfind: bad: empty propfind with ignored element (section A.4)",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <E:expired-props/>\n" +
+ "</A:propfind>",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "propfind: bad: empty prop",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop/>\n" +
+ "</A:propfind>",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "propfind: bad: prop with just chardata",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop>foo</A:prop>\n" +
+ "</A:propfind>",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "bad: interrupted prop",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop><A:foo></A:prop>\n",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "bad: malformed end element prop",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop><A:foo/></A:bar></A:prop>\n",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "propfind: bad: property with chardata value",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop><A:foo>bar</A:foo></A:prop>\n" +
+ "</A:propfind>",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "propfind: bad: property with whitespace value",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:prop><A:foo> </A:foo></A:prop>\n" +
+ "</A:propfind>",
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "propfind: bad: include without allprop",
+ input: "" +
+ "<A:propfind xmlns:A='DAV:'>\n" +
+ " <A:include><A:foo/></A:include>\n" +
+ "</A:propfind>",
+ wantStatus: http.StatusBadRequest,
+ }}
+
+ for _, tc := range testCases {
+ pf, status, err := readPropfind(strings.NewReader(tc.input))
+ if tc.wantStatus != 0 {
+ if err == nil {
+ t.Errorf("%s: got nil error, want non-nil", tc.desc)
+ continue
+ }
+ } else if err != nil {
+ t.Errorf("%s: %v", tc.desc, err)
+ continue
+ }
+ if !reflect.DeepEqual(pf, tc.wantPF) || status != tc.wantStatus {
+ t.Errorf("%s:\ngot propfind=%v, status=%v\nwant propfind=%v, status=%v",
+ tc.desc, pf, status, tc.wantPF, tc.wantStatus)
+ continue
+ }
+ }
+}
+
+func TestMultistatusWriter(t *testing.T) {
+ ///The "section x.y.z" test cases come from section x.y.z of the spec at
+ // http://www.webdav.org/specs/rfc4918.html
+ testCases := []struct {
+ desc string
+ responses []response
+ respdesc string
+ writeHeader bool
+ wantXML string
+ wantCode int
+ wantErr error
+ }{{
+ desc: "section 9.2.2 (failed dependency)",
+ responses: []response{{
+ Href: []string{"http://example.com/foo"},
+ Propstat: []propstat{{
+ Prop: []Property{{
+ XMLName: xml.Name{
+ Space: "http://ns.example.com/",
+ Local: "Authors",
+ },
+ }},
+ Status: "HTTP/1.1 424 Failed Dependency",
+ }, {
+ Prop: []Property{{
+ XMLName: xml.Name{
+ Space: "http://ns.example.com/",
+ Local: "Copyright-Owner",
+ },
+ }},
+ Status: "HTTP/1.1 409 Conflict",
+ }},
+ ResponseDescription: "Copyright Owner cannot be deleted or altered.",
+ }},
+ wantXML: `` +
+ `<?xml version="1.0" encoding="UTF-8"?>` +
+ `<multistatus xmlns="DAV:">` +
+ ` <response>` +
+ ` <href>http://example.com/foo</href>` +
+ ` <propstat>` +
+ ` <prop>` +
+ ` <Authors xmlns="http://ns.example.com/"></Authors>` +
+ ` </prop>` +
+ ` <status>HTTP/1.1 424 Failed Dependency</status>` +
+ ` </propstat>` +
+ ` <propstat xmlns="DAV:">` +
+ ` <prop>` +
+ ` <Copyright-Owner xmlns="http://ns.example.com/"></Copyright-Owner>` +
+ ` </prop>` +
+ ` <status>HTTP/1.1 409 Conflict</status>` +
+ ` </propstat>` +
+ ` <responsedescription>Copyright Owner cannot be deleted or altered.</responsedescription>` +
+ `</response>` +
+ `</multistatus>`,
+ wantCode: StatusMulti,
+ }, {
+ desc: "section 9.6.2 (lock-token-submitted)",
+ responses: []response{{
+ Href: []string{"http://example.com/foo"},
+ Status: "HTTP/1.1 423 Locked",
+ Error: &xmlError{
+ InnerXML: []byte(`<lock-token-submitted xmlns="DAV:"/>`),
+ },
+ }},
+ wantXML: `` +
+ `<?xml version="1.0" encoding="UTF-8"?>` +
+ `<multistatus xmlns="DAV:">` +
+ ` <response>` +
+ ` <href>http://example.com/foo</href>` +
+ ` <status>HTTP/1.1 423 Locked</status>` +
+ ` <error><lock-token-submitted xmlns="DAV:"/></error>` +
+ ` </response>` +
+ `</multistatus>`,
+ wantCode: StatusMulti,
+ }, {
+ desc: "section 9.1.3",
+ responses: []response{{
+ Href: []string{"http://example.com/foo"},
+ Propstat: []propstat{{
+ Prop: []Property{{
+ XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "bigbox"},
+ InnerXML: []byte(`` +
+ `<BoxType xmlns="http://ns.example.com/boxschema/">` +
+ `Box type A` +
+ `</BoxType>`),
+ }, {
+ XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "author"},
+ InnerXML: []byte(`` +
+ `<Name xmlns="http://ns.example.com/boxschema/">` +
+ `J.J. Johnson` +
+ `</Name>`),
+ }},
+ Status: "HTTP/1.1 200 OK",
+ }, {
+ Prop: []Property{{
+ XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "DingALing"},
+ }, {
+ XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "Random"},
+ }},
+ Status: "HTTP/1.1 403 Forbidden",
+ ResponseDescription: "The user does not have access to the DingALing property.",
+ }},
+ }},
+ respdesc: "There has been an access violation error.",
+ wantXML: `` +
+ `<?xml version="1.0" encoding="UTF-8"?>` +
+ `<multistatus xmlns="DAV:" xmlns:B="http://ns.example.com/boxschema/">` +
+ ` <response>` +
+ ` <href>http://example.com/foo</href>` +
+ ` <propstat>` +
+ ` <prop>` +
+ ` <B:bigbox><B:BoxType>Box type A</B:BoxType></B:bigbox>` +
+ ` <B:author><B:Name>J.J. Johnson</B:Name></B:author>` +
+ ` </prop>` +
+ ` <status>HTTP/1.1 200 OK</status>` +
+ ` </propstat>` +
+ ` <propstat>` +
+ ` <prop>` +
+ ` <B:DingALing/>` +
+ ` <B:Random/>` +
+ ` </prop>` +
+ ` <status>HTTP/1.1 403 Forbidden</status>` +
+ ` <responsedescription>The user does not have access to the DingALing property.</responsedescription>` +
+ ` </propstat>` +
+ ` </response>` +
+ ` <responsedescription>There has been an access violation error.</responsedescription>` +
+ `</multistatus>`,
+ wantCode: StatusMulti,
+ }, {
+ desc: "no response written",
+ // default of http.responseWriter
+ wantCode: http.StatusOK,
+ }, {
+ desc: "no response written (with description)",
+ respdesc: "too bad",
+ // default of http.responseWriter
+ wantCode: http.StatusOK,
+ }, {
+ desc: "empty multistatus with header",
+ writeHeader: true,
+ wantXML: `<multistatus xmlns="DAV:"></multistatus>`,
+ wantCode: StatusMulti,
+ }, {
+ desc: "bad: no href",
+ responses: []response{{
+ Propstat: []propstat{{
+ Prop: []Property{{
+ XMLName: xml.Name{
+ Space: "http://example.com/",
+ Local: "foo",
+ },
+ }},
+ Status: "HTTP/1.1 200 OK",
+ }},
+ }},
+ wantErr: errInvalidResponse,
+ // default of http.responseWriter
+ wantCode: http.StatusOK,
+ }, {
+ desc: "bad: multiple hrefs and no status",
+ responses: []response{{
+ Href: []string{"http://example.com/foo", "http://example.com/bar"},
+ }},
+ wantErr: errInvalidResponse,
+ // default of http.responseWriter
+ wantCode: http.StatusOK,
+ }, {
+ desc: "bad: one href and no propstat",
+ responses: []response{{
+ Href: []string{"http://example.com/foo"},
+ }},
+ wantErr: errInvalidResponse,
+ // default of http.responseWriter
+ wantCode: http.StatusOK,
+ }, {
+ desc: "bad: status with one href and propstat",
+ responses: []response{{
+ Href: []string{"http://example.com/foo"},
+ Propstat: []propstat{{
+ Prop: []Property{{
+ XMLName: xml.Name{
+ Space: "http://example.com/",
+ Local: "foo",
+ },
+ }},
+ Status: "HTTP/1.1 200 OK",
+ }},
+ Status: "HTTP/1.1 200 OK",
+ }},
+ wantErr: errInvalidResponse,
+ // default of http.responseWriter
+ wantCode: http.StatusOK,
+ }, {
+ desc: "bad: multiple hrefs and propstat",
+ responses: []response{{
+ Href: []string{
+ "http://example.com/foo",
+ "http://example.com/bar",
+ },
+ Propstat: []propstat{{
+ Prop: []Property{{
+ XMLName: xml.Name{
+ Space: "http://example.com/",
+ Local: "foo",
+ },
+ }},
+ Status: "HTTP/1.1 200 OK",
+ }},
+ }},
+ wantErr: errInvalidResponse,
+ // default of http.responseWriter
+ wantCode: http.StatusOK,
+ }}
+
+ n := xmlNormalizer{omitWhitespace: true}
+loop:
+ for _, tc := range testCases {
+ rec := httptest.NewRecorder()
+ w := multistatusWriter{w: rec, responseDescription: tc.respdesc}
+ if tc.writeHeader {
+ if err := w.writeHeader(); err != nil {
+ t.Errorf("%s: got writeHeader error %v, want nil", tc.desc, err)
+ continue
+ }
+ }
+ for _, r := range tc.responses {
+ if err := w.write(&r); err != nil {
+ if err != tc.wantErr {
+ t.Errorf("%s: got write error %v, want %v",
+ tc.desc, err, tc.wantErr)
+ }
+ continue loop
+ }
+ }
+ if err := w.close(); err != tc.wantErr {
+ t.Errorf("%s: got close error %v, want %v",
+ tc.desc, err, tc.wantErr)
+ continue
+ }
+ if rec.Code != tc.wantCode {
+ t.Errorf("%s: got HTTP status code %d, want %d\n",
+ tc.desc, rec.Code, tc.wantCode)
+ continue
+ }
+ gotXML := rec.Body.String()
+ eq, err := n.equalXML(strings.NewReader(gotXML), strings.NewReader(tc.wantXML))
+ if err != nil {
+ t.Errorf("%s: equalXML: %v", tc.desc, err)
+ continue
+ }
+ if !eq {
+ t.Errorf("%s: XML body\ngot %s\nwant %s", tc.desc, gotXML, tc.wantXML)
+ }
+ }
+}
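+
+// The table above pins down the write contract of multistatusWriter: a
+// response carries either a single href with propstats, or one or more hrefs
+// with a status, and close terminates the multistatus body once anything has
+// been written. The sketch below is illustrative only; the function name and
+// its hrefs argument are assumptions, not part of the package API.
+func writeMultistatusSketch(rw http.ResponseWriter, hrefs []string) error {
+ mw := multistatusWriter{w: rw}
+ for _, href := range hrefs {
+ r := response{
+ Href: []string{href},
+ Status: "HTTP/1.1 200 OK",
+ }
+ // write emits the 207 header and the opening <multistatus> before
+ // the first response.
+ if err := mw.write(&r); err != nil {
+ return err
+ }
+ }
+ // close writes the closing </multistatus> tag; it is a no-op if no
+ // response was written.
+ return mw.close()
+}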
+
+func TestReadProppatch(t *testing.T) {
+ ppStr := func(pps []Proppatch) string {
+ var outer []string
+ for _, pp := range pps {
+ var inner []string
+ for _, p := range pp.Props {
+ inner = append(inner, fmt.Sprintf("{XMLName: %q, Lang: %q, InnerXML: %q}",
+ p.XMLName, p.Lang, p.InnerXML))
+ }
+ outer = append(outer, fmt.Sprintf("{Remove: %t, Props: [%s]}",
+ pp.Remove, strings.Join(inner, ", ")))
+ }
+ return "[" + strings.Join(outer, ", ") + "]"
+ }
+
+ testCases := []struct {
+ desc string
+ input string
+ wantPP []Proppatch
+ wantStatus int
+ }{{
+ desc: "proppatch: section 9.2 (with simple property value)",
+ input: `` +
+ `<?xml version="1.0" encoding="utf-8" ?>` +
+ `<D:propertyupdate xmlns:D="DAV:"` +
+ ` xmlns:Z="http://ns.example.com/z/">` +
+ ` <D:set>` +
+ ` <D:prop><Z:Authors>somevalue</Z:Authors></D:prop>` +
+ ` </D:set>` +
+ ` <D:remove>` +
+ ` <D:prop><Z:Copyright-Owner/></D:prop>` +
+ ` </D:remove>` +
+ `</D:propertyupdate>`,
+ wantPP: []Proppatch{{
+ Props: []Property{{
+ xml.Name{Space: "http://ns.example.com/z/", Local: "Authors"},
+ "",
+ []byte(`somevalue`),
+ }},
+ }, {
+ Remove: true,
+ Props: []Property{{
+ xml.Name{Space: "http://ns.example.com/z/", Local: "Copyright-Owner"},
+ "",
+ nil,
+ }},
+ }},
+ }, {
+ desc: "proppatch: lang attribute on prop",
+ input: `` +
+ `<?xml version="1.0" encoding="utf-8" ?>` +
+ `<D:propertyupdate xmlns:D="DAV:">` +
+ ` <D:set>` +
+ ` <D:prop xml:lang="en">` +
+ ` <foo xmlns="http://example.com/ns"/>` +
+ ` </D:prop>` +
+ ` </D:set>` +
+ `</D:propertyupdate>`,
+ wantPP: []Proppatch{{
+ Props: []Property{{
+ xml.Name{Space: "http://example.com/ns", Local: "foo"},
+ "en",
+ nil,
+ }},
+ }},
+ }, {
+ desc: "bad: remove with value",
+ input: `` +
+ `<?xml version="1.0" encoding="utf-8" ?>` +
+ `<D:propertyupdate xmlns:D="DAV:"` +
+ ` xmlns:Z="http://ns.example.com/z/">` +
+ ` <D:remove>` +
+ ` <D:prop>` +
+ ` <Z:Authors>` +
+ ` <Z:Author>Jim Whitehead</Z:Author>` +
+ ` </Z:Authors>` +
+ ` </D:prop>` +
+ ` </D:remove>` +
+ `</D:propertyupdate>`,
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "bad: empty propertyupdate",
+ input: `` +
+ `<?xml version="1.0" encoding="utf-8" ?>` +
+ `<D:propertyupdate xmlns:D="DAV:"` +
+ `</D:propertyupdate>`,
+ wantStatus: http.StatusBadRequest,
+ }, {
+ desc: "bad: empty prop",
+ input: `` +
+ `<?xml version="1.0" encoding="utf-8" ?>` +
+ `<D:propertyupdate xmlns:D="DAV:"` +
+ ` xmlns:Z="http://ns.example.com/z/">` +
+ ` <D:remove>` +
+ ` <D:prop/>` +
+ ` </D:remove>` +
+ `</D:propertyupdate>`,
+ wantStatus: http.StatusBadRequest,
+ }}
+
+ for _, tc := range testCases {
+ pp, status, err := readProppatch(strings.NewReader(tc.input))
+ if tc.wantStatus != 0 {
+ if err == nil {
+ t.Errorf("%s: got nil error, want non-nil", tc.desc)
+ continue
+ }
+ } else if err != nil {
+ t.Errorf("%s: %v", tc.desc, err)
+ continue
+ }
+ if status != tc.wantStatus {
+ t.Errorf("%s: got status %d, want %d", tc.desc, status, tc.wantStatus)
+ continue
+ }
+ if !reflect.DeepEqual(pp, tc.wantPP) {
+ t.Errorf("%s: proppatch\ngot %v\nwant %v", tc.desc, ppStr(pp), ppStr(tc.wantPP))
+ }
+ }
+}
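+
+// The cases above describe the readProppatch contract: a PROPPATCH body is
+// split into one Proppatch per <D:set> or <D:remove> block, and malformed
+// input is reported as a non-nil error together with the HTTP status to
+// return (typically 400). A minimal caller might look like the sketch below;
+// the function name and return shape are assumptions, not package API.
+func applyProppatchSketch(body io.Reader) (int, error) {
+ patches, status, err := readProppatch(body)
+ if err != nil {
+ // status holds the error code to send, e.g. http.StatusBadRequest.
+ return status, err
+ }
+ for _, patch := range patches {
+ for _, p := range patch.Props {
+ // patch.Remove distinguishes removals from sets; for sets,
+ // p.InnerXML carries the property value.
+ _ = p.XMLName
+ }
+ }
+ return http.StatusOK, nil
+}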
+
+func TestUnmarshalXMLValue(t *testing.T) {
+ testCases := []struct {
+ desc string
+ input string
+ wantVal string
+ }{{
+ desc: "simple char data",
+ input: "<root>foo</root>",
+ wantVal: "foo",
+ }, {
+ desc: "empty element",
+ input: "<root><foo/></root>",
+ wantVal: "<foo/>",
+ }, {
+ desc: "preserve namespace",
+ input: `<root><foo xmlns="bar"/></root>`,
+ wantVal: `<foo xmlns="bar"/>`,
+ }, {
+ desc: "preserve root element namespace",
+ input: `<root xmlns:bar="bar"><bar:foo/></root>`,
+ wantVal: `<foo xmlns="bar"/>`,
+ }, {
+ desc: "preserve whitespace",
+ input: "<root> \t </root>",
+ wantVal: " \t ",
+ }, {
+ desc: "preserve mixed content",
+ input: `<root xmlns="bar"> <foo>a<bam xmlns="baz"/> </foo> </root>`,
+ wantVal: ` <foo xmlns="bar">a<bam xmlns="baz"/> </foo> `,
+ }, {
+ desc: "section 9.2",
+ input: `` +
+ `<Z:Authors xmlns:Z="http://ns.example.com/z/">` +
+ ` <Z:Author>Jim Whitehead</Z:Author>` +
+ ` <Z:Author>Roy Fielding</Z:Author>` +
+ `</Z:Authors>`,
+ wantVal: `` +
+ ` <Author xmlns="http://ns.example.com/z/">Jim Whitehead</Author>` +
+ ` <Author xmlns="http://ns.example.com/z/">Roy Fielding</Author>`,
+ }, {
+ desc: "section 4.3.1 (mixed content)",
+ input: `` +
+ `<x:author ` +
+ ` xmlns:x='http://example.com/ns' ` +
+ ` xmlns:D="DAV:">` +
+ ` <x:name>Jane Doe</x:name>` +
+ ` <!-- Jane's contact info -->` +
+ ` <x:uri type='email'` +
+ ` added='2005-11-26'>mailto:jane.doe@example.com</x:uri>` +
+ ` <x:uri type='web'` +
+ ` added='2005-11-27'>http://www.example.com</x:uri>` +
+ ` <x:notes xmlns:h='http://www.w3.org/1999/xhtml'>` +
+ ` Jane has been working way <h:em>too</h:em> long on the` +
+ ` long-awaited revision of <![CDATA[<RFC2518>]]>.` +
+ ` </x:notes>` +
+ `</x:author>`,
+ wantVal: `` +
+ ` <name xmlns="http://example.com/ns">Jane Doe</name>` +
+ ` ` +
+ ` <uri type='email'` +
+ ` xmlns="http://example.com/ns" ` +
+ ` added='2005-11-26'>mailto:jane.doe@example.com</uri>` +
+ ` <uri added='2005-11-27'` +
+ ` type='web'` +
+ ` xmlns="http://example.com/ns">http://www.example.com</uri>` +
+ ` <notes xmlns="http://example.com/ns" ` +
+ ` xmlns:h="http://www.w3.org/1999/xhtml">` +
+ ` Jane has been working way <h:em>too</h:em> long on the` +
+ ` long-awaited revision of &lt;RFC2518&gt;.` +
+ ` </notes>`,
+ }}
+
+ var n xmlNormalizer
+ for _, tc := range testCases {
+ d := ixml.NewDecoder(strings.NewReader(tc.input))
+ var v xmlValue
+ if err := d.Decode(&v); err != nil {
+ t.Errorf("%s: got error %v, want nil", tc.desc, err)
+ continue
+ }
+ eq, err := n.equalXML(bytes.NewReader(v), strings.NewReader(tc.wantVal))
+ if err != nil {
+ t.Errorf("%s: equalXML: %v", tc.desc, err)
+ continue
+ }
+ if !eq {
+ t.Errorf("%s:\ngot %s\nwant %s", tc.desc, string(v), tc.wantVal)
+ }
+ }
+}
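+
+// As the cases above show, xmlValue captures an element's inner XML with
+// namespace prefixes rewritten into explicit xmlns declarations, which is how
+// PROPPATCH property values are captured for Property.InnerXML. A minimal
+// decoding sketch; the function name is an assumption used only for
+// illustration:
+func decodeXMLValueSketch(raw string) ([]byte, error) {
+ var v xmlValue
+ if err := ixml.NewDecoder(strings.NewReader(raw)).Decode(&v); err != nil {
+ return nil, err
+ }
+ return []byte(v), nil
+}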
+
+// xmlNormalizer normalizes XML.
+type xmlNormalizer struct {
+ // omitWhitespace instructs the normalizer to ignore whitespace between element tags.
+ omitWhitespace bool
+ // omitComments instructs the normalizer to ignore XML comments.
+ omitComments bool
+}
+
+// normalize writes the normalized XML content of r to w. It applies the
+ // following rules:
+//
+// * Rename namespace prefixes according to an internal heuristic.
+// * Remove unnecessary namespace declarations.
+// * Sort attributes in XML start elements in lexical order of their
+// fully qualified name.
+// * Remove XML directives and processing instructions.
+// * Remove CDATA between XML tags that only contains whitespace, if
+// instructed to do so.
+// * Remove comments, if instructed to do so.
+//
+func (n *xmlNormalizer) normalize(w io.Writer, r io.Reader) error {
+ d := ixml.NewDecoder(r)
+ e := ixml.NewEncoder(w)
+ for {
+ t, err := d.Token()
+ if err != nil {
+ if t == nil && err == io.EOF {
+ break
+ }
+ return err
+ }
+ switch val := t.(type) {
+ case ixml.Directive, ixml.ProcInst:
+ continue
+ case ixml.Comment:
+ if n.omitComments {
+ continue
+ }
+ case ixml.CharData:
+ if n.omitWhitespace && len(bytes.TrimSpace(val)) == 0 {
+ continue
+ }
+ case ixml.StartElement:
+ start, _ := ixml.CopyToken(val).(ixml.StartElement)
+ attr := start.Attr[:0]
+ for _, a := range start.Attr {
+ if a.Name.Space == "xmlns" || a.Name.Local == "xmlns" {
+ continue
+ }
+ attr = append(attr, a)
+ }
+ sort.Sort(byName(attr))
+ start.Attr = attr
+ t = start
+ }
+ err = e.EncodeToken(t)
+ if err != nil {
+ return err
+ }
+ }
+ return e.Flush()
+}
+
+// equalXML tests for equality of the normalized XML contents of a and b.
+func (n *xmlNormalizer) equalXML(a, b io.Reader) (bool, error) {
+ var buf bytes.Buffer
+ if err := n.normalize(&buf, a); err != nil {
+ return false, err
+ }
+ normA := buf.String()
+ buf.Reset()
+ if err := n.normalize(&buf, b); err != nil {
+ return false, err
+ }
+ normB := buf.String()
+ return normA == normB, nil
+}
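+
+// For example, two documents that differ only in namespace prefixes and in
+// whitespace between tags normalize to the same bytes when omitWhitespace is
+// set, so equalXML reports them as equal. The literals below are illustrative
+// and not taken from the tests above.
+func equalXMLSketch() (bool, error) {
+ n := xmlNormalizer{omitWhitespace: true}
+ a := `<D:prop xmlns:D="DAV:"> <D:getetag>"xyz"</D:getetag> </D:prop>`
+ b := `<prop xmlns="DAV:"><getetag>"xyz"</getetag></prop>`
+ return n.equalXML(strings.NewReader(a), strings.NewReader(b))
+}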
+
+type byName []ixml.Attr
+
+func (a byName) Len() int { return len(a) }
+func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byName) Less(i, j int) bool {
+ if a[i].Name.Space != a[j].Name.Space {
+ return a[i].Name.Space < a[j].Name.Space
+ }
+ return a[i].Name.Local < a[j].Name.Local
+}