Diffstat (limited to 'vendor/github.com/spf13')
-rw-r--r--  vendor/github.com/spf13/afero/.travis.yml | 20
-rw-r--r--  vendor/github.com/spf13/afero/LICENSE.txt | 174
-rw-r--r--  vendor/github.com/spf13/afero/README.md | 449
-rw-r--r--  vendor/github.com/spf13/afero/afero.go | 108
-rw-r--r--  vendor/github.com/spf13/afero/afero_test.go | 699
-rw-r--r--  vendor/github.com/spf13/afero/appveyor.yml | 15
-rw-r--r--  vendor/github.com/spf13/afero/basepath.go | 145
-rw-r--r--  vendor/github.com/spf13/afero/basepath_test.go | 142
-rw-r--r--  vendor/github.com/spf13/afero/cacheOnReadFs.go | 295
-rw-r--r--  vendor/github.com/spf13/afero/composite_test.go | 368
-rw-r--r--  vendor/github.com/spf13/afero/const_bsds.go | 22
-rw-r--r--  vendor/github.com/spf13/afero/const_win_unix.go | 25
-rw-r--r--  vendor/github.com/spf13/afero/copyOnWriteFs.go | 253
-rw-r--r--  vendor/github.com/spf13/afero/copyOnWriteFs_test.go | 23
-rw-r--r--  vendor/github.com/spf13/afero/httpFs.go | 110
-rw-r--r--  vendor/github.com/spf13/afero/ioutil.go | 230
-rw-r--r--  vendor/github.com/spf13/afero/ioutil_test.go | 112
-rw-r--r--  vendor/github.com/spf13/afero/mem/dir.go | 37
-rw-r--r--  vendor/github.com/spf13/afero/mem/dirmap.go | 43
-rw-r--r--  vendor/github.com/spf13/afero/mem/file.go | 285
-rw-r--r--  vendor/github.com/spf13/afero/memmap.go | 361
-rw-r--r--  vendor/github.com/spf13/afero/memmap_test.go | 345
-rw-r--r--  vendor/github.com/spf13/afero/memradix.go | 14
-rw-r--r--  vendor/github.com/spf13/afero/os.go | 94
-rw-r--r--  vendor/github.com/spf13/afero/path.go | 108
-rw-r--r--  vendor/github.com/spf13/afero/path_test.go | 69
-rw-r--r--  vendor/github.com/spf13/afero/readonlyfs.go | 70
-rw-r--r--  vendor/github.com/spf13/afero/regexpfs.go | 214
-rw-r--r--  vendor/github.com/spf13/afero/ro_regexp_test.go | 96
-rw-r--r--  vendor/github.com/spf13/afero/sftpfs/file.go | 95
-rw-r--r--  vendor/github.com/spf13/afero/sftpfs/sftp.go | 129
-rw-r--r--  vendor/github.com/spf13/afero/sftpfs/sftp_test_go | 286
-rw-r--r--  vendor/github.com/spf13/afero/unionFile.go | 274
-rw-r--r--  vendor/github.com/spf13/afero/util.go | 331
-rw-r--r--  vendor/github.com/spf13/afero/util_test.go | 450
-rw-r--r--  vendor/github.com/spf13/cast/.gitignore | 25
-rw-r--r--  vendor/github.com/spf13/cast/.travis.yml | 14
-rw-r--r--  vendor/github.com/spf13/cast/LICENSE | 21
-rw-r--r--  vendor/github.com/spf13/cast/Makefile | 38
-rw-r--r--  vendor/github.com/spf13/cast/README.md | 75
-rw-r--r--  vendor/github.com/spf13/cast/cast.go | 153
-rw-r--r--  vendor/github.com/spf13/cast/cast_test.go | 1151
-rw-r--r--  vendor/github.com/spf13/cast/caste.go | 1117
-rw-r--r--  vendor/github.com/spf13/cobra/command.go | 1
-rw-r--r--  vendor/github.com/spf13/jwalterweatherman/.gitignore | 22
-rw-r--r--  vendor/github.com/spf13/jwalterweatherman/LICENSE | 21
-rw-r--r--  vendor/github.com/spf13/jwalterweatherman/README.md | 148
-rw-r--r--  vendor/github.com/spf13/jwalterweatherman/default_notepad.go | 113
-rw-r--r--  vendor/github.com/spf13/jwalterweatherman/default_notepad_test.go | 102
-rw-r--r--  vendor/github.com/spf13/jwalterweatherman/log_counter.go | 56
-rw-r--r--  vendor/github.com/spf13/jwalterweatherman/notepad.go | 195
-rw-r--r--  vendor/github.com/spf13/jwalterweatherman/notepad_test.go | 41
-rw-r--r--  vendor/github.com/spf13/viper/.gitignore | 24
-rw-r--r--  vendor/github.com/spf13/viper/.travis.yml | 26
-rw-r--r--  vendor/github.com/spf13/viper/LICENSE | 21
-rw-r--r--  vendor/github.com/spf13/viper/README.md | 621
-rw-r--r--  vendor/github.com/spf13/viper/flags.go | 57
-rw-r--r--  vendor/github.com/spf13/viper/flags_test.go | 66
-rw-r--r--  vendor/github.com/spf13/viper/nohup.out | 1
-rw-r--r--  vendor/github.com/spf13/viper/overrides_test.go | 173
-rw-r--r--  vendor/github.com/spf13/viper/remote/remote.go | 107
-rw-r--r--  vendor/github.com/spf13/viper/util.go | 282
-rw-r--r--  vendor/github.com/spf13/viper/util_test.go | 55
-rw-r--r--  vendor/github.com/spf13/viper/viper.go | 1550
-rw-r--r--  vendor/github.com/spf13/viper/viper_test.go | 1154
65 files changed, 13921 insertions, 0 deletions
diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml
new file mode 100644
index 000000000..6c296d293
--- /dev/null
+++ b/vendor/github.com/spf13/afero/.travis.yml
@@ -0,0 +1,20 @@
+sudo: false
+language: go
+
+go:
+ - 1.7.5
+ - 1.8
+ - tip
+
+os:
+ - linux
+ - osx
+
+matrix:
+ allow_failures:
+ - go: tip
+ fast_finish: true
+
+script:
+ - go test -v ./...
+ - go build
diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt
new file mode 100644
index 000000000..298f0e266
--- /dev/null
+++ b/vendor/github.com/spf13/afero/LICENSE.txt
@@ -0,0 +1,174 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md
new file mode 100644
index 000000000..d9e332730
--- /dev/null
+++ b/vendor/github.com/spf13/afero/README.md
@@ -0,0 +1,449 @@
+![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png)
+
+A FileSystem Abstraction System for Go
+
+[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+# Overview
+
+Afero is a filesystem framework providing a simple, uniform and universal API
+for interacting with any filesystem, as an abstraction layer providing interfaces,
+types and methods. Afero has an exceptionally clean interface and simple design
+without needless constructors or initialization methods.
+
+Afero is also a library providing a base set of interoperable backend
+filesystems that make it easy to work with afero while retaining all the power
+and benefit of the os and ioutil packages.
+
+Afero provides significant improvements over using the os package alone, most
+notably the ability to create mock and testing filesystems without relying on the disk.
+
+It is suitable for use in any situation where you would consider using the OS
+package as it provides an additional abstraction that makes it easy to use a
+memory backed file system during testing. It also adds support for the http
+filesystem for full interoperability.
+
+
+## Afero Features
+
+* A single consistent API for accessing a variety of filesystems
+* Interoperation between a variety of file system types
+* A set of interfaces to encourage and enforce interoperability between backends
+* An atomic cross platform memory backed file system
+* Support for compositional (union) file systems by combining multiple file systems acting as one
+* Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
+* A set of utility functions ported from io, ioutil & hugo to be afero aware
+
+
+# Using Afero
+
+Afero is easy to use and easier to adopt.
+
+A few different ways you could use Afero:
+
+* Use the interfaces alone to define your own file system.
+* Use it as a wrapper for the OS packages.
+* Define different filesystems for different parts of your application.
+* Use Afero for mock filesystems while testing.
+
+## Step 1: Install Afero
+
+First use go get to install the latest version of the library.
+
+ $ go get github.com/spf13/afero
+
+Next include Afero in your application.
+```go
+import "github.com/spf13/afero"
+```
+
+## Step 2: Declare a backend
+
+First define a package variable and set it to a pointer to a filesystem.
+```go
+var AppFs afero.Fs = afero.NewMemMapFs()
+
+// or
+
+var AppFs afero.Fs = afero.NewOsFs()
+```
+It is important to note that if you repeat the composite literal you
+will be using a completely new and isolated filesystem. In the case of
+OsFs it will still use the same underlying filesystem but will reduce
+the ability to drop in other filesystems as desired.
+
+## Step 3: Use it like you would the OS package
+
+Throughout your application use any function and method like you normally
+would.
+
+So if my application previously had:
+```go
+os.Open("/tmp/foo")
+```
+We would replace it with a call to `AppFs.Open("/tmp/foo")`.
+
+`AppFs` being the variable we defined above.
+
+
+## List of all available functions
+
+File System Methods Available:
+```go
+Chmod(name string, mode os.FileMode) : error
+Chtimes(name string, atime time.Time, mtime time.Time) : error
+Create(name string) : File, error
+Mkdir(name string, perm os.FileMode) : error
+MkdirAll(path string, perm os.FileMode) : error
+Name() : string
+Open(name string) : File, error
+OpenFile(name string, flag int, perm os.FileMode) : File, error
+Remove(name string) : error
+RemoveAll(path string) : error
+Rename(oldname, newname string) : error
+Stat(name string) : os.FileInfo, error
+```
+File Interfaces and Methods Available:
+```go
+io.Closer
+io.Reader
+io.ReaderAt
+io.Seeker
+io.Writer
+io.WriterAt
+
+Name() : string
+Readdir(count int) : []os.FileInfo, error
+Readdirnames(n int) : []string, error
+Stat() : os.FileInfo, error
+Sync() : error
+Truncate(size int64) : error
+WriteString(s string) : ret int, err error
+```
+In some applications it may make sense to define a new package that
+simply exports the file system variable for easy access from anywhere.
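+
+For example, a minimal sketch of such a package (the package and variable
+names here are illustrative, not part of Afero):
+```go
+// Package fsys exposes the filesystem used by the rest of the application.
+package fsys
+
+import "github.com/spf13/afero"
+
+// FS defaults to the real OS filesystem and can be swapped for an
+// afero.NewMemMapFs() in tests.
+var FS afero.Fs = afero.NewOsFs()
+```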
+
+## Using Afero's utility functions
+
+Afero provides a set of functions to make it easier to use the underlying file systems.
+These functions have been primarily ported from io & ioutil with some developed for Hugo.
+
+The afero utilities support all afero compatible backends.
+
+The list of utilities includes:
+
+```go
+DirExists(path string) (bool, error)
+Exists(path string) (bool, error)
+FileContainsBytes(filename string, subslice []byte) (bool, error)
+GetTempDir(subPath string) string
+IsDir(path string) (bool, error)
+IsEmpty(path string) (bool, error)
+ReadDir(dirname string) ([]os.FileInfo, error)
+ReadFile(filename string) ([]byte, error)
+SafeWriteReader(path string, r io.Reader) (err error)
+TempDir(dir, prefix string) (name string, err error)
+TempFile(dir, prefix string) (f File, err error)
+Walk(root string, walkFn filepath.WalkFunc) error
+WriteFile(filename string, data []byte, perm os.FileMode) error
+WriteReader(path string, r io.Reader) (err error)
+```
+For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero)
+
+They can be used in two different ways. You can either call
+them directly, where the first parameter of each function is the file
+system, or you can declare a new `Afero`, a custom type used to bind these
+functions as methods to a given filesystem.
+
+### Calling utilities directly
+
+```go
+fs := new(afero.MemMapFs)
+f, err := afero.TempFile(fs, "", "ioutil-test")
+```
+
+### Calling via Afero
+
+```go
+fs := afero.NewMemMapFs()
+afs := &afero.Afero{Fs: fs}
+f, err := afs.TempFile("", "ioutil-test")
+```
+
+## Using Afero for Testing
+
+There is a large benefit to using a mock filesystem for testing. It has a
+completely blank state every time it is initialized and is easily
+reproducible regardless of OS. You can create files to your heart’s content
+and the file access would be fast while also saving you from all the annoying
+issues with deleting temporary files, Windows file locking, etc. The MemMapFs
+backend is perfect for testing.
+
+* Much faster than performing I/O operations on disk
+* Avoid security issues and permissions
+* Far more control. 'rm -rf /' with confidence
+* Test setup is far easier to do
+* No test cleanup needed
+
+One way to accomplish this is to define a variable as mentioned above.
+In your application this will be set to afero.NewOsFs(); during testing you
+can set it to afero.NewMemMapFs().
+
+It wouldn't be uncommon to have each test initialize a blank slate memory
+backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere
+appropriate in my application code. This approach ensures that tests are order
+independent, with no test relying on the state left by an earlier test.
+
+Then in my tests I would initialize a new MemMapFs for each test:
+```go
+func TestExist(t *testing.T) {
+ appFS := afero.NewMemMapFs()
+ // create test files and directories
+ appFS.MkdirAll("src/a", 0755)
+ afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644)
+ afero.WriteFile(appFS, "src/c", []byte("file c"), 0644)
+ name := "src/c"
+ _, err := appFS.Stat(name)
+ if os.IsNotExist(err) {
+ t.Errorf("file \"%s\" does not exist.\n", name)
+ }
+}
+```
+
+# Available Backends
+
+## Operating System Native
+
+### OsFs
+
+The first is simply a wrapper around the native OS calls. This makes it
+very easy to use as all of the calls are the same as the existing OS
+calls. It also makes it trivial to have your code use the OS during
+operation and a mock filesystem during testing or as needed.
+
+```go
+appfs := afero.NewOsFs()
+appfs.MkdirAll("src/a", 0755)
+```
+
+## Memory Backed Storage
+
+### MemMapFs
+
+Afero also provides a fully atomic memory backed filesystem, perfect for use
+in mocking and for avoiding disk I/O when persistence isn’t necessary. It is
+fully concurrent and safe to use from multiple goroutines.
+
+```go
+mm := afero.NewMemMapFs()
+mm.MkdirAll("src/a", 0755)
+```
+
+#### InMemoryFile
+
+As part of MemMapFs, Afero also provides an atomic, fully concurrent memory
+backed file implementation. This can be used in other memory backed file
+systems with ease. Plans are to add a radix tree memory stored file
+system using InMemoryFile.
+
+## Network Interfaces
+
+### SftpFs
+
+Afero has experimental support for the secure file transfer protocol (SFTP),
+which can be used to perform file operations over an encrypted channel.
+
+## Filtering Backends
+
+### BasePathFs
+
+The BasePathFs restricts all operations to a given path within an Fs.
+The file name given to any operation on this Fs will have the base path
+prepended before the call is passed to the source Fs.
+
+```go
+bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path")
+```
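+
+With the base path above, every name passed to `bp` is interpreted relative to
+`/base/path`. As a sketch (using an illustrative file name):
+```go
+// Creates /base/path/file.txt on the underlying OsFs.
+f, err := bp.Create("/file.txt")
+if err != nil {
+	// handle error
+}
+f.Close()
+```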
+
+### ReadOnlyFs
+
+A thin wrapper around the source Fs providing a read only view.
+
+```go
+fs := afero.NewReadOnlyFs(afero.NewOsFs())
+_, err := fs.Create("/file.txt")
+// err = syscall.EPERM
+```
+
+### RegexpFs
+
+A filtered view on file names; any file NOT matching
+the passed regexp will be treated as non-existing.
+Files not matching the regexp provided will not be created.
+Directories are not filtered.
+
+```go
+fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))
+_, err := fs.Create("/file.html")
+// err = syscall.ENOENT
+```
+
+### HttpFs
+
+Afero provides an http compatible backend which can wrap any of the existing
+backends.
+
+The net/http package requires a slightly different version of Open, one
+that returns an http.File type.
+
+Afero provides an httpFs file system which satisfies this requirement.
+Any Afero FileSystem can be used as an httpFs.
+
+```go
+httpFs := afero.NewHttpFs(<ExistingFS>)
+fileserver := http.FileServer(httpFs.Dir(<PATH>))
+http.Handle("/", fileserver)
+```
+
+## Composite Backends
+
+Afero provides the ability to have two filesystems (or more) act as a single
+file system.
+
+### CacheOnReadFs
+
+The CacheOnReadFs will lazily make copies of any accessed files from the base
+layer into the overlay. Subsequent reads will be pulled from the overlay
+directly, provided the request is within the cache duration of when the copy
+was created in the overlay.
+
+If the base filesystem is writeable, any changes to files will be
+done first to the base, then to the overlay layer. Write calls to open file
+handles like `Write()` or `Truncate()` go to the overlay first.
+
+To write files to the overlay only, you can use the overlay Fs directly (not
+via the union Fs).
+
+Files are cached in the layer for the given time.Duration; a cache duration of
+0 means "forever", i.e. the file will never be re-requested from the base.
+
+A read-only base will make the overlay also read-only but still copy files
+from the base to the overlay when they're not present (or outdated) in the
+caching layer.
+
+```go
+base := afero.NewOsFs()
+layer := afero.NewMemMapFs()
+ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second)
+```
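+
+As a sketch of the read-through behaviour (the file name is illustrative): the
+first read is served from the OsFs base and copied into the MemMapFs layer, and
+reads within the next 100 seconds come from memory.
+```go
+// The first ReadFile goes to the base (OsFs) and caches the file in the
+// MemMapFs layer; repeated reads within 100s are served from memory.
+data, err := afero.ReadFile(ufs, "/config.yaml")
+if err != nil {
+	// handle error
+}
+fmt.Printf("read %d bytes via the caching union\n", len(data))
+```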
+
+### CopyOnWriteFs
+
+The CopyOnWriteFs is a read only base file system with a potentially
+writeable layer on top.
+
+Read operations will first look in the overlay and if not found there, will
+serve the file from the base.
+
+Changes to the file system will only be made in the overlay.
+
+Any attempt to modify a file found only in the base will copy the file to the
+overlay layer before modification (including opening a file with a writable
+handle).
+
+Removing and Renaming files present only in the base layer is not currently
+permitted. If a file is present in the base layer and the overlay, only the
+overlay will be removed/renamed.
+
+```go
+base := afero.NewOsFs()
+roBase := afero.NewReadOnlyFs(base)
+ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs())
+
+fh, _ := ufs.Create("/home/test/file2.txt")
+fh.WriteString("This is a test")
+fh.Close()
+```
+
+In this example all write operations will only occur in memory (MemMapFs)
+leaving the base filesystem (OsFs) untouched.
+
+
+## Desired/possible backends
+
+The following is a short list of possible backends we hope someone will
+implement:
+
+* SSH
+* ZIP
+* TAR
+* S3
+
+# About the project
+
+## What's in the name
+
+Afero comes from the Latin roots Ad-Facere.
+
+**"Ad"** is a prefix meaning "to".
+
+**"Facere"** is a form of the root "faciō" making "make or do".
+
+The literal meaning of afero is "to make" or "to do" which seems very fitting
+for a library that allows one to make files and directories and do things with them.
+
+The English word that shares the same roots as Afero is "affair". Affair shares
+the same concept but as a noun it means "something that is made or done" or "an
+object of a particular type".
+
+It's also nice that unlike some of my other libraries (hugo, cobra, viper) it
+Googles very well.
+
+## Release Notes
+
+* **0.10.0** 2015.12.10
+ * Full compatibility with Windows
+ * Introduction of afero utilities
+ * Test suite rewritten to work cross platform
+ * Normalize paths for MemMapFs
+ * Adding Sync to the file interface
+ * **Breaking Change** Walk and ReadDir have changed parameter order
+ * Moving types used by MemMapFs to a subpackage
+ * General bugfixes and improvements
+* **0.9.0** 2015.11.05
+ * New Walk function similar to filepath.Walk
+ * MemMapFs.OpenFile handles O_CREATE, O_APPEND, O_TRUNC
+ * MemMapFs.Remove now really deletes the file
+ * InMemoryFile.Readdir and Readdirnames work correctly
+ * InMemoryFile functions lock it for concurrent access
+ * Test suite improvements
+* **0.8.0** 2014.10.28
+ * First public version
+ * Interfaces feel ready for people to build using
+ * Interfaces satisfy all known uses
+ * MemMapFs passes the majority of the OS test suite
+ * OsFs passes the majority of the OS test suite
+
+## Contributing
+
+1. Fork it
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Commit your changes (`git commit -am 'Add some feature'`)
+4. Push to the branch (`git push origin my-new-feature`)
+5. Create new Pull Request
+
+## Contributors
+
+Names in no particular order:
+
+* [spf13](https://github.com/spf13)
+* [jaqx0r](https://github.com/jaqx0r)
+* [mbertschler](https://github.com/mbertschler)
+* [xor-gate](https://github.com/xor-gate)
+
+## License
+
+Afero is released under the Apache 2.0 license. See
+[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt)
diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go
new file mode 100644
index 000000000..f5b5e127c
--- /dev/null
+++ b/vendor/github.com/spf13/afero/afero.go
@@ -0,0 +1,108 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package afero provides types and methods for interacting with the filesystem,
+// as an abstraction layer.
+
+// Afero also provides a few implementations that are mostly interoperable. One that
+// uses the operating system filesystem, one that uses memory to store files
+// (cross platform) and an interface that should be implemented if you want to
+// provide your own filesystem.
+
+package afero
+
+import (
+ "errors"
+ "io"
+ "os"
+ "time"
+)
+
+type Afero struct {
+ Fs
+}
+
+// File represents a file in the filesystem.
+type File interface {
+ io.Closer
+ io.Reader
+ io.ReaderAt
+ io.Seeker
+ io.Writer
+ io.WriterAt
+
+ Name() string
+ Readdir(count int) ([]os.FileInfo, error)
+ Readdirnames(n int) ([]string, error)
+ Stat() (os.FileInfo, error)
+ Sync() error
+ Truncate(size int64) error
+ WriteString(s string) (ret int, err error)
+}
+
+// Fs is the filesystem interface.
+//
+// Any simulated or real filesystem should implement this interface.
+type Fs interface {
+ // Create creates a file in the filesystem, returning the file and an
+ // error, if any happens.
+ Create(name string) (File, error)
+
+ // Mkdir creates a directory in the filesystem, return an error if any
+ // happens.
+ Mkdir(name string, perm os.FileMode) error
+
+ // MkdirAll creates a directory path and all parents that does not exist
+ // yet.
+ MkdirAll(path string, perm os.FileMode) error
+
+ // Open opens a file, returning it or an error, if any happens.
+ Open(name string) (File, error)
+
+ // OpenFile opens a file using the given flags and the given mode.
+ OpenFile(name string, flag int, perm os.FileMode) (File, error)
+
+ // Remove removes a file identified by name, returning an error, if any
+ // happens.
+ Remove(name string) error
+
+ // RemoveAll removes a directory path and any children it contains. It
+ // does not fail if the path does not exist (return nil).
+ RemoveAll(path string) error
+
+ // Rename renames a file.
+ Rename(oldname, newname string) error
+
+ // Stat returns a FileInfo describing the named file, or an error, if any
+ // happens.
+ Stat(name string) (os.FileInfo, error)
+
+ // The name of this FileSystem
+ Name() string
+
+ //Chmod changes the mode of the named file to mode.
+ Chmod(name string, mode os.FileMode) error
+
+ //Chtimes changes the access and modification times of the named file
+ Chtimes(name string, atime time.Time, mtime time.Time) error
+}
+
+var (
+ ErrFileClosed = errors.New("File is closed")
+ ErrOutOfRange = errors.New("Out of range")
+ ErrTooLarge = errors.New("Too large")
+ ErrFileNotFound = os.ErrNotExist
+ ErrFileExists = os.ErrExist
+ ErrDestinationExists = os.ErrExist
+)
diff --git a/vendor/github.com/spf13/afero/afero_test.go b/vendor/github.com/spf13/afero/afero_test.go
new file mode 100644
index 000000000..526afa975
--- /dev/null
+++ b/vendor/github.com/spf13/afero/afero_test.go
@@ -0,0 +1,699 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+// Copyright 2009 The Go Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "syscall"
+ "testing"
+)
+
+var testName = "test.txt"
+var Fss = []Fs{&MemMapFs{}, &OsFs{}}
+
+var testRegistry map[Fs][]string = make(map[Fs][]string)
+
+func testDir(fs Fs) string {
+ name, err := TempDir(fs, "", "afero")
+ if err != nil {
+ panic(fmt.Sprint("unable to work with test dir", err))
+ }
+ testRegistry[fs] = append(testRegistry[fs], name)
+
+ return name
+}
+
+func tmpFile(fs Fs) File {
+ x, err := TempFile(fs, "", "afero")
+
+ if err != nil {
+ panic(fmt.Sprint("unable to work with temp file", err))
+ }
+
+ testRegistry[fs] = append(testRegistry[fs], x.Name())
+
+ return x
+}
+
+//Read with length 0 should not return EOF.
+func TestRead0(t *testing.T) {
+ for _, fs := range Fss {
+ f := tmpFile(fs)
+ defer f.Close()
+ f.WriteString("Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.")
+
+ var b []byte
+ // b := make([]byte, 0)
+ n, err := f.Read(b)
+ if n != 0 || err != nil {
+ t.Errorf("%v: Read(0) = %d, %v, want 0, nil", fs.Name(), n, err)
+ }
+ f.Seek(0, 0)
+ b = make([]byte, 100)
+ n, err = f.Read(b)
+ if n <= 0 || err != nil {
+ t.Errorf("%v: Read(100) = %d, %v, want >0, nil", fs.Name(), n, err)
+ }
+ }
+}
+
+func TestOpenFile(t *testing.T) {
+ defer removeAllTestFiles(t)
+ for _, fs := range Fss {
+ tmp := testDir(fs)
+ path := filepath.Join(tmp, testName)
+
+ f, err := fs.OpenFile(path, os.O_RDWR|os.O_CREATE, 0600)
+ if err != nil {
+ t.Error(fs.Name(), "OpenFile (O_CREATE) failed:", err)
+ continue
+ }
+ io.WriteString(f, "initial")
+ f.Close()
+
+ f, err = fs.OpenFile(path, os.O_WRONLY|os.O_APPEND, 0600)
+ if err != nil {
+ t.Error(fs.Name(), "OpenFile (O_APPEND) failed:", err)
+ continue
+ }
+ io.WriteString(f, "|append")
+ f.Close()
+
+ f, err = fs.OpenFile(path, os.O_RDONLY, 0600)
+ contents, _ := ioutil.ReadAll(f)
+ expectedContents := "initial|append"
+ if string(contents) != expectedContents {
+ t.Errorf("%v: appending, expected '%v', got: '%v'", fs.Name(), expectedContents, string(contents))
+ }
+ f.Close()
+
+ f, err = fs.OpenFile(path, os.O_RDWR|os.O_TRUNC, 0600)
+ if err != nil {
+ t.Error(fs.Name(), "OpenFile (O_TRUNC) failed:", err)
+ continue
+ }
+ contents, _ = ioutil.ReadAll(f)
+ if string(contents) != "" {
+ t.Errorf("%v: expected truncated file, got: '%v'", fs.Name(), string(contents))
+ }
+ f.Close()
+ }
+}
+
+func TestCreate(t *testing.T) {
+ defer removeAllTestFiles(t)
+ for _, fs := range Fss {
+ tmp := testDir(fs)
+ path := filepath.Join(tmp, testName)
+
+ f, err := fs.Create(path)
+ if err != nil {
+ t.Error(fs.Name(), "Create failed:", err)
+ f.Close()
+ continue
+ }
+ io.WriteString(f, "initial")
+ f.Close()
+
+ f, err = fs.Create(path)
+ if err != nil {
+ t.Error(fs.Name(), "Create failed:", err)
+ f.Close()
+ continue
+ }
+ secondContent := "second create"
+ io.WriteString(f, secondContent)
+ f.Close()
+
+ f, err = fs.Open(path)
+ if err != nil {
+ t.Error(fs.Name(), "Open failed:", err)
+ f.Close()
+ continue
+ }
+ buf, err := ReadAll(f)
+ if err != nil {
+ t.Error(fs.Name(), "ReadAll failed:", err)
+ f.Close()
+ continue
+ }
+ if string(buf) != secondContent {
+ t.Error(fs.Name(), "Content should be", "\""+secondContent+"\" but is \""+string(buf)+"\"")
+ f.Close()
+ continue
+ }
+ f.Close()
+ }
+}
+
+func TestMemFileRead(t *testing.T) {
+ f := tmpFile(new(MemMapFs))
+ // f := MemFileCreate("testfile")
+ f.WriteString("abcd")
+ f.Seek(0, 0)
+ b := make([]byte, 8)
+ n, err := f.Read(b)
+ if n != 4 {
+ t.Errorf("didn't read all bytes: %v %v %v", n, err, b)
+ }
+ if err != nil {
+ t.Errorf("err is not nil: %v %v %v", n, err, b)
+ }
+ n, err = f.Read(b)
+ if n != 0 {
+ t.Errorf("read more bytes: %v %v %v", n, err, b)
+ }
+ if err != io.EOF {
+ t.Errorf("error is not EOF: %v %v %v", n, err, b)
+ }
+}
+
+func TestRename(t *testing.T) {
+ defer removeAllTestFiles(t)
+ for _, fs := range Fss {
+ tDir := testDir(fs)
+ from := filepath.Join(tDir, "/renamefrom")
+ to := filepath.Join(tDir, "/renameto")
+ exists := filepath.Join(tDir, "/renameexists")
+ file, err := fs.Create(from)
+ if err != nil {
+ t.Fatalf("%s: open %q failed: %v", fs.Name(), to, err)
+ }
+ if err = file.Close(); err != nil {
+ t.Errorf("%s: close %q failed: %v", fs.Name(), to, err)
+ }
+ file, err = fs.Create(exists)
+ if err != nil {
+ t.Fatalf("%s: open %q failed: %v", fs.Name(), to, err)
+ }
+ if err = file.Close(); err != nil {
+ t.Errorf("%s: close %q failed: %v", fs.Name(), to, err)
+ }
+ err = fs.Rename(from, to)
+ if err != nil {
+ t.Fatalf("%s: rename %q, %q failed: %v", fs.Name(), to, from, err)
+ }
+ file, err = fs.Create(from)
+ if err != nil {
+ t.Fatalf("%s: open %q failed: %v", fs.Name(), to, err)
+ }
+ if err = file.Close(); err != nil {
+ t.Errorf("%s: close %q failed: %v", fs.Name(), to, err)
+ }
+ err = fs.Rename(from, exists)
+ if err != nil {
+ t.Errorf("%s: rename %q, %q failed: %v", fs.Name(), exists, from, err)
+ }
+ names, err := readDirNames(fs, tDir)
+ if err != nil {
+ t.Errorf("%s: readDirNames error: %v", fs.Name(), err)
+ }
+ found := false
+ for _, e := range names {
+ if e == "renamefrom" {
+ t.Error("File is still called renamefrom")
+ }
+ if e == "renameto" {
+ found = true
+ }
+ }
+ if !found {
+ t.Error("File was not renamed to renameto")
+ }
+
+ _, err = fs.Stat(to)
+ if err != nil {
+ t.Errorf("%s: stat %q failed: %v", fs.Name(), to, err)
+ }
+ }
+}
+
+func TestRemove(t *testing.T) {
+ for _, fs := range Fss {
+
+ x, err := TempFile(fs, "", "afero")
+ if err != nil {
+ t.Error(fmt.Sprint("unable to work with temp file", err))
+ }
+
+ path := x.Name()
+ x.Close()
+
+ tDir := filepath.Dir(path)
+
+ err = fs.Remove(path)
+ if err != nil {
+ t.Errorf("%v: Remove() failed: %v", fs.Name(), err)
+ continue
+ }
+
+ _, err = fs.Stat(path)
+ if !os.IsNotExist(err) {
+ t.Errorf("%v: Remove() didn't remove file", fs.Name())
+ continue
+ }
+
+ // Deleting non-existent file should raise error
+ err = fs.Remove(path)
+ if !os.IsNotExist(err) {
+ t.Errorf("%v: Remove() didn't raise error for non-existent file", fs.Name())
+ }
+
+ f, err := fs.Open(tDir)
+ if err != nil {
+ t.Error("TestDir should still exist:", err)
+ }
+
+ names, err := f.Readdirnames(-1)
+ if err != nil {
+ t.Error("Readdirnames failed:", err)
+ }
+
+ for _, e := range names {
+ if e == testName {
+ t.Error("File was not removed from parent directory")
+ }
+ }
+ }
+}
+
+func TestTruncate(t *testing.T) {
+ defer removeAllTestFiles(t)
+ for _, fs := range Fss {
+ f := tmpFile(fs)
+ defer f.Close()
+
+ checkSize(t, f, 0)
+ f.Write([]byte("hello, world\n"))
+ checkSize(t, f, 13)
+ f.Truncate(10)
+ checkSize(t, f, 10)
+ f.Truncate(1024)
+ checkSize(t, f, 1024)
+ f.Truncate(0)
+ checkSize(t, f, 0)
+ _, err := f.Write([]byte("surprise!"))
+ if err == nil {
+ checkSize(t, f, 13+9) // wrote at offset past where hello, world was.
+ }
+ }
+}
+
+func TestSeek(t *testing.T) {
+ defer removeAllTestFiles(t)
+ for _, fs := range Fss {
+ f := tmpFile(fs)
+ defer f.Close()
+
+ const data = "hello, world\n"
+ io.WriteString(f, data)
+
+ type test struct {
+ in int64
+ whence int
+ out int64
+ }
+ var tests = []test{
+ {0, 1, int64(len(data))},
+ {0, 0, 0},
+ {5, 0, 5},
+ {0, 2, int64(len(data))},
+ {0, 0, 0},
+ {-1, 2, int64(len(data)) - 1},
+ {1 << 33, 0, 1 << 33},
+ {1 << 33, 2, 1<<33 + int64(len(data))},
+ }
+ for i, tt := range tests {
+ off, err := f.Seek(tt.in, tt.whence)
+ if off != tt.out || err != nil {
+ if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL && tt.out > 1<<32 {
+ // Reiserfs rejects the big seeks.
+ // http://code.google.com/p/go/issues/detail?id=91
+ break
+ }
+ t.Errorf("#%d: Seek(%v, %v) = %v, %v want %v, nil", i, tt.in, tt.whence, off, err, tt.out)
+ }
+ }
+ }
+}
+
+func TestReadAt(t *testing.T) {
+ defer removeAllTestFiles(t)
+ for _, fs := range Fss {
+ f := tmpFile(fs)
+ defer f.Close()
+
+ const data = "hello, world\n"
+ io.WriteString(f, data)
+
+ b := make([]byte, 5)
+ n, err := f.ReadAt(b, 7)
+ if err != nil || n != len(b) {
+ t.Fatalf("ReadAt 7: %d, %v", n, err)
+ }
+ if string(b) != "world" {
+ t.Fatalf("ReadAt 7: have %q want %q", string(b), "world")
+ }
+ }
+}
+
+func TestWriteAt(t *testing.T) {
+ defer removeAllTestFiles(t)
+ for _, fs := range Fss {
+ f := tmpFile(fs)
+ defer f.Close()
+
+ const data = "hello, world\n"
+ io.WriteString(f, data)
+
+ n, err := f.WriteAt([]byte("WORLD"), 7)
+ if err != nil || n != 5 {
+ t.Fatalf("WriteAt 7: %d, %v", n, err)
+ }
+
+ f2, err := fs.Open(f.Name())
+ if err != nil {
+ t.Fatalf("%v: ReadFile %s: %v", fs.Name(), f.Name(), err)
+ }
+ defer f2.Close()
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(f2)
+ b := buf.Bytes()
+ if string(b) != "hello, WORLD\n" {
+ t.Fatalf("after write: have %q want %q", string(b), "hello, WORLD\n")
+ }
+
+ }
+}
+
+func setupTestDir(t *testing.T, fs Fs) string {
+ path := testDir(fs)
+ return setupTestFiles(t, fs, path)
+}
+
+func setupTestDirRoot(t *testing.T, fs Fs) string {
+ path := testDir(fs)
+ setupTestFiles(t, fs, path)
+ return path
+}
+
+func setupTestDirReusePath(t *testing.T, fs Fs, path string) string {
+ testRegistry[fs] = append(testRegistry[fs], path)
+ return setupTestFiles(t, fs, path)
+}
+
+func setupTestFiles(t *testing.T, fs Fs, path string) string {
+ testSubDir := filepath.Join(path, "more", "subdirectories", "for", "testing", "we")
+ err := fs.MkdirAll(testSubDir, 0700)
+ if err != nil && !os.IsExist(err) {
+ t.Fatal(err)
+ }
+
+ f, err := fs.Create(filepath.Join(testSubDir, "testfile1"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.WriteString("Testfile 1 content")
+ f.Close()
+
+ f, err = fs.Create(filepath.Join(testSubDir, "testfile2"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.WriteString("Testfile 2 content")
+ f.Close()
+
+ f, err = fs.Create(filepath.Join(testSubDir, "testfile3"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.WriteString("Testfile 3 content")
+ f.Close()
+
+ f, err = fs.Create(filepath.Join(testSubDir, "testfile4"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.WriteString("Testfile 4 content")
+ f.Close()
+ return testSubDir
+}
+
+func TestReaddirnames(t *testing.T) {
+ defer removeAllTestFiles(t)
+ for _, fs := range Fss {
+ testSubDir := setupTestDir(t, fs)
+ tDir := filepath.Dir(testSubDir)
+
+ root, err := fs.Open(tDir)
+ if err != nil {
+ t.Fatal(fs.Name(), tDir, err)
+ }
+ defer root.Close()
+
+ namesRoot, err := root.Readdirnames(-1)
+ if err != nil {
+ t.Fatal(fs.Name(), namesRoot, err)
+ }
+
+ sub, err := fs.Open(testSubDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer sub.Close()
+
+ namesSub, err := sub.Readdirnames(-1)
+ if err != nil {
+ t.Fatal(fs.Name(), namesSub, err)
+ }
+
+ findNames(fs, t, tDir, testSubDir, namesRoot, namesSub)
+ }
+}
+
+func TestReaddirSimple(t *testing.T) {
+ defer removeAllTestFiles(t)
+ for _, fs := range Fss {
+ testSubDir := setupTestDir(t, fs)
+ tDir := filepath.Dir(testSubDir)
+
+ root, err := fs.Open(tDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer root.Close()
+
+ rootInfo, err := root.Readdir(1)
+ if err != nil {
+ t.Log(myFileInfo(rootInfo))
+ t.Error(err)
+ }
+
+ rootInfo, err = root.Readdir(5)
+ if err != io.EOF {
+ t.Log(myFileInfo(rootInfo))
+ t.Error(err)
+ }
+
+ sub, err := fs.Open(testSubDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer sub.Close()
+
+ subInfo, err := sub.Readdir(5)
+ if err != nil {
+ t.Log(myFileInfo(subInfo))
+ t.Error(err)
+ }
+ }
+}
+
+func TestReaddir(t *testing.T) {
+ defer removeAllTestFiles(t)
+ for num := 0; num < 6; num++ {
+ outputs := make([]string, len(Fss))
+ infos := make([]string, len(Fss))
+ for i, fs := range Fss {
+ testSubDir := setupTestDir(t, fs)
+ //tDir := filepath.Dir(testSubDir)
+ root, err := fs.Open(testSubDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer root.Close()
+
+ for j := 0; j < 6; j++ {
+ info, err := root.Readdir(num)
+ outputs[i] += fmt.Sprintf("%v Error: %v\n", myFileInfo(info), err)
+ infos[i] += fmt.Sprintln(len(info), err)
+ }
+ }
+
+ fail := false
+ for i, o := range infos {
+ if i == 0 {
+ continue
+ }
+ if o != infos[i-1] {
+ fail = true
+ break
+ }
+ }
+ if fail {
+ t.Log("Readdir outputs not equal for Readdir(", num, ")")
+ for i, o := range outputs {
+ t.Log(Fss[i].Name())
+ t.Log(o)
+ }
+ t.Fail()
+ }
+ }
+}
+
+type myFileInfo []os.FileInfo
+
+func (m myFileInfo) String() string {
+ out := "Fileinfos:\n"
+ for _, e := range m {
+ out += " " + e.Name() + "\n"
+ }
+ return out
+}
+
+func TestReaddirAll(t *testing.T) {
+ defer removeAllTestFiles(t)
+ for _, fs := range Fss {
+ testSubDir := setupTestDir(t, fs)
+ tDir := filepath.Dir(testSubDir)
+
+ root, err := fs.Open(tDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer root.Close()
+
+ rootInfo, err := root.Readdir(-1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var namesRoot = []string{}
+ for _, e := range rootInfo {
+ namesRoot = append(namesRoot, e.Name())
+ }
+
+ sub, err := fs.Open(testSubDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer sub.Close()
+
+ subInfo, err := sub.Readdir(-1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var namesSub = []string{}
+ for _, e := range subInfo {
+ namesSub = append(namesSub, e.Name())
+ }
+
+ findNames(fs, t, tDir, testSubDir, namesRoot, namesSub)
+ }
+}
+
+func findNames(fs Fs, t *testing.T, tDir, testSubDir string, root, sub []string) {
+ var foundRoot bool
+ for _, e := range root {
+ f, err := fs.Open(filepath.Join(tDir, e))
+ if err != nil {
+ t.Error("Open", filepath.Join(tDir, e), ":", err)
+ }
+ defer f.Close()
+
+ if equal(e, "we") {
+ foundRoot = true
+ }
+ }
+ if !foundRoot {
+ t.Logf("Names root: %v", root)
+ t.Logf("Names sub: %v", sub)
+ t.Error("Didn't find subdirectory we")
+ }
+
+ var found1, found2 bool
+ for _, e := range sub {
+ f, err := fs.Open(filepath.Join(testSubDir, e))
+ if err != nil {
+ t.Error("Open", filepath.Join(testSubDir, e), ":", err)
+ }
+ defer f.Close()
+
+ if equal(e, "testfile1") {
+ found1 = true
+ }
+ if equal(e, "testfile2") {
+ found2 = true
+ }
+ }
+
+ if !found1 {
+ t.Logf("Names root: %v", root)
+ t.Logf("Names sub: %v", sub)
+ t.Error("Didn't find testfile1")
+ }
+ if !found2 {
+ t.Logf("Names root: %v", root)
+ t.Logf("Names sub: %v", sub)
+ t.Error("Didn't find testfile2")
+ }
+}
+
+func removeAllTestFiles(t *testing.T) {
+ for fs, list := range testRegistry {
+ for _, path := range list {
+ if err := fs.RemoveAll(path); err != nil {
+ t.Error(fs.Name(), err)
+ }
+ }
+ }
+ testRegistry = make(map[Fs][]string)
+}
+
+func equal(name1, name2 string) (r bool) {
+ switch runtime.GOOS {
+ case "windows":
+ r = strings.ToLower(name1) == strings.ToLower(name2)
+ default:
+ r = name1 == name2
+ }
+ return
+}
+
+func checkSize(t *testing.T, f File, size int64) {
+ dir, err := f.Stat()
+ if err != nil {
+ t.Fatalf("Stat %q (looking for size %d): %s", f.Name(), size, err)
+ }
+ if dir.Size() != size {
+ t.Errorf("Stat %q: size %d want %d", f.Name(), dir.Size(), size)
+ }
+}
diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml
new file mode 100644
index 000000000..006f31534
--- /dev/null
+++ b/vendor/github.com/spf13/afero/appveyor.yml
@@ -0,0 +1,15 @@
+version: '{build}'
+clone_folder: C:\gopath\src\github.com\spf13\afero
+environment:
+ GOPATH: C:\gopath
+build_script:
+- cmd: >-
+ go version
+
+ go env
+
+ go get -v github.com/spf13/afero/...
+
+ go build github.com/spf13/afero
+test_script:
+- cmd: go test -v github.com/spf13/afero
diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go
new file mode 100644
index 000000000..5e4fc2ec0
--- /dev/null
+++ b/vendor/github.com/spf13/afero/basepath.go
@@ -0,0 +1,145 @@
+package afero
+
+import (
+ "errors"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+)
+
+// The BasePathFs restricts all operations to a given path within an Fs.
+// The given file name to the operations on this Fs will be prepended with
+// the base path before calling the base Fs.
+// Any file name (after filepath.Clean()) outside this base path will be
+// treated as non existing file.
+//
+// Note that it does not clean the error messages on return, so you may
+// reveal the real path on errors.
+type BasePathFs struct {
+ source Fs
+ path string
+}
+
+func NewBasePathFs(source Fs, path string) Fs {
+ return &BasePathFs{source: source, path: path}
+}
+
+// on a file outside the base path it returns the given file name and an error,
+// else the given file with the base path prepended
+func (b *BasePathFs) RealPath(name string) (path string, err error) {
+ if err := validateBasePathName(name); err != nil {
+ return "", err
+ }
+
+ bpath := filepath.Clean(b.path)
+ path = filepath.Clean(filepath.Join(bpath, name))
+ if !strings.HasPrefix(path, bpath) {
+ return name, os.ErrNotExist
+ }
+
+ return path, nil
+}
+
+func validateBasePathName(name string) error {
+ if runtime.GOOS != "windows" {
+ // Not much to do here;
+ // the virtual file paths all look absolute on *nix.
+ return nil
+ }
+
+ // On Windows a common mistake would be to provide an absolute OS path
+ // We could strip out the base part, but that would not be very portable.
+ if filepath.IsAbs(name) {
+ return &os.PathError{Op: "realPath", Path: name, Err: errors.New("got a real OS path instead of a virtual")}
+ }
+
+ return nil
+}
+
+func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "chtimes", Path: name, Err: err}
+ }
+ return b.source.Chtimes(name, atime, mtime)
+}
+
+func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "chmod", Path: name, Err: err}
+ }
+ return b.source.Chmod(name, mode)
+}
+
+func (b *BasePathFs) Name() string {
+ return "BasePathFs"
+}
+
+func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return nil, &os.PathError{Op: "stat", Path: name, Err: err}
+ }
+ return b.source.Stat(name)
+}
+
+func (b *BasePathFs) Rename(oldname, newname string) (err error) {
+ if oldname, err = b.RealPath(oldname); err != nil {
+ return &os.PathError{Op: "rename", Path: oldname, Err: err}
+ }
+ if newname, err = b.RealPath(newname); err != nil {
+ return &os.PathError{Op: "rename", Path: newname, Err: err}
+ }
+ return b.source.Rename(oldname, newname)
+}
+
+func (b *BasePathFs) RemoveAll(name string) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "remove_all", Path: name, Err: err}
+ }
+ return b.source.RemoveAll(name)
+}
+
+func (b *BasePathFs) Remove(name string) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "remove", Path: name, Err: err}
+ }
+ return b.source.Remove(name)
+}
+
+func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return nil, &os.PathError{Op: "openfile", Path: name, Err: err}
+ }
+ return b.source.OpenFile(name, flag, mode)
+}
+
+func (b *BasePathFs) Open(name string) (f File, err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return nil, &os.PathError{Op: "open", Path: name, Err: err}
+ }
+ return b.source.Open(name)
+}
+
+func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: err}
+ }
+ return b.source.Mkdir(name, mode)
+}
+
+func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: err}
+ }
+ return b.source.MkdirAll(name, mode)
+}
+
+func (b *BasePathFs) Create(name string) (f File, err error) {
+ if name, err = b.RealPath(name); err != nil {
+ return nil, &os.PathError{Op: "create", Path: name, Err: err}
+ }
+ return b.source.Create(name)
+}
+
+// vim: ts=4 sw=4 noexpandtab nolist syn=go
diff --git a/vendor/github.com/spf13/afero/basepath_test.go b/vendor/github.com/spf13/afero/basepath_test.go
new file mode 100644
index 000000000..abc22b9f6
--- /dev/null
+++ b/vendor/github.com/spf13/afero/basepath_test.go
@@ -0,0 +1,142 @@
+package afero
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+)
+
+func TestBasePath(t *testing.T) {
+ baseFs := &MemMapFs{}
+ baseFs.MkdirAll("/base/path/tmp", 0777)
+ bp := NewBasePathFs(baseFs, "/base/path")
+
+ if _, err := bp.Create("/tmp/foo"); err != nil {
+ t.Errorf("Failed to set real path")
+ }
+
+ if fh, err := bp.Create("../tmp/bar"); err == nil {
+ t.Errorf("succeeded in creating %s ...", fh.Name())
+ }
+}
+
+func TestBasePathRoot(t *testing.T) {
+ baseFs := &MemMapFs{}
+ baseFs.MkdirAll("/base/path/foo/baz", 0777)
+ baseFs.MkdirAll("/base/path/boo/", 0777)
+ bp := NewBasePathFs(baseFs, "/base/path")
+
+ rd, err := ReadDir(bp, string(os.PathSeparator))
+
+ if len(rd) != 2 {
+ t.Errorf("base path doesn't respect root")
+ }
+
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestRealPath(t *testing.T) {
+ fs := NewOsFs()
+ baseDir, err := TempDir(fs, "", "base")
+ if err != nil {
+ t.Fatal("error creating tempDir", err)
+ }
+ defer fs.RemoveAll(baseDir)
+ anotherDir, err := TempDir(fs, "", "another")
+ if err != nil {
+ t.Fatal("error creating tempDir", err)
+ }
+ defer fs.RemoveAll(anotherDir)
+
+ bp := NewBasePathFs(fs, baseDir).(*BasePathFs)
+
+ subDir := filepath.Join(baseDir, "s1")
+
+ realPath, err := bp.RealPath("/s1")
+
+ if err != nil {
+ t.Errorf("Got error %s", err)
+ }
+
+ if realPath != subDir {
+ t.Errorf("Expected \n%s got \n%s", subDir, realPath)
+ }
+
+ if runtime.GOOS == "windows" {
+ _, err = bp.RealPath(anotherDir)
+
+ if err == nil {
+ t.Errorf("Expected error")
+ }
+
+ } else {
+ // on *nix we have no way of just looking at the path and tell that anotherDir
+ // is not inside the base file system.
+ // The user will receive an os.ErrNotExist later.
+ surrealPath, err := bp.RealPath(anotherDir)
+
+ if err != nil {
+ t.Errorf("Got error %s", err)
+ }
+
+ excpected := filepath.Join(baseDir, anotherDir)
+
+ if surrealPath != excpected {
+ t.Errorf("Expected \n%s got \n%s", excpected, surrealPath)
+ }
+ }
+
+}
+
+func TestNestedBasePaths(t *testing.T) {
+ type dirSpec struct {
+ Dir1, Dir2, Dir3 string
+ }
+ dirSpecs := []dirSpec{
+ dirSpec{Dir1: "/", Dir2: "/", Dir3: "/"},
+ dirSpec{Dir1: "/", Dir2: "/path2", Dir3: "/"},
+ dirSpec{Dir1: "/path1/dir", Dir2: "/path2/dir/", Dir3: "/path3/dir"},
+ dirSpec{Dir1: "C:/path1", Dir2: "path2/dir", Dir3: "/path3/dir/"},
+ }
+
+ for _, ds := range dirSpecs {
+ memFs := NewMemMapFs()
+ level1Fs := NewBasePathFs(memFs, ds.Dir1)
+ level2Fs := NewBasePathFs(level1Fs, ds.Dir2)
+ level3Fs := NewBasePathFs(level2Fs, ds.Dir3)
+
+ type spec struct {
+ BaseFs Fs
+ FileName string
+ }
+ specs := []spec{
+ spec{BaseFs: level3Fs, FileName: "f.txt"},
+ spec{BaseFs: level2Fs, FileName: "f.txt"},
+ spec{BaseFs: level1Fs, FileName: "f.txt"},
+ }
+
+ for _, s := range specs {
+ if err := s.BaseFs.MkdirAll(s.FileName, 0755); err != nil {
+ t.Errorf("Got error %s", err.Error())
+ }
+ if _, err := s.BaseFs.Stat(s.FileName); err != nil {
+ t.Errorf("Got error %s", err.Error())
+ }
+
+ if s.BaseFs == level3Fs {
+ pathToExist := filepath.Join(ds.Dir3, s.FileName)
+ if _, err := level2Fs.Stat(pathToExist); err != nil {
+ t.Errorf("Got error %s (path %s)", err.Error(), pathToExist)
+ }
+ } else if s.BaseFs == level2Fs {
+ pathToExist := filepath.Join(ds.Dir2, ds.Dir3, s.FileName)
+ if _, err := level1Fs.Stat(pathToExist); err != nil {
+ t.Errorf("Got error %s (path %s)", err.Error(), pathToExist)
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go
new file mode 100644
index 000000000..e54a4f8b4
--- /dev/null
+++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go
@@ -0,0 +1,295 @@
+package afero
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+// If the cache duration is 0, cache time will be unlimited, i.e. once
+// a file is in the layer, the base will never be read again for this file.
+//
+// For cache times greater than 0, the modification time of a file is
+// checked. Note that a lot of file system implementations only allow a
+// resolution of a second for timestamps... or as the godoc for os.Chtimes()
+// states: "The underlying filesystem may truncate or round the values to a
+// less precise time unit."
+//
+// This caching union also forwards all write calls to the base file
+// system first. To prevent writing to the base Fs, wrap it in a read-only
+// filter. Note that this also makes the overlay read-only; to write files
+// in the overlay, use the overlay Fs directly, not the union Fs.
+type CacheOnReadFs struct {
+ base Fs
+ layer Fs
+ cacheTime time.Duration
+}
+
+func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs {
+ return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime}
+}
+
+type cacheState int
+
+const (
+ // not present in the overlay, unknown if it exists in the base:
+ cacheMiss cacheState = iota
+ // present in the overlay and in base, base file is newer:
+ cacheStale
+ // present in the overlay - with cache time == 0 it may exist in the base,
+ // with cacheTime > 0 it exists in the base and is same age or newer in the
+ // overlay
+ cacheHit
+ // happens if someone writes directly to the overlay without
+ // going through this union
+ cacheLocal
+)
+
+func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
+ var lfi, bfi os.FileInfo
+ lfi, err = u.layer.Stat(name)
+ if err == nil {
+ if u.cacheTime == 0 {
+ return cacheHit, lfi, nil
+ }
+ if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
+ bfi, err = u.base.Stat(name)
+ if err != nil {
+ return cacheLocal, lfi, nil
+ }
+ if bfi.ModTime().After(lfi.ModTime()) {
+ return cacheStale, bfi, nil
+ }
+ }
+ return cacheHit, lfi, nil
+ }
+
+ if err == syscall.ENOENT || os.IsNotExist(err) {
+ return cacheMiss, nil, nil
+ }
+ return cacheMiss, nil, err
+}
+
+func (u *CacheOnReadFs) copyToLayer(name string) error {
+ return copyToLayer(u.base, u.layer, name)
+}
+
+func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit:
+ err = u.base.Chtimes(name, atime, mtime)
+ case cacheStale, cacheMiss:
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ err = u.base.Chtimes(name, atime, mtime)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Chtimes(name, atime, mtime)
+}
+
+func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit:
+ err = u.base.Chmod(name, mode)
+ case cacheStale, cacheMiss:
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ err = u.base.Chmod(name, mode)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Chmod(name, mode)
+}
+
+func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) {
+ st, fi, err := u.cacheStatus(name)
+ if err != nil {
+ return nil, err
+ }
+ switch st {
+ case cacheMiss:
+ return u.base.Stat(name)
+ default: // cacheStale carries the base os.FileInfo; cacheHit and cacheLocal carry the layer's
+ return fi, nil
+ }
+}
+
+func (u *CacheOnReadFs) Rename(oldname, newname string) error {
+ st, _, err := u.cacheStatus(oldname)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit:
+ err = u.base.Rename(oldname, newname)
+ case cacheStale, cacheMiss:
+ if err := u.copyToLayer(oldname); err != nil {
+ return err
+ }
+ err = u.base.Rename(oldname, newname)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Rename(oldname, newname)
+}
+
+func (u *CacheOnReadFs) Remove(name string) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit, cacheStale, cacheMiss:
+ err = u.base.Remove(name)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.Remove(name)
+}
+
+func (u *CacheOnReadFs) RemoveAll(name string) error {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return err
+ }
+ switch st {
+ case cacheLocal:
+ case cacheHit, cacheStale, cacheMiss:
+ err = u.base.RemoveAll(name)
+ }
+ if err != nil {
+ return err
+ }
+ return u.layer.RemoveAll(name)
+}
+
+func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ st, _, err := u.cacheStatus(name)
+ if err != nil {
+ return nil, err
+ }
+ switch st {
+ case cacheLocal, cacheHit:
+ default:
+ if err := u.copyToLayer(name); err != nil {
+ return nil, err
+ }
+ }
+ if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+ bfi, err := u.base.OpenFile(name, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ lfi, err := u.layer.OpenFile(name, flag, perm)
+ if err != nil {
+ bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...?
+ return nil, err
+ }
+ return &UnionFile{base: bfi, layer: lfi}, nil
+ }
+ return u.layer.OpenFile(name, flag, perm)
+}
+
+func (u *CacheOnReadFs) Open(name string) (File, error) {
+ st, fi, err := u.cacheStatus(name)
+ if err != nil {
+ return nil, err
+ }
+
+ switch st {
+ case cacheLocal:
+ return u.layer.Open(name)
+
+ case cacheMiss:
+ bfi, err := u.base.Stat(name)
+ if err != nil {
+ return nil, err
+ }
+ if bfi.IsDir() {
+ return u.base.Open(name)
+ }
+ if err := u.copyToLayer(name); err != nil {
+ return nil, err
+ }
+ return u.layer.Open(name)
+
+ case cacheStale:
+ if !fi.IsDir() {
+ if err := u.copyToLayer(name); err != nil {
+ return nil, err
+ }
+ return u.layer.Open(name)
+ }
+ case cacheHit:
+ if !fi.IsDir() {
+ return u.layer.Open(name)
+ }
+ }
+ // directories from cacheHit and cacheStale fall through to here:
+ bfile, _ := u.base.Open(name)
+ lfile, err := u.layer.Open(name)
+ if err != nil && bfile == nil {
+ return nil, err
+ }
+ return &UnionFile{base: bfile, layer: lfile}, nil
+}
+
+func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error {
+ err := u.base.Mkdir(name, perm)
+ if err != nil {
+ return err
+ }
+ return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache
+}
+
+func (u *CacheOnReadFs) Name() string {
+ return "CacheOnReadFs"
+}
+
+func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error {
+ err := u.base.MkdirAll(name, perm)
+ if err != nil {
+ return err
+ }
+ return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CacheOnReadFs) Create(name string) (File, error) {
+ bfh, err := u.base.Create(name)
+ if err != nil {
+ return nil, err
+ }
+ lfh, err := u.layer.Create(name)
+ if err != nil {
+ // oops, see the comment about O_TRUNC above; should we remove the file? Then we
+ // would have to remember whether it existed before.
+ bfh.Close()
+ return nil, err
+ }
+ return &UnionFile{base: bfh, layer: lfh}, nil
+}
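
A rough usage sketch for the caching union above, assuming an OsFs base and an in-memory layer; the path read below is only an example and must exist on the host:

package main

import (
	"fmt"
	"time"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewOsFs()      // authoritative, comparatively slow layer
	cache := afero.NewMemMapFs() // fast in-memory layer

	// Reads through ufs copy files into the cache; entries older than
	// ten seconds are re-checked against the base on the next access.
	ufs := afero.NewCacheOnReadFs(base, cache, 10*time.Second)

	data, err := afero.ReadFile(ufs, "/etc/hostname") // example path
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("read %d bytes; a copy now lives in the memory layer\n", len(data))
}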
diff --git a/vendor/github.com/spf13/afero/composite_test.go b/vendor/github.com/spf13/afero/composite_test.go
new file mode 100644
index 000000000..e8ac1a818
--- /dev/null
+++ b/vendor/github.com/spf13/afero/composite_test.go
@@ -0,0 +1,368 @@
+package afero
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "testing"
+ "time"
+)
+
+var tempDirs []string
+
+func NewTempOsBaseFs(t *testing.T) Fs {
+ name, err := TempDir(NewOsFs(), "", "")
+ if err != nil {
+ t.Error("error creating tempDir", err)
+ }
+
+ tempDirs = append(tempDirs, name)
+
+ return NewBasePathFs(NewOsFs(), name)
+}
+
+func CleanupTempDirs(t *testing.T) {
+ osfs := NewOsFs()
+ type ev struct {
+ path string
+ e error
+ }
+
+ errs := []ev{}
+
+ for _, x := range tempDirs {
+ err := osfs.RemoveAll(x)
+ if err != nil {
+ errs = append(errs, ev{path: x, e: err})
+ }
+ }
+
+ for _, e := range errs {
+ fmt.Println("error removing tempDir", e.path, e.e)
+ }
+
+ if len(errs) > 0 {
+ t.Error("error cleaning up tempDirs")
+ }
+ tempDirs = []string{}
+}
+
+func TestUnionCreateExisting(t *testing.T) {
+ base := &MemMapFs{}
+ roBase := &ReadOnlyFs{source: base}
+
+ ufs := NewCopyOnWriteFs(roBase, &MemMapFs{})
+
+ base.MkdirAll("/home/test", 0777)
+ fh, _ := base.Create("/home/test/file.txt")
+ fh.WriteString("This is a test")
+ fh.Close()
+
+ fh, err := ufs.OpenFile("/home/test/file.txt", os.O_RDWR, 0666)
+ if err != nil {
+ t.Errorf("Failed to open file r/w: %s", err)
+ }
+
+ _, err = fh.Write([]byte("####"))
+ if err != nil {
+ t.Errorf("Failed to write file: %s", err)
+ }
+ fh.Seek(0, 0)
+ data, err := ioutil.ReadAll(fh)
+ if err != nil {
+ t.Errorf("Failed to read file: %s", err)
+ }
+ if string(data) != "#### is a test" {
+ t.Errorf("Got wrong data")
+ }
+ fh.Close()
+
+ fh, _ = base.Open("/home/test/file.txt")
+ data, err = ioutil.ReadAll(fh)
+ if string(data) != "This is a test" {
+ t.Errorf("Got wrong data in base file")
+ }
+ fh.Close()
+
+ fh, err = ufs.Create("/home/test/file.txt")
+ switch err {
+ case nil:
+ if fi, _ := fh.Stat(); fi.Size() != 0 {
+ t.Errorf("Create did not truncate file")
+ }
+ fh.Close()
+ default:
+ t.Errorf("Create failed on existing file")
+ }
+
+}
+
+func TestUnionMergeReaddir(t *testing.T) {
+ base := &MemMapFs{}
+ roBase := &ReadOnlyFs{source: base}
+
+ ufs := &CopyOnWriteFs{base: roBase, layer: &MemMapFs{}}
+
+ base.MkdirAll("/home/test", 0777)
+ fh, _ := base.Create("/home/test/file.txt")
+ fh.WriteString("This is a test")
+ fh.Close()
+
+ fh, _ = ufs.Create("/home/test/file2.txt")
+ fh.WriteString("This is a test")
+ fh.Close()
+
+ fh, _ = ufs.Open("/home/test")
+ files, err := fh.Readdirnames(-1)
+ if err != nil {
+ t.Errorf("Readdirnames failed")
+ }
+ if len(files) != 2 {
+ t.Errorf("Got wrong number of files: %v", files)
+ }
+}
+
+func TestExistingDirectoryCollisionReaddir(t *testing.T) {
+ base := &MemMapFs{}
+ roBase := &ReadOnlyFs{source: base}
+ overlay := &MemMapFs{}
+
+ ufs := &CopyOnWriteFs{base: roBase, layer: overlay}
+
+ base.MkdirAll("/home/test", 0777)
+ fh, _ := base.Create("/home/test/file.txt")
+ fh.WriteString("This is a test")
+ fh.Close()
+
+ overlay.MkdirAll("home/test", 0777)
+ fh, _ = overlay.Create("/home/test/file2.txt")
+ fh.WriteString("This is a test")
+ fh.Close()
+
+ fh, _ = ufs.Create("/home/test/file3.txt")
+ fh.WriteString("This is a test")
+ fh.Close()
+
+ fh, _ = ufs.Open("/home/test")
+ files, err := fh.Readdirnames(-1)
+ if err != nil {
+ t.Errorf("Readdirnames failed")
+ }
+ if len(files) != 3 {
+ t.Errorf("Got wrong number of files in union: %v", files)
+ }
+
+ fh, _ = overlay.Open("/home/test")
+ files, err = fh.Readdirnames(-1)
+ if err != nil {
+ t.Errorf("Readdirnames failed")
+ }
+ if len(files) != 2 {
+ t.Errorf("Got wrong number of files in overlay: %v", files)
+ }
+}
+
+func TestNestedDirBaseReaddir(t *testing.T) {
+ base := &MemMapFs{}
+ roBase := &ReadOnlyFs{source: base}
+ overlay := &MemMapFs{}
+
+ ufs := &CopyOnWriteFs{base: roBase, layer: overlay}
+
+ base.MkdirAll("/home/test/foo/bar", 0777)
+ fh, _ := base.Create("/home/test/file.txt")
+ fh.WriteString("This is a test")
+ fh.Close()
+
+ fh, _ = base.Create("/home/test/foo/file2.txt")
+ fh.WriteString("This is a test")
+ fh.Close()
+ fh, _ = base.Create("/home/test/foo/bar/file3.txt")
+ fh.WriteString("This is a test")
+ fh.Close()
+
+ overlay.MkdirAll("/", 0777)
+
+ // Opening something only in the base
+ fh, _ = ufs.Open("/home/test/foo")
+ list, err := fh.Readdir(-1)
+ if err != nil {
+ t.Errorf("Readdir failed %s", err)
+ }
+ if len(list) != 2 {
+ for _, x := range list {
+ fmt.Println(x.Name())
+ }
+ t.Errorf("Got wrong number of files in union: %v", len(list))
+ }
+}
+
+func TestNestedDirOverlayReaddir(t *testing.T) {
+ base := &MemMapFs{}
+ roBase := &ReadOnlyFs{source: base}
+ overlay := &MemMapFs{}
+
+ ufs := &CopyOnWriteFs{base: roBase, layer: overlay}
+
+ base.MkdirAll("/", 0777)
+ overlay.MkdirAll("/home/test/foo/bar", 0777)
+ fh, _ := overlay.Create("/home/test/file.txt")
+ fh.WriteString("This is a test")
+ fh.Close()
+ fh, _ = overlay.Create("/home/test/foo/file2.txt")
+ fh.WriteString("This is a test")
+ fh.Close()
+ fh, _ = overlay.Create("/home/test/foo/bar/file3.txt")
+ fh.WriteString("This is a test")
+ fh.Close()
+
+ // Opening nested dir only in the overlay
+ fh, _ = ufs.Open("/home/test/foo")
+ list, err := fh.Readdir(-1)
+ if err != nil {
+ t.Errorf("Readdir failed %s", err)
+ }
+ if len(list) != 2 {
+ for _, x := range list {
+ fmt.Println(x.Name())
+ }
+ t.Errorf("Got wrong number of files in union: %v", len(list))
+ }
+}
+
+func TestNestedDirOverlayOsFsReaddir(t *testing.T) {
+ defer CleanupTempDirs(t)
+ base := NewTempOsBaseFs(t)
+ roBase := &ReadOnlyFs{source: base}
+ overlay := NewTempOsBaseFs(t)
+
+ ufs := &CopyOnWriteFs{base: roBase, layer: overlay}
+
+ base.MkdirAll("/", 0777)
+ overlay.MkdirAll("/home/test/foo/bar", 0777)
+ fh, _ := overlay.Create("/home/test/file.txt")
+ fh.WriteString("This is a test")
+ fh.Close()
+ fh, _ = overlay.Create("/home/test/foo/file2.txt")
+ fh.WriteString("This is a test")
+ fh.Close()
+ fh, _ = overlay.Create("/home/test/foo/bar/file3.txt")
+ fh.WriteString("This is a test")
+ fh.Close()
+
+ // Opening nested dir only in the overlay
+ fh, _ = ufs.Open("/home/test/foo")
+ list, err := fh.Readdir(-1)
+ fh.Close()
+ if err != nil {
+ t.Errorf("Readdir failed %s", err)
+ }
+ if len(list) != 2 {
+ for _, x := range list {
+ fmt.Println(x.Name())
+ }
+ t.Errorf("Got wrong number of files in union: %v", len(list))
+ }
+}
+
+func TestCopyOnWriteFsWithOsFs(t *testing.T) {
+ defer CleanupTempDirs(t)
+ base := NewTempOsBaseFs(t)
+ roBase := &ReadOnlyFs{source: base}
+ overlay := NewTempOsBaseFs(t)
+
+ ufs := &CopyOnWriteFs{base: roBase, layer: overlay}
+
+ base.MkdirAll("/home/test", 0777)
+ fh, _ := base.Create("/home/test/file.txt")
+ fh.WriteString("This is a test")
+ fh.Close()
+
+ overlay.MkdirAll("home/test", 0777)
+ fh, _ = overlay.Create("/home/test/file2.txt")
+ fh.WriteString("This is a test")
+ fh.Close()
+
+ fh, _ = ufs.Create("/home/test/file3.txt")
+ fh.WriteString("This is a test")
+ fh.Close()
+
+ fh, _ = ufs.Open("/home/test")
+ files, err := fh.Readdirnames(-1)
+ fh.Close()
+ if err != nil {
+ t.Errorf("Readdirnames failed")
+ }
+ if len(files) != 3 {
+ t.Errorf("Got wrong number of files in union: %v", files)
+ }
+
+ fh, _ = overlay.Open("/home/test")
+ files, err = fh.Readdirnames(-1)
+ fh.Close()
+ if err != nil {
+ t.Errorf("Readdirnames failed")
+ }
+ if len(files) != 2 {
+ t.Errorf("Got wrong number of files in overlay: %v", files)
+ }
+}
+
+func TestUnionCacheWrite(t *testing.T) {
+ base := &MemMapFs{}
+ layer := &MemMapFs{}
+
+ ufs := NewCacheOnReadFs(base, layer, 0)
+
+ base.Mkdir("/data", 0777)
+
+ fh, err := ufs.Create("/data/file.txt")
+ if err != nil {
+ t.Errorf("Failed to create file")
+ }
+ _, err = fh.Write([]byte("This is a test"))
+ if err != nil {
+ t.Errorf("Failed to write file")
+ }
+
+ fh.Seek(0, os.SEEK_SET)
+ buf := make([]byte, 4)
+ _, err = fh.Read(buf)
+ fh.Write([]byte(" IS A"))
+ fh.Close()
+
+ baseData, _ := ReadFile(base, "/data/file.txt")
+ layerData, _ := ReadFile(layer, "/data/file.txt")
+ if string(baseData) != string(layerData) {
+ t.Errorf("Different data: %s <=> %s", baseData, layerData)
+ }
+}
+
+func TestUnionCacheExpire(t *testing.T) {
+ base := &MemMapFs{}
+ layer := &MemMapFs{}
+ ufs := &CacheOnReadFs{base: base, layer: layer, cacheTime: 1 * time.Second}
+
+ base.Mkdir("/data", 0777)
+
+ fh, err := ufs.Create("/data/file.txt")
+ if err != nil {
+ t.Errorf("Failed to create file")
+ }
+ _, err = fh.Write([]byte("This is a test"))
+ if err != nil {
+ t.Errorf("Failed to write file")
+ }
+ fh.Close()
+
+ fh, _ = base.Create("/data/file.txt")
+ // sleep some time, so we really get a different time.Now() on write...
+ time.Sleep(2 * time.Second)
+ fh.WriteString("Another test")
+ fh.Close()
+
+ data, _ := ReadFile(ufs, "/data/file.txt")
+ if string(data) != "Another test" {
+ t.Errorf("cache time failed: <%s>", data)
+ }
+}
diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go
new file mode 100644
index 000000000..5728243d9
--- /dev/null
+++ b/vendor/github.com/spf13/afero/const_bsds.go
@@ -0,0 +1,22 @@
+// Copyright © 2016 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build darwin openbsd freebsd netbsd dragonfly
+
+package afero
+
+import (
+ "syscall"
+)
+
+const BADFD = syscall.EBADF
diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go
new file mode 100644
index 000000000..968fc2783
--- /dev/null
+++ b/vendor/github.com/spf13/afero/const_win_unix.go
@@ -0,0 +1,25 @@
+// Copyright © 2016 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// +build !darwin
+// +build !openbsd
+// +build !freebsd
+// +build !dragonfly
+// +build !netbsd
+
+package afero
+
+import (
+ "syscall"
+)
+
+const BADFD = syscall.EBADFD
diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go
new file mode 100644
index 000000000..ed692ae95
--- /dev/null
+++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go
@@ -0,0 +1,253 @@
+package afero
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "syscall"
+ "time"
+)
+
+// The CopyOnWriteFs is a union filesystem: a read only base file system with
+// a possibly writeable layer on top. Changes to the file system will only
+// be made in the overlay: Changing an existing file in the base layer which
+// is not present in the overlay will copy the file to the overlay ("changing"
+// includes also calls to e.g. Chtimes() and Chmod()).
+//
+// Reading directories is currently only supported via Open(), not OpenFile().
+type CopyOnWriteFs struct {
+ base Fs
+ layer Fs
+}
+
+func NewCopyOnWriteFs(base Fs, layer Fs) Fs {
+ return &CopyOnWriteFs{base: base, layer: layer}
+}
+
+// Returns true if the file is not in the overlay
+func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) {
+ if _, err := u.layer.Stat(name); err == nil {
+ return false, nil
+ }
+ _, err := u.base.Stat(name)
+ if err != nil {
+ if oerr, ok := err.(*os.PathError); ok {
+ if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR {
+ return false, nil
+ }
+ }
+ if err == syscall.ENOENT {
+ return false, nil
+ }
+ }
+ return true, err
+}
+
+func (u *CopyOnWriteFs) copyToLayer(name string) error {
+ return copyToLayer(u.base, u.layer, name)
+}
+
+func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error {
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return err
+ }
+ if b {
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ }
+ return u.layer.Chtimes(name, atime, mtime)
+}
+
+func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error {
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return err
+ }
+ if b {
+ if err := u.copyToLayer(name); err != nil {
+ return err
+ }
+ }
+ return u.layer.Chmod(name, mode)
+}
+
+func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) {
+ fi, err := u.layer.Stat(name)
+ if err != nil {
+ origErr := err
+ if e, ok := err.(*os.PathError); ok {
+ err = e.Err
+ }
+ if err == syscall.ENOENT || err == syscall.ENOTDIR {
+ return u.base.Stat(name)
+ }
+ return nil, origErr
+ }
+ return fi, nil
+}
+
+// Renaming files present only in the base layer is not permitted
+func (u *CopyOnWriteFs) Rename(oldname, newname string) error {
+ b, err := u.isBaseFile(oldname)
+ if err != nil {
+ return err
+ }
+ if b {
+ return syscall.EPERM
+ }
+ return u.layer.Rename(oldname, newname)
+}
+
+// Removing files present only in the base layer is not permitted. If
+// a file is present in the base layer and the overlay, only the overlay
+// will be removed.
+func (u *CopyOnWriteFs) Remove(name string) error {
+ err := u.layer.Remove(name)
+ switch err {
+ case syscall.ENOENT:
+ _, err = u.base.Stat(name)
+ if err == nil {
+ return syscall.EPERM
+ }
+ return syscall.ENOENT
+ default:
+ return err
+ }
+}
+
+func (u *CopyOnWriteFs) RemoveAll(name string) error {
+ err := u.layer.RemoveAll(name)
+ switch err {
+ case syscall.ENOENT:
+ _, err = u.base.Stat(name)
+ if err == nil {
+ return syscall.EPERM
+ }
+ return syscall.ENOENT
+ default:
+ return err
+ }
+}
+
+func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return nil, err
+ }
+
+ if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+ if b {
+ if err = u.copyToLayer(name); err != nil {
+ return nil, err
+ }
+ return u.layer.OpenFile(name, flag, perm)
+ }
+
+ dir := filepath.Dir(name)
+ isaDir, err := IsDir(u.base, dir)
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ if isaDir {
+ if err = u.layer.MkdirAll(dir, 0777); err != nil {
+ return nil, err
+ }
+ return u.layer.OpenFile(name, flag, perm)
+ }
+
+ isaDir, err = IsDir(u.layer, dir)
+ if err != nil {
+ return nil, err
+ }
+ if isaDir {
+ return u.layer.OpenFile(name, flag, perm)
+ }
+
+ return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist?
+ }
+ if b {
+ return u.base.OpenFile(name, flag, perm)
+ }
+ return u.layer.OpenFile(name, flag, perm)
+}
+
+// Open handles the 9 different cases that arise from the union, i.e. every
+// combination of the following states:
+// layer: doesn't exist, exists as a file, or exists as a directory
+// base: doesn't exist, exists as a file, or exists as a directory
+func (u *CopyOnWriteFs) Open(name string) (File, error) {
+ // Since the overlay overrides the base we check that first
+ b, err := u.isBaseFile(name)
+ if err != nil {
+ return nil, err
+ }
+
+ // If overlay doesn't exist, return the base (base state irrelevant)
+ if b {
+ return u.base.Open(name)
+ }
+
+ // If overlay is a file, return it (base state irrelevant)
+ dir, err := IsDir(u.layer, name)
+ if err != nil {
+ return nil, err
+ }
+ if !dir {
+ return u.layer.Open(name)
+ }
+
+ // Overlay is a directory, base state now matters.
+ // Base state has 3 states to check but 2 outcomes:
+ // A. It's a file or non-readable in the base (return just the overlay)
+ // B. It's an accessible directory in the base (return a UnionFile)
+
+ // If base is file or nonreadable, return overlay
+ dir, err = IsDir(u.base, name)
+ if !dir || err != nil {
+ return u.layer.Open(name)
+ }
+
+ // Both base & layer are directories
+ // Return union file (if opens are without error)
+ bfile, bErr := u.base.Open(name)
+ lfile, lErr := u.layer.Open(name)
+
+ // If either has an error at this point, something is very wrong. Return nil and the errors.
+ if bErr != nil || lErr != nil {
+ return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr)
+ }
+
+ return &UnionFile{base: bfile, layer: lfile}, nil
+}
+
+func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error {
+ dir, err := IsDir(u.base, name)
+ if err != nil {
+ return u.layer.MkdirAll(name, perm)
+ }
+ if dir {
+ return syscall.EEXIST
+ }
+ return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CopyOnWriteFs) Name() string {
+ return "CopyOnWriteFs"
+}
+
+func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error {
+ dir, err := IsDir(u.base, name)
+ if err != nil {
+ return u.layer.MkdirAll(name, perm)
+ }
+ if dir {
+ return syscall.EEXIST
+ }
+ return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CopyOnWriteFs) Create(name string) (File, error) {
+ return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666)
+}
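
A small sketch of the copy-on-write union (file names illustrative): writing to a file that exists only in the base copies it up into the overlay, while the read-only base keeps its original contents:

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()
	afero.WriteFile(base, "/app/config.txt", []byte("original"), 0644) // error handling elided

	// Wrap the base read-only so the union can never modify it.
	ufs := afero.NewCopyOnWriteFs(afero.NewReadOnlyFs(base), afero.NewMemMapFs())

	// The file is copied up into the overlay before being rewritten there.
	if err := afero.WriteFile(ufs, "/app/config.txt", []byte("patched"), 0644); err != nil {
		fmt.Println("write failed:", err)
		return
	}

	fromUnion, _ := afero.ReadFile(ufs, "/app/config.txt")
	fromBase, _ := afero.ReadFile(base, "/app/config.txt")
	fmt.Println(string(fromUnion), string(fromBase)) // patched original
}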
diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs_test.go b/vendor/github.com/spf13/afero/copyOnWriteFs_test.go
new file mode 100644
index 000000000..2a00fab72
--- /dev/null
+++ b/vendor/github.com/spf13/afero/copyOnWriteFs_test.go
@@ -0,0 +1,23 @@
+package afero
+
+import "testing"
+
+func TestCopyOnWrite(t *testing.T) {
+ var fs Fs
+ var err error
+ base := NewOsFs()
+ roBase := NewReadOnlyFs(base)
+ ufs := NewCopyOnWriteFs(roBase, NewMemMapFs())
+ fs = ufs
+ err = fs.MkdirAll("nonexistent/directory/", 0744)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ _, err = fs.Create("nonexistent/directory/newfile")
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+}
diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go
new file mode 100644
index 000000000..c42193688
--- /dev/null
+++ b/vendor/github.com/spf13/afero/httpFs.go
@@ -0,0 +1,110 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "errors"
+ "net/http"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+type httpDir struct {
+ basePath string
+ fs HttpFs
+}
+
+func (d httpDir) Open(name string) (http.File, error) {
+ if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
+ strings.Contains(name, "\x00") {
+ return nil, errors.New("http: invalid character in file path")
+ }
+ dir := string(d.basePath)
+ if dir == "" {
+ dir = "."
+ }
+
+ f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name))))
+ if err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+type HttpFs struct {
+ source Fs
+}
+
+func NewHttpFs(source Fs) *HttpFs {
+ return &HttpFs{source: source}
+}
+
+func (h HttpFs) Dir(s string) *httpDir {
+ return &httpDir{basePath: s, fs: h}
+}
+
+func (h HttpFs) Name() string { return "h HttpFs" }
+
+func (h HttpFs) Create(name string) (File, error) {
+ return h.source.Create(name)
+}
+
+func (h HttpFs) Chmod(name string, mode os.FileMode) error {
+ return h.source.Chmod(name, mode)
+}
+
+func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+ return h.source.Chtimes(name, atime, mtime)
+}
+
+func (h HttpFs) Mkdir(name string, perm os.FileMode) error {
+ return h.source.Mkdir(name, perm)
+}
+
+func (h HttpFs) MkdirAll(path string, perm os.FileMode) error {
+ return h.source.MkdirAll(path, perm)
+}
+
+func (h HttpFs) Open(name string) (http.File, error) {
+ f, err := h.source.Open(name)
+ if err == nil {
+ if httpfile, ok := f.(http.File); ok {
+ return httpfile, nil
+ }
+ }
+ return nil, err
+}
+
+func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ return h.source.OpenFile(name, flag, perm)
+}
+
+func (h HttpFs) Remove(name string) error {
+ return h.source.Remove(name)
+}
+
+func (h HttpFs) RemoveAll(path string) error {
+ return h.source.RemoveAll(path)
+}
+
+func (h HttpFs) Rename(oldname, newname string) error {
+ return h.source.Rename(oldname, newname)
+}
+
+func (h HttpFs) Stat(name string) (os.FileInfo, error) {
+ return h.source.Stat(name)
+}
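
A sketch of serving an afero filesystem with net/http via the adapter above; the port and page content are placeholders:

package main

import (
	"log"
	"net/http"

	"github.com/spf13/afero"
)

func main() {
	appFs := afero.NewMemMapFs()
	afero.WriteFile(appFs, "/public/index.html", []byte("<h1>hello</h1>"), 0644) // error handling elided

	// HttpFs.Dir returns a value satisfying net/http's FileSystem interface,
	// so any afero.Fs can back a standard file server.
	httpFs := afero.NewHttpFs(appFs)
	http.Handle("/", http.FileServer(httpFs.Dir("/public")))

	log.Fatal(http.ListenAndServe(":8080", nil))
}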
diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go
new file mode 100644
index 000000000..5c3a3d8ff
--- /dev/null
+++ b/vendor/github.com/spf13/afero/ioutil.go
@@ -0,0 +1,230 @@
+// Copyright ©2015 The Go Authors
+// Copyright ©2015 Steve Francia <spf@spf13.com>
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "sync"
+ "time"
+)
+
+// byName implements sort.Interface.
+type byName []os.FileInfo
+
+func (f byName) Len() int { return len(f) }
+func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() }
+func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
+
+// ReadDir reads the directory named by dirname and returns
+// a list of sorted directory entries.
+func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) {
+ return ReadDir(a.Fs, dirname)
+}
+
+func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) {
+ f, err := fs.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ list, err := f.Readdir(-1)
+ f.Close()
+ if err != nil {
+ return nil, err
+ }
+ sort.Sort(byName(list))
+ return list, nil
+}
+
+// ReadFile reads the file named by filename and returns the contents.
+// A successful call returns err == nil, not err == EOF. Because ReadFile
+// reads the whole file, it does not treat an EOF from Read as an error
+// to be reported.
+func (a Afero) ReadFile(filename string) ([]byte, error) {
+ return ReadFile(a.Fs, filename)
+}
+
+func ReadFile(fs Fs, filename string) ([]byte, error) {
+ f, err := fs.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ // It's a good but not certain bet that FileInfo will tell us exactly how much to
+ // read, so let's try it but be prepared for the answer to be wrong.
+ var n int64
+
+ if fi, err := f.Stat(); err == nil {
+ // Don't preallocate a huge buffer, just in case.
+ if size := fi.Size(); size < 1e9 {
+ n = size
+ }
+ }
+ // As initial capacity for readAll, use n + a little extra in case Size is zero,
+ // and to avoid another allocation after Read has filled the buffer. The readAll
+ // call will read into its allocated internal buffer cheaply. If the size was
+ // wrong, we'll either waste some space off the end or reallocate as needed, but
+ // in the overwhelmingly common case we'll get it just right.
+ return readAll(f, n+bytes.MinRead)
+}
+
+// readAll reads from r until an error or EOF and returns the data it read
+// from the internal buffer allocated with a specified capacity.
+func readAll(r io.Reader, capacity int64) (b []byte, err error) {
+ buf := bytes.NewBuffer(make([]byte, 0, capacity))
+ // If the buffer overflows, we will get bytes.ErrTooLarge.
+ // Return that as an error. Any other panic remains.
+ defer func() {
+ e := recover()
+ if e == nil {
+ return
+ }
+ if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge {
+ err = panicErr
+ } else {
+ panic(e)
+ }
+ }()
+ _, err = buf.ReadFrom(r)
+ return buf.Bytes(), err
+}
+
+// ReadAll reads from r until an error or EOF and returns the data it read.
+// A successful call returns err == nil, not err == EOF. Because ReadAll is
+// defined to read from src until EOF, it does not treat an EOF from Read
+// as an error to be reported.
+func ReadAll(r io.Reader) ([]byte, error) {
+ return readAll(r, bytes.MinRead)
+}
+
+// WriteFile writes data to a file named by filename.
+// If the file does not exist, WriteFile creates it with permissions perm;
+// otherwise WriteFile truncates it before writing.
+func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error {
+ return WriteFile(a.Fs, filename, data, perm)
+}
+
+func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error {
+ f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+ if err != nil {
+ return err
+ }
+ n, err := f.Write(data)
+ if err == nil && n < len(data) {
+ err = io.ErrShortWrite
+ }
+ if err1 := f.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
+
+// Random number state.
+// We generate random temporary file names so that there's a good
+// chance the file doesn't exist yet - keeps the number of tries in
+// TempFile to a minimum.
+var rand uint32
+var randmu sync.Mutex
+
+func reseed() uint32 {
+ return uint32(time.Now().UnixNano() + int64(os.Getpid()))
+}
+
+func nextSuffix() string {
+ randmu.Lock()
+ r := rand
+ if r == 0 {
+ r = reseed()
+ }
+ r = r*1664525 + 1013904223 // constants from Numerical Recipes
+ rand = r
+ randmu.Unlock()
+ return strconv.Itoa(int(1e9 + r%1e9))[1:]
+}
+
+// TempFile creates a new temporary file in the directory dir
+// with a name beginning with prefix, opens the file for reading
+// and writing, and returns the resulting *File.
+// If dir is the empty string, TempFile uses the default directory
+// for temporary files (see os.TempDir).
+// Multiple programs calling TempFile simultaneously
+// will not choose the same file. The caller can use f.Name()
+// to find the pathname of the file. It is the caller's responsibility
+// to remove the file when no longer needed.
+func (a Afero) TempFile(dir, prefix string) (f File, err error) {
+ return TempFile(a.Fs, dir, prefix)
+}
+
+func TempFile(fs Fs, dir, prefix string) (f File, err error) {
+ if dir == "" {
+ dir = os.TempDir()
+ }
+
+ nconflict := 0
+ for i := 0; i < 10000; i++ {
+ name := filepath.Join(dir, prefix+nextSuffix())
+ f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
+ if os.IsExist(err) {
+ if nconflict++; nconflict > 10 {
+ randmu.Lock()
+ rand = reseed()
+ randmu.Unlock()
+ }
+ continue
+ }
+ break
+ }
+ return
+}
+
+// TempDir creates a new temporary directory in the directory dir
+// with a name beginning with prefix and returns the path of the
+// new directory. If dir is the empty string, TempDir uses the
+// default directory for temporary files (see os.TempDir).
+// Multiple programs calling TempDir simultaneously
+// will not choose the same directory. It is the caller's responsibility
+// to remove the directory when no longer needed.
+func (a Afero) TempDir(dir, prefix string) (name string, err error) {
+ return TempDir(a.Fs, dir, prefix)
+}
+func TempDir(fs Fs, dir, prefix string) (name string, err error) {
+ if dir == "" {
+ dir = os.TempDir()
+ }
+
+ nconflict := 0
+ for i := 0; i < 10000; i++ {
+ try := filepath.Join(dir, prefix+nextSuffix())
+ err = fs.Mkdir(try, 0700)
+ if os.IsExist(err) {
+ if nconflict++; nconflict > 10 {
+ randmu.Lock()
+ rand = reseed()
+ randmu.Unlock()
+ }
+ continue
+ }
+ if err == nil {
+ name = try
+ }
+ break
+ }
+ return
+}
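
These helpers mirror io/ioutil but take an explicit Fs; a short sketch against an in-memory filesystem (names illustrative):

package main

import (
	"fmt"
	"path/filepath"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()

	dir, err := afero.TempDir(fs, "", "example")
	if err != nil {
		fmt.Println("TempDir failed:", err)
		return
	}

	name := filepath.Join(dir, "greeting.txt")
	if err := afero.WriteFile(fs, name, []byte("hello"), 0644); err != nil {
		fmt.Println("WriteFile failed:", err)
		return
	}

	data, _ := afero.ReadFile(fs, name)
	entries, _ := afero.ReadDir(fs, dir)
	fmt.Println(string(data), len(entries)) // hello 1
}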
diff --git a/vendor/github.com/spf13/afero/ioutil_test.go b/vendor/github.com/spf13/afero/ioutil_test.go
new file mode 100644
index 000000000..e7c9f0698
--- /dev/null
+++ b/vendor/github.com/spf13/afero/ioutil_test.go
@@ -0,0 +1,112 @@
+// Copyright ©2015 The Go Authors
+// Copyright ©2015 Steve Francia <spf@spf13.com>
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import "testing"
+
+func checkSizePath(t *testing.T, path string, size int64) {
+ dir, err := testFS.Stat(path)
+ if err != nil {
+ t.Fatalf("Stat %q (looking for size %d): %s", path, size, err)
+ }
+ if dir.Size() != size {
+ t.Errorf("Stat %q: size %d want %d", path, dir.Size(), size)
+ }
+}
+
+func TestReadFile(t *testing.T) {
+ testFS = &MemMapFs{}
+ fsutil := &Afero{Fs: testFS}
+
+ testFS.Create("this_exists.go")
+ filename := "rumpelstilzchen"
+ contents, err := fsutil.ReadFile(filename)
+ if err == nil {
+ t.Fatalf("ReadFile %s: error expected, none found", filename)
+ }
+
+ filename = "this_exists.go"
+ contents, err = fsutil.ReadFile(filename)
+ if err != nil {
+ t.Fatalf("ReadFile %s: %v", filename, err)
+ }
+
+ checkSizePath(t, filename, int64(len(contents)))
+}
+
+func TestWriteFile(t *testing.T) {
+ testFS = &MemMapFs{}
+ fsutil := &Afero{Fs: testFS}
+ f, err := fsutil.TempFile("", "ioutil-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ filename := f.Name()
+ data := "Programming today is a race between software engineers striving to " +
+ "build bigger and better idiot-proof programs, and the Universe trying " +
+ "to produce bigger and better idiots. So far, the Universe is winning."
+
+ if err := fsutil.WriteFile(filename, []byte(data), 0644); err != nil {
+ t.Fatalf("WriteFile %s: %v", filename, err)
+ }
+
+ contents, err := fsutil.ReadFile(filename)
+ if err != nil {
+ t.Fatalf("ReadFile %s: %v", filename, err)
+ }
+
+ if string(contents) != data {
+ t.Fatalf("contents = %q\nexpected = %q", string(contents), data)
+ }
+
+ // cleanup
+ f.Close()
+ testFS.Remove(filename) // ignore error
+}
+
+func TestReadDir(t *testing.T) {
+ testFS = &MemMapFs{}
+ testFS.Mkdir("/i-am-a-dir", 0777)
+ testFS.Create("/this_exists.go")
+ dirname := "rumpelstilzchen"
+ _, err := ReadDir(testFS, dirname)
+ if err == nil {
+ t.Fatalf("ReadDir %s: error expected, none found", dirname)
+ }
+
+ dirname = ".."
+ list, err := ReadDir(testFS, dirname)
+ if err != nil {
+ t.Fatalf("ReadDir %s: %v", dirname, err)
+ }
+
+ foundFile := false
+ foundSubDir := false
+ for _, dir := range list {
+ switch {
+ case !dir.IsDir() && dir.Name() == "this_exists.go":
+ foundFile = true
+ case dir.IsDir() && dir.Name() == "i-am-a-dir":
+ foundSubDir = true
+ }
+ }
+ if !foundFile {
+ t.Fatalf("ReadDir %s: this_exists.go file not found", dirname)
+ }
+ if !foundSubDir {
+ t.Fatalf("ReadDir %s: i-am-a-dir directory not found", dirname)
+ }
+}
diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go
new file mode 100644
index 000000000..e104013f4
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/dir.go
@@ -0,0 +1,37 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+type Dir interface {
+ Len() int
+ Names() []string
+ Files() []*FileData
+ Add(*FileData)
+ Remove(*FileData)
+}
+
+func RemoveFromMemDir(dir *FileData, f *FileData) {
+ dir.memDir.Remove(f)
+}
+
+func AddToMemDir(dir *FileData, f *FileData) {
+ dir.memDir.Add(f)
+}
+
+func InitializeDir(d *FileData) {
+ if d.memDir == nil {
+ d.dir = true
+ d.memDir = &DirMap{}
+ }
+}
diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go
new file mode 100644
index 000000000..03a57ee5b
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/dirmap.go
@@ -0,0 +1,43 @@
+// Copyright © 2015 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+import "sort"
+
+type DirMap map[string]*FileData
+
+func (m DirMap) Len() int { return len(m) }
+func (m DirMap) Add(f *FileData) { m[f.name] = f }
+func (m DirMap) Remove(f *FileData) { delete(m, f.name) }
+func (m DirMap) Files() (files []*FileData) {
+ for _, f := range m {
+ files = append(files, f)
+ }
+ sort.Sort(filesSorter(files))
+ return files
+}
+
+// implement sort.Interface for []*FileData
+type filesSorter []*FileData
+
+func (s filesSorter) Len() int { return len(s) }
+func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name }
+
+func (m DirMap) Names() (names []string) {
+ for x := range m {
+ names = append(names, x)
+ }
+ return names
+}
diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go
new file mode 100644
index 000000000..e41e0123d
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/file.go
@@ -0,0 +1,285 @@
+// Copyright © 2015 Steve Francia <spf@spf13.com>.
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+const FilePathSeparator = string(filepath.Separator)
+
+type File struct {
+ // atomic requires 64-bit alignment for struct field access
+ at int64
+ readDirCount int64
+ closed bool
+ readOnly bool
+ fileData *FileData
+}
+
+func NewFileHandle(data *FileData) *File {
+ return &File{fileData: data}
+}
+
+func NewReadOnlyFileHandle(data *FileData) *File {
+ return &File{fileData: data, readOnly: true}
+}
+
+func (f File) Data() *FileData {
+ return f.fileData
+}
+
+type FileData struct {
+ sync.Mutex
+ name string
+ data []byte
+ memDir Dir
+ dir bool
+ mode os.FileMode
+ modtime time.Time
+}
+
+func (d *FileData) Name() string {
+ d.Lock()
+ defer d.Unlock()
+ return d.name
+}
+
+func CreateFile(name string) *FileData {
+ return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()}
+}
+
+func CreateDir(name string) *FileData {
+ return &FileData{name: name, memDir: &DirMap{}, dir: true}
+}
+
+func ChangeFileName(f *FileData, newname string) {
+ f.name = newname
+}
+
+func SetMode(f *FileData, mode os.FileMode) {
+ f.mode = mode
+}
+
+func SetModTime(f *FileData, mtime time.Time) {
+ f.modtime = mtime
+}
+
+func GetFileInfo(f *FileData) *FileInfo {
+ return &FileInfo{f}
+}
+
+func (f *File) Open() error {
+ atomic.StoreInt64(&f.at, 0)
+ atomic.StoreInt64(&f.readDirCount, 0)
+ f.fileData.Lock()
+ f.closed = false
+ f.fileData.Unlock()
+ return nil
+}
+
+func (f *File) Close() error {
+ f.fileData.Lock()
+ f.closed = true
+ if !f.readOnly {
+ SetModTime(f.fileData, time.Now())
+ }
+ f.fileData.Unlock()
+ return nil
+}
+
+func (f *File) Name() string {
+ return f.fileData.Name()
+}
+
+func (f *File) Stat() (os.FileInfo, error) {
+ return &FileInfo{f.fileData}, nil
+}
+
+func (f *File) Sync() error {
+ return nil
+}
+
+func (f *File) Readdir(count int) (res []os.FileInfo, err error) {
+ var outLength int64
+
+ f.fileData.Lock()
+ files := f.fileData.memDir.Files()[f.readDirCount:]
+ if count > 0 {
+ if len(files) < count {
+ outLength = int64(len(files))
+ } else {
+ outLength = int64(count)
+ }
+ if len(files) == 0 {
+ err = io.EOF
+ }
+ } else {
+ outLength = int64(len(files))
+ }
+ f.readDirCount += outLength
+ f.fileData.Unlock()
+
+ res = make([]os.FileInfo, outLength)
+ for i := range res {
+ res[i] = &FileInfo{files[i]}
+ }
+
+ return res, err
+}
+
+func (f *File) Readdirnames(n int) (names []string, err error) {
+ fi, err := f.Readdir(n)
+ names = make([]string, len(fi))
+ for i, f := range fi {
+ _, names[i] = filepath.Split(f.Name())
+ }
+ return names, err
+}
+
+func (f *File) Read(b []byte) (n int, err error) {
+ f.fileData.Lock()
+ defer f.fileData.Unlock()
+ if f.closed == true {
+ return 0, ErrFileClosed
+ }
+ if len(b) > 0 && int(f.at) == len(f.fileData.data) {
+ return 0, io.EOF
+ }
+ if len(f.fileData.data)-int(f.at) >= len(b) {
+ n = len(b)
+ } else {
+ n = len(f.fileData.data) - int(f.at)
+ }
+ copy(b, f.fileData.data[f.at:f.at+int64(n)])
+ atomic.AddInt64(&f.at, int64(n))
+ return
+}
+
+func (f *File) ReadAt(b []byte, off int64) (n int, err error) {
+ atomic.StoreInt64(&f.at, off)
+ return f.Read(b)
+}
+
+func (f *File) Truncate(size int64) error {
+ if f.closed == true {
+ return ErrFileClosed
+ }
+ if f.readOnly {
+ return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")}
+ }
+ if size < 0 {
+ return ErrOutOfRange
+ }
+ if size > int64(len(f.fileData.data)) {
+ diff := size - int64(len(f.fileData.data))
+ f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...)
+ } else {
+ f.fileData.data = f.fileData.data[0:size]
+ }
+ SetModTime(f.fileData, time.Now())
+ return nil
+}
+
+func (f *File) Seek(offset int64, whence int) (int64, error) {
+ if f.closed == true {
+ return 0, ErrFileClosed
+ }
+ switch whence {
+ case 0:
+ atomic.StoreInt64(&f.at, offset)
+ case 1:
+ atomic.AddInt64(&f.at, int64(offset))
+ case 2:
+ atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset)
+ }
+ return f.at, nil
+}
+
+func (f *File) Write(b []byte) (n int, err error) {
+ if f.readOnly {
+ return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")}
+ }
+ n = len(b)
+ cur := atomic.LoadInt64(&f.at)
+ f.fileData.Lock()
+ defer f.fileData.Unlock()
+ diff := cur - int64(len(f.fileData.data))
+ var tail []byte
+ if n+int(cur) < len(f.fileData.data) {
+ tail = f.fileData.data[n+int(cur):]
+ }
+ if diff > 0 {
+ f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...)
+ f.fileData.data = append(f.fileData.data, tail...)
+ } else {
+ f.fileData.data = append(f.fileData.data[:cur], b...)
+ f.fileData.data = append(f.fileData.data, tail...)
+ }
+ SetModTime(f.fileData, time.Now())
+
+ atomic.StoreInt64(&f.at, int64(len(f.fileData.data)))
+ return
+}
+
+func (f *File) WriteAt(b []byte, off int64) (n int, err error) {
+ atomic.StoreInt64(&f.at, off)
+ return f.Write(b)
+}
+
+func (f *File) WriteString(s string) (ret int, err error) {
+ return f.Write([]byte(s))
+}
+
+func (f *File) Info() *FileInfo {
+ return &FileInfo{f.fileData}
+}
+
+type FileInfo struct {
+ *FileData
+}
+
+// Implements os.FileInfo
+func (s *FileInfo) Name() string {
+ _, name := filepath.Split(s.name)
+ return name
+}
+func (s *FileInfo) Mode() os.FileMode { return s.mode }
+func (s *FileInfo) ModTime() time.Time { return s.modtime }
+func (s *FileInfo) IsDir() bool { return s.dir }
+func (s *FileInfo) Sys() interface{} { return nil }
+func (s *FileInfo) Size() int64 {
+ if s.IsDir() {
+ return int64(42)
+ }
+ return int64(len(s.data))
+}
+
+var (
+ ErrFileClosed = errors.New("File is closed")
+ ErrOutOfRange = errors.New("Out of range")
+ ErrTooLarge = errors.New("Too large")
+ ErrFileNotFound = os.ErrNotExist
+ ErrFileExists = os.ErrExist
+ ErrDestinationExists = os.ErrExist
+)
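
The mem types are normally driven indirectly through MemMapFs, but as a rough sketch a FileData and its handles can also be exercised directly (file name illustrative):

package main

import (
	"fmt"

	"github.com/spf13/afero/mem"
)

func main() {
	// FileData holds the contents; each File is one open handle onto it.
	data := mem.CreateFile("/notes.txt")
	w := mem.NewFileHandle(data)
	w.WriteString("backed entirely by memory")
	w.Close()

	r := mem.NewReadOnlyFileHandle(data)
	buf := make([]byte, 64)
	n, _ := r.Read(buf)
	fmt.Println(string(buf[:n]))

	info := mem.GetFileInfo(data)
	fmt.Println(info.Name(), info.Size()) // "notes.txt" and the length of the data written
}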
diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go
new file mode 100644
index 000000000..767ac1d5f
--- /dev/null
+++ b/vendor/github.com/spf13/afero/memmap.go
@@ -0,0 +1,361 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/spf13/afero/mem"
+)
+
+type MemMapFs struct {
+ mu sync.RWMutex
+ data map[string]*mem.FileData
+ init sync.Once
+}
+
+func NewMemMapFs() Fs {
+ return &MemMapFs{}
+}
+
+func (m *MemMapFs) getData() map[string]*mem.FileData {
+ m.init.Do(func() {
+ m.data = make(map[string]*mem.FileData)
+ // Root should always exist, right?
+ // TODO: what about windows?
+ m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator)
+ })
+ return m.data
+}
+
+func (*MemMapFs) Name() string { return "MemMapFS" }
+
+func (m *MemMapFs) Create(name string) (File, error) {
+ name = normalizePath(name)
+ m.mu.Lock()
+ file := mem.CreateFile(name)
+ m.getData()[name] = file
+ m.registerWithParent(file)
+ m.mu.Unlock()
+ return mem.NewFileHandle(file), nil
+}
+
+func (m *MemMapFs) unRegisterWithParent(fileName string) error {
+ f, err := m.lockfreeOpen(fileName)
+ if err != nil {
+ return err
+ }
+ parent := m.findParent(f)
+ if parent == nil {
+ log.Panic("parent of ", f.Name(), " is nil")
+ }
+ mem.RemoveFromMemDir(parent, f)
+ return nil
+}
+
+func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData {
+ pdir, _ := filepath.Split(f.Name())
+ pdir = filepath.Clean(pdir)
+ pfile, err := m.lockfreeOpen(pdir)
+ if err != nil {
+ return nil
+ }
+ return pfile
+}
+
+func (m *MemMapFs) registerWithParent(f *mem.FileData) {
+ if f == nil {
+ return
+ }
+ parent := m.findParent(f)
+ if parent == nil {
+ pdir := filepath.Dir(filepath.Clean(f.Name()))
+ err := m.lockfreeMkdir(pdir, 0777)
+ if err != nil {
+ //log.Println("Mkdir error:", err)
+ return
+ }
+ parent, err = m.lockfreeOpen(pdir)
+ if err != nil {
+ //log.Println("Open after Mkdir error:", err)
+ return
+ }
+ }
+
+ mem.InitializeDir(parent)
+ mem.AddToMemDir(parent, f)
+}
+
+func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error {
+ name = normalizePath(name)
+ x, ok := m.getData()[name]
+ if ok {
+ // Only return ErrFileExists if it's a file, not a directory.
+ i := mem.FileInfo{FileData: x}
+ if !i.IsDir() {
+ return ErrFileExists
+ }
+ } else {
+ item := mem.CreateDir(name)
+ m.getData()[name] = item
+ m.registerWithParent(item)
+ }
+ return nil
+}
+
+func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error {
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ _, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if ok {
+ return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists}
+ }
+
+ m.mu.Lock()
+ item := mem.CreateDir(name)
+ m.getData()[name] = item
+ m.registerWithParent(item)
+ m.mu.Unlock()
+
+ m.Chmod(name, perm)
+
+ return nil
+}
+
+func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error {
+ err := m.Mkdir(path, perm)
+ if err != nil {
+ if err.(*os.PathError).Err == ErrFileExists {
+ return nil
+ } else {
+ return err
+ }
+ }
+ return nil
+}
+
+// Handle some relative paths
+func normalizePath(path string) string {
+ path = filepath.Clean(path)
+
+ switch path {
+ case ".":
+ return FilePathSeparator
+ case "..":
+ return FilePathSeparator
+ default:
+ return path
+ }
+}
+
+func (m *MemMapFs) Open(name string) (File, error) {
+ f, err := m.open(name)
+ if f != nil {
+ return mem.NewReadOnlyFileHandle(f), err
+ }
+ return nil, err
+}
+
+func (m *MemMapFs) openWrite(name string) (File, error) {
+ f, err := m.open(name)
+ if f != nil {
+ return mem.NewFileHandle(f), err
+ }
+ return nil, err
+}
+
+func (m *MemMapFs) open(name string) (*mem.FileData, error) {
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ f, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if !ok {
+ return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound}
+ }
+ return f, nil
+}
+
+func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) {
+ name = normalizePath(name)
+ f, ok := m.getData()[name]
+ if ok {
+ return f, nil
+ } else {
+ return nil, ErrFileNotFound
+ }
+}
+
+func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ chmod := false
+ file, err := m.openWrite(name)
+ if os.IsNotExist(err) && (flag&os.O_CREATE > 0) {
+ file, err = m.Create(name)
+ chmod = true
+ }
+ if err != nil {
+ return nil, err
+ }
+ if flag == os.O_RDONLY {
+ file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data())
+ }
+ if flag&os.O_APPEND > 0 {
+ _, err = file.Seek(0, os.SEEK_END)
+ if err != nil {
+ file.Close()
+ return nil, err
+ }
+ }
+ if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 {
+ err = file.Truncate(0)
+ if err != nil {
+ file.Close()
+ return nil, err
+ }
+ }
+ if chmod {
+ m.Chmod(name, perm)
+ }
+ return file, nil
+}
+
+func (m *MemMapFs) Remove(name string) error {
+ name = normalizePath(name)
+
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ if _, ok := m.getData()[name]; ok {
+ err := m.unRegisterWithParent(name)
+ if err != nil {
+ return &os.PathError{Op: "remove", Path: name, Err: err}
+ }
+ delete(m.getData(), name)
+ } else {
+ return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist}
+ }
+ return nil
+}
+
+func (m *MemMapFs) RemoveAll(path string) error {
+ path = normalizePath(path)
+ m.mu.Lock()
+ m.unRegisterWithParent(path)
+ m.mu.Unlock()
+
+ m.mu.RLock()
+ defer m.mu.RUnlock()
+
+ for p, _ := range m.getData() {
+ if strings.HasPrefix(p, path) {
+ m.mu.RUnlock()
+ m.mu.Lock()
+ delete(m.getData(), p)
+ m.mu.Unlock()
+ m.mu.RLock()
+ }
+ }
+ return nil
+}
+
+func (m *MemMapFs) Rename(oldname, newname string) error {
+ oldname = normalizePath(oldname)
+ newname = normalizePath(newname)
+
+ if oldname == newname {
+ return nil
+ }
+
+ m.mu.RLock()
+ defer m.mu.RUnlock()
+ if _, ok := m.getData()[oldname]; ok {
+ m.mu.RUnlock()
+ m.mu.Lock()
+ m.unRegisterWithParent(oldname)
+ fileData := m.getData()[oldname]
+ delete(m.getData(), oldname)
+ mem.ChangeFileName(fileData, newname)
+ m.getData()[newname] = fileData
+ m.registerWithParent(fileData)
+ m.mu.Unlock()
+ m.mu.RLock()
+ } else {
+ return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound}
+ }
+ return nil
+}
+
+func (m *MemMapFs) Stat(name string) (os.FileInfo, error) {
+ f, err := m.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ fi := mem.GetFileInfo(f.(*mem.File).Data())
+ return fi, nil
+}
+
+func (m *MemMapFs) Chmod(name string, mode os.FileMode) error {
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ f, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if !ok {
+ return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound}
+ }
+
+ m.mu.Lock()
+ mem.SetMode(f, mode)
+ m.mu.Unlock()
+
+ return nil
+}
+
+func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+ name = normalizePath(name)
+
+ m.mu.RLock()
+ f, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if !ok {
+ return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound}
+ }
+
+ m.mu.Lock()
+ mem.SetModTime(f, mtime)
+ m.mu.Unlock()
+
+ return nil
+}
+
+func (m *MemMapFs) List() {
+ for _, x := range m.data {
+ y := mem.FileInfo{FileData: x}
+ fmt.Println(x.Name(), y.Size())
+ }
+}
+
+// func debugMemMapList(fs Fs) {
+// if x, ok := fs.(*MemMapFs); ok {
+// x.List()
+// }
+// }
diff --git a/vendor/github.com/spf13/afero/memmap_test.go b/vendor/github.com/spf13/afero/memmap_test.go
new file mode 100644
index 000000000..ca5abbca6
--- /dev/null
+++ b/vendor/github.com/spf13/afero/memmap_test.go
@@ -0,0 +1,345 @@
+package afero
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+ "time"
+)
+
+func TestNormalizePath(t *testing.T) {
+ type test struct {
+ input string
+ expected string
+ }
+
+ data := []test{
+ {".", FilePathSeparator},
+ {"./", FilePathSeparator},
+ {"..", FilePathSeparator},
+ {"../", FilePathSeparator},
+ {"./..", FilePathSeparator},
+ {"./../", FilePathSeparator},
+ }
+
+ for i, d := range data {
+ cpath := normalizePath(d.input)
+ if d.expected != cpath {
+ t.Errorf("Test %d failed. Expected %q got %q", i, d.expected, cpath)
+ }
+ }
+}
+
+func TestPathErrors(t *testing.T) {
+ path := filepath.Join(".", "some", "path")
+ path2 := filepath.Join(".", "different", "path")
+ fs := NewMemMapFs()
+ perm := os.FileMode(0755)
+
+ // relevant functions:
+ // func (m *MemMapFs) Chmod(name string, mode os.FileMode) error
+ // func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error
+ // func (m *MemMapFs) Create(name string) (File, error)
+ // func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error
+ // func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error
+ // func (m *MemMapFs) Open(name string) (File, error)
+ // func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error)
+ // func (m *MemMapFs) Remove(name string) error
+ // func (m *MemMapFs) Rename(oldname, newname string) error
+ // func (m *MemMapFs) Stat(name string) (os.FileInfo, error)
+
+ err := fs.Chmod(path, perm)
+ checkPathError(t, err, "Chmod")
+
+ err = fs.Chtimes(path, time.Now(), time.Now())
+ checkPathError(t, err, "Chtimes")
+
+ // fs.Create doesn't return an error
+
+ err = fs.Mkdir(path2, perm)
+ if err != nil {
+ t.Error(err)
+ }
+ err = fs.Mkdir(path2, perm)
+ checkPathError(t, err, "Mkdir")
+
+ err = fs.MkdirAll(path2, perm)
+ if err != nil {
+ t.Error("MkdirAll:", err)
+ }
+
+ _, err = fs.Open(path)
+ checkPathError(t, err, "Open")
+
+ _, err = fs.OpenFile(path, os.O_RDWR, perm)
+ checkPathError(t, err, "OpenFile")
+
+ err = fs.Remove(path)
+ checkPathError(t, err, "Remove")
+
+ err = fs.RemoveAll(path)
+ if err != nil {
+ t.Error("RemoveAll:", err)
+ }
+
+ err = fs.Rename(path, path2)
+ checkPathError(t, err, "Rename")
+
+ _, err = fs.Stat(path)
+ checkPathError(t, err, "Stat")
+}
+
+func checkPathError(t *testing.T, err error, op string) {
+ pathErr, ok := err.(*os.PathError)
+ if !ok {
+ t.Error(op+":", err, "is not an os.PathError")
+ return
+ }
+ _, ok = pathErr.Err.(*os.PathError)
+ if ok {
+ t.Error(op+":", err, "contains another os.PathError")
+ }
+}
+
+// Ensure Permissions are set on OpenFile/Mkdir/MkdirAll
+func TestPermSet(t *testing.T) {
+ const fileName = "/myFileTest"
+ const dirPath = "/myDirTest"
+ const dirPathAll = "/my/path/to/dir"
+
+ const fileMode = os.FileMode(0765)
+
+ fs := NewMemMapFs()
+
+ // Test Openfile
+ f, err := fs.OpenFile(fileName, os.O_CREATE, fileMode)
+ if err != nil {
+ t.Errorf("OpenFile Create failed: %s", err)
+ return
+ }
+ f.Close()
+
+ s, err := fs.Stat(fileName)
+ if err != nil {
+ t.Errorf("Stat failed: %s", err)
+ return
+ }
+ if s.Mode().String() != fileMode.String() {
+ t.Errorf("Permissions Incorrect: %s != %s", s.Mode().String(), fileMode.String())
+ return
+ }
+
+ // Test Mkdir
+ err = fs.Mkdir(dirPath, fileMode)
+ if err != nil {
+ t.Errorf("MkDir Create failed: %s", err)
+ return
+ }
+ s, err = fs.Stat(dirPath)
+ if err != nil {
+ t.Errorf("Stat failed: %s", err)
+ return
+ }
+ if s.Mode().String() != fileMode.String() {
+ t.Errorf("Permissions Incorrect: %s != %s", s.Mode().String(), fileMode.String())
+ return
+ }
+
+ // Test MkdirAll
+ err = fs.MkdirAll(dirPathAll, fileMode)
+ if err != nil {
+ t.Errorf("MkDir Create failed: %s", err)
+ return
+ }
+ s, err = fs.Stat(dirPathAll)
+ if err != nil {
+ t.Errorf("Stat failed: %s", err)
+ return
+ }
+ if s.Mode().String() != fileMode.String() {
+ t.Errorf("Permissions Incorrect: %s != %s", s.Mode().String(), fileMode.String())
+ return
+ }
+}
+
+// Fails if multiple file objects use the same file.at counter in MemMapFs
+func TestMultipleOpenFiles(t *testing.T) {
+ defer removeAllTestFiles(t)
+ const fileName = "afero-demo2.txt"
+
+ var data = make([][]byte, len(Fss))
+
+ for i, fs := range Fss {
+ dir := testDir(fs)
+ path := filepath.Join(dir, fileName)
+ fh1, err := fs.Create(path)
+ if err != nil {
+ t.Error("fs.Create failed: " + err.Error())
+ }
+ _, err = fh1.Write([]byte("test"))
+ if err != nil {
+ t.Error("fh.Write failed: " + err.Error())
+ }
+ _, err = fh1.Seek(0, os.SEEK_SET)
+ if err != nil {
+ t.Error(err)
+ }
+
+ fh2, err := fs.OpenFile(path, os.O_RDWR, 0777)
+ if err != nil {
+ t.Error("fs.OpenFile failed: " + err.Error())
+ }
+ _, err = fh2.Seek(0, os.SEEK_END)
+ if err != nil {
+ t.Error(err)
+ }
+ _, err = fh2.Write([]byte("data"))
+ if err != nil {
+ t.Error(err)
+ }
+ err = fh2.Close()
+ if err != nil {
+ t.Error(err)
+ }
+
+ _, err = fh1.Write([]byte("data"))
+ if err != nil {
+ t.Error(err)
+ }
+ err = fh1.Close()
+ if err != nil {
+ t.Error(err)
+ }
+ // the file now should contain "datadata"
+ data[i], err = ReadFile(fs, path)
+ if err != nil {
+ t.Error(err)
+ }
+ }
+
+ for i, fs := range Fss {
+ if i == 0 {
+ continue
+ }
+ if string(data[0]) != string(data[i]) {
+ t.Errorf("%s and %s don't behave the same\n"+
+ "%s: \"%s\"\n%s: \"%s\"\n",
+ Fss[0].Name(), fs.Name(), Fss[0].Name(), data[0], fs.Name(), data[i])
+ }
+ }
+}
+
+// Test if file.Write() fails when opened as read only
+func TestReadOnly(t *testing.T) {
+ defer removeAllTestFiles(t)
+ const fileName = "afero-demo.txt"
+
+ for _, fs := range Fss {
+ dir := testDir(fs)
+ path := filepath.Join(dir, fileName)
+
+ f, err := fs.Create(path)
+ if err != nil {
+ t.Error(fs.Name()+":", "fs.Create failed: "+err.Error())
+ }
+ _, err = f.Write([]byte("test"))
+ if err != nil {
+ t.Error(fs.Name()+":", "Write failed: "+err.Error())
+ }
+ f.Close()
+
+ f, err = fs.Open(path)
+ if err != nil {
+ t.Error("fs.Open failed: " + err.Error())
+ }
+ _, err = f.Write([]byte("data"))
+ if err == nil {
+ t.Error(fs.Name()+":", "No write error")
+ }
+ f.Close()
+
+ f, err = fs.OpenFile(path, os.O_RDONLY, 0644)
+ if err != nil {
+ t.Error("fs.Open failed: " + err.Error())
+ }
+ _, err = f.Write([]byte("data"))
+ if err == nil {
+ t.Error(fs.Name()+":", "No write error")
+ }
+ f.Close()
+ }
+}
+
+func TestWriteCloseTime(t *testing.T) {
+ defer removeAllTestFiles(t)
+ const fileName = "afero-demo.txt"
+
+ for _, fs := range Fss {
+ dir := testDir(fs)
+ path := filepath.Join(dir, fileName)
+
+ f, err := fs.Create(path)
+ if err != nil {
+ t.Error(fs.Name()+":", "fs.Create failed: "+err.Error())
+ }
+ f.Close()
+
+ f, err = fs.Create(path)
+ if err != nil {
+ t.Error(fs.Name()+":", "fs.Create failed: "+err.Error())
+ }
+ fi, err := f.Stat()
+ if err != nil {
+ t.Error(fs.Name()+":", "Stat failed: "+err.Error())
+ }
+ timeBefore := fi.ModTime()
+
+ // sorry for the delay, but we have to make sure time advances,
+ // also on non-Un*x systems...
+ switch runtime.GOOS {
+ case "windows":
+ time.Sleep(2 * time.Second)
+ case "darwin":
+ time.Sleep(1 * time.Second)
+ default: // depending on the FS, this may work with < 1 second, on my old ext3 it does not
+ time.Sleep(1 * time.Second)
+ }
+
+ _, err = f.Write([]byte("test"))
+ if err != nil {
+ t.Error(fs.Name()+":", "Write failed: "+err.Error())
+ }
+ f.Close()
+ fi, err = fs.Stat(path)
+ if err != nil {
+ t.Error(fs.Name()+":", "fs.Stat failed: "+err.Error())
+ }
+ if fi.ModTime().Equal(timeBefore) {
+ t.Error(fs.Name()+":", "ModTime was not set on Close()")
+ }
+ }
+}
+
+// This test should be run with the race detector on:
+// go test -race -v -timeout 10s -run TestRacingDeleteAndClose
+func TestRacingDeleteAndClose(t *testing.T) {
+ fs := NewMemMapFs()
+ pathname := "testfile"
+ f, err := fs.Create(pathname)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ in := make(chan bool)
+
+ go func() {
+ <-in
+ f.Close()
+ }()
+ go func() {
+ <-in
+ fs.Remove(pathname)
+ }()
+ close(in)
+}
diff --git a/vendor/github.com/spf13/afero/memradix.go b/vendor/github.com/spf13/afero/memradix.go
new file mode 100644
index 000000000..87527f35a
--- /dev/null
+++ b/vendor/github.com/spf13/afero/memradix.go
@@ -0,0 +1,14 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go
new file mode 100644
index 000000000..6b8bce1c5
--- /dev/null
+++ b/vendor/github.com/spf13/afero/os.go
@@ -0,0 +1,94 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "os"
+ "time"
+)
+
+// OsFs is an Fs implementation that uses functions provided by the os package.
+//
+// For details in any method, check the documentation of the os package
+// (http://golang.org/pkg/os/).
+type OsFs struct{}
+
+func NewOsFs() Fs {
+ return &OsFs{}
+}
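+
+// A minimal usage sketch (illustrative only, not part of the upstream file):
+// OsFs delegates straight to the os package, so it behaves like the real
+// filesystem. The path below is just an example.
+//
+//   var fs Fs = NewOsFs()
+//   f, err := fs.Create("/tmp/afero-example.txt")
+//   if err == nil {
+//       f.WriteString("hello")
+//       f.Close()
+//   }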
+
+func (OsFs) Name() string { return "OsFs" }
+
+func (OsFs) Create(name string) (File, error) {
+ f, e := os.Create(name)
+ if f == nil {
+ // While this looks strange, we must return an untyped nil here, not a nil
+ // *os.File wrapped in the File interface; otherwise the returned value
+ // would not compare equal to nil for callers.
+ return nil, e
+ }
+ return f, e
+}
+
+func (OsFs) Mkdir(name string, perm os.FileMode) error {
+ return os.Mkdir(name, perm)
+}
+
+func (OsFs) MkdirAll(path string, perm os.FileMode) error {
+ return os.MkdirAll(path, perm)
+}
+
+func (OsFs) Open(name string) (File, error) {
+ f, e := os.Open(name)
+ if f == nil {
+ // While this looks strange, we must return an untyped nil here, not a nil
+ // *os.File wrapped in the File interface; otherwise the returned value
+ // would not compare equal to nil for callers.
+ return nil, e
+ }
+ return f, e
+}
+
+func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ f, e := os.OpenFile(name, flag, perm)
+ if f == nil {
+ // While this looks strange, we must return an untyped nil here, not a nil
+ // *os.File wrapped in the File interface; otherwise the returned value
+ // would not compare equal to nil for callers.
+ return nil, e
+ }
+ return f, e
+}
+
+func (OsFs) Remove(name string) error {
+ return os.Remove(name)
+}
+
+func (OsFs) RemoveAll(path string) error {
+ return os.RemoveAll(path)
+}
+
+func (OsFs) Rename(oldname, newname string) error {
+ return os.Rename(oldname, newname)
+}
+
+func (OsFs) Stat(name string) (os.FileInfo, error) {
+ return os.Stat(name)
+}
+
+func (OsFs) Chmod(name string, mode os.FileMode) error {
+ return os.Chmod(name, mode)
+}
+
+func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+ return os.Chtimes(name, atime, mtime)
+}
diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go
new file mode 100644
index 000000000..1d90e46dd
--- /dev/null
+++ b/vendor/github.com/spf13/afero/path.go
@@ -0,0 +1,108 @@
+// Copyright ©2015 The Go Authors
+// Copyright ©2015 Steve Francia <spf@spf13.com>
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "os"
+ "path/filepath"
+ "sort"
+)
+
+// readDirNames reads the directory named by dirname and returns
+// a sorted list of directory entries.
+// adapted from https://golang.org/src/path/filepath/path.go
+func readDirNames(fs Fs, dirname string) ([]string, error) {
+ f, err := fs.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ names, err := f.Readdirnames(-1)
+ f.Close()
+ if err != nil {
+ return nil, err
+ }
+ sort.Strings(names)
+ return names, nil
+}
+
+// walk recursively descends path, calling walkFn
+// adapted from https://golang.org/src/path/filepath/path.go
+func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
+ err := walkFn(path, info, nil)
+ if err != nil {
+ if info.IsDir() && err == filepath.SkipDir {
+ return nil
+ }
+ return err
+ }
+
+ if !info.IsDir() {
+ return nil
+ }
+
+ names, err := readDirNames(fs, path)
+ if err != nil {
+ return walkFn(path, info, err)
+ }
+
+ for _, name := range names {
+ filename := filepath.Join(path, name)
+ fileInfo, err := lstatIfOs(fs, filename)
+ if err != nil {
+ if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
+ return err
+ }
+ } else {
+ err = walk(fs, filename, fileInfo, walkFn)
+ if err != nil {
+ if !fileInfo.IsDir() || err != filepath.SkipDir {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// if the filesystem is OsFs use Lstat, else use fs.Stat
+func lstatIfOs(fs Fs, path string) (info os.FileInfo, err error) {
+ _, ok := fs.(*OsFs)
+ if ok {
+ info, err = os.Lstat(path)
+ } else {
+ info, err = fs.Stat(path)
+ }
+ return
+}
+
+// Walk walks the file tree rooted at root, calling walkFn for each file or
+// directory in the tree, including root. All errors that arise visiting files
+// and directories are filtered by walkFn. The files are walked in lexical
+// order, which makes the output deterministic but means that for very
+// large directories Walk can be inefficient.
+// Walk does not follow symbolic links.
+func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error {
+ return Walk(a.Fs, root, walkFn)
+}
+
+func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error {
+ info, err := lstatIfOs(fs, root)
+ if err != nil {
+ return walkFn(root, nil, err)
+ }
+ return walk(fs, root, info, walkFn)
+}
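+
+// A minimal usage sketch (illustrative only; fmt is assumed to be imported by
+// the caller, not by this file):
+//
+//   fs := NewMemMapFs()
+//   err := Walk(fs, "/", func(path string, info os.FileInfo, err error) error {
+//       if err != nil {
+//           return err
+//       }
+//       fmt.Println(path, info.IsDir())
+//       return nil
+//   })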
diff --git a/vendor/github.com/spf13/afero/path_test.go b/vendor/github.com/spf13/afero/path_test.go
new file mode 100644
index 000000000..104a6bcbe
--- /dev/null
+++ b/vendor/github.com/spf13/afero/path_test.go
@@ -0,0 +1,69 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+// Copyright 2009 The Go Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "fmt"
+ "os"
+ "testing"
+)
+
+func TestWalk(t *testing.T) {
+ defer removeAllTestFiles(t)
+ var testDir string
+ for i, fs := range Fss {
+ if i == 0 {
+ testDir = setupTestDirRoot(t, fs)
+ } else {
+ setupTestDirReusePath(t, fs, testDir)
+ }
+ }
+
+ outputs := make([]string, len(Fss))
+ for i, fs := range Fss {
+ walkFn := func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ t.Error("walkFn err:", err)
+ }
+ var size int64
+ if !info.IsDir() {
+ size = info.Size()
+ }
+ outputs[i] += fmt.Sprintln(path, info.Name(), size, info.IsDir(), err)
+ return nil
+ }
+ err := Walk(fs, testDir, walkFn)
+ if err != nil {
+ t.Error(err)
+ }
+ }
+ fail := false
+ for i, o := range outputs {
+ if i == 0 {
+ continue
+ }
+ if o != outputs[i-1] {
+ fail = true
+ break
+ }
+ }
+ if fail {
+ t.Log("Walk outputs not equal!")
+ for i, o := range outputs {
+ t.Log(Fss[i].Name() + "\n" + o)
+ }
+ t.Fail()
+ }
+}
diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go
new file mode 100644
index 000000000..f1fa55bcf
--- /dev/null
+++ b/vendor/github.com/spf13/afero/readonlyfs.go
@@ -0,0 +1,70 @@
+package afero
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+type ReadOnlyFs struct {
+ source Fs
+}
+
+func NewReadOnlyFs(source Fs) Fs {
+ return &ReadOnlyFs{source: source}
+}
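+
+// A minimal usage sketch (illustrative only): ReadOnlyFs passes reads through
+// to the wrapped Fs and rejects every mutating call with syscall.EPERM.
+//
+//   ro := NewReadOnlyFs(NewMemMapFs())
+//   _, err := ro.Create("/file.txt") // err == syscall.EPERM
+//   _, err = ro.Stat("/file.txt")    // delegated to the source Fs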
+
+func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) {
+ return ReadDir(r.source, name)
+}
+
+func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Name() string {
+ return "ReadOnlyFilter"
+}
+
+func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) {
+ return r.source.Stat(name)
+}
+
+func (r *ReadOnlyFs) Rename(o, n string) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) RemoveAll(p string) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Remove(n string) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+ return nil, syscall.EPERM
+ }
+ return r.source.OpenFile(name, flag, perm)
+}
+
+func (r *ReadOnlyFs) Open(n string) (File, error) {
+ return r.source.Open(n)
+}
+
+func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error {
+ return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Create(n string) (File, error) {
+ return nil, syscall.EPERM
+}
diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go
new file mode 100644
index 000000000..9d92dbc05
--- /dev/null
+++ b/vendor/github.com/spf13/afero/regexpfs.go
@@ -0,0 +1,214 @@
+package afero
+
+import (
+ "os"
+ "regexp"
+ "syscall"
+ "time"
+)
+
+// The RegexpFs filters files (not directories) by regular expression. Only
+// files matching the given regexp are allowed; all others get an ENOENT
+// ("No such file or directory") error.
+type RegexpFs struct {
+ re *regexp.Regexp
+ source Fs
+}
+
+func NewRegexpFs(source Fs, re *regexp.Regexp) Fs {
+ return &RegexpFs{source: source, re: re}
+}
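+
+// A minimal usage sketch (illustrative only): only file names matching the
+// regexp pass through; directory operations are not filtered.
+//
+//   fs := NewRegexpFs(NewMemMapFs(), regexp.MustCompile(`\.txt$`))
+//   _, err := fs.Create("/notes.md")  // rejected with ENOENT
+//   f, err := fs.Create("/notes.txt") // allowed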
+
+type RegexpFile struct {
+ f File
+ re *regexp.Regexp
+}
+
+func (r *RegexpFs) matchesName(name string) error {
+ if r.re == nil {
+ return nil
+ }
+ if r.re.MatchString(name) {
+ return nil
+ }
+ return syscall.ENOENT
+}
+
+func (r *RegexpFs) dirOrMatches(name string) error {
+ dir, err := IsDir(r.source, name)
+ if err != nil {
+ return err
+ }
+ if dir {
+ return nil
+ }
+ return r.matchesName(name)
+}
+
+func (r *RegexpFs) Chtimes(name string, a, m time.Time) error {
+ if err := r.dirOrMatches(name); err != nil {
+ return err
+ }
+ return r.source.Chtimes(name, a, m)
+}
+
+func (r *RegexpFs) Chmod(name string, mode os.FileMode) error {
+ if err := r.dirOrMatches(name); err != nil {
+ return err
+ }
+ return r.source.Chmod(name, mode)
+}
+
+func (r *RegexpFs) Name() string {
+ return "RegexpFs"
+}
+
+func (r *RegexpFs) Stat(name string) (os.FileInfo, error) {
+ if err := r.dirOrMatches(name); err != nil {
+ return nil, err
+ }
+ return r.source.Stat(name)
+}
+
+func (r *RegexpFs) Rename(oldname, newname string) error {
+ dir, err := IsDir(r.source, oldname)
+ if err != nil {
+ return err
+ }
+ if dir {
+ return nil
+ }
+ if err := r.matchesName(oldname); err != nil {
+ return err
+ }
+ if err := r.matchesName(newname); err != nil {
+ return err
+ }
+ return r.source.Rename(oldname, newname)
+}
+
+func (r *RegexpFs) RemoveAll(p string) error {
+ dir, err := IsDir(r.source, p)
+ if err != nil {
+ return err
+ }
+ if !dir {
+ if err := r.matchesName(p); err != nil {
+ return err
+ }
+ }
+ return r.source.RemoveAll(p)
+}
+
+func (r *RegexpFs) Remove(name string) error {
+ if err := r.dirOrMatches(name); err != nil {
+ return err
+ }
+ return r.source.Remove(name)
+}
+
+func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ if err := r.dirOrMatches(name); err != nil {
+ return nil, err
+ }
+ return r.source.OpenFile(name, flag, perm)
+}
+
+func (r *RegexpFs) Open(name string) (File, error) {
+ dir, err := IsDir(r.source, name)
+ if err != nil {
+ return nil, err
+ }
+ if !dir {
+ if err := r.matchesName(name); err != nil {
+ return nil, err
+ }
+ }
+ f, err := r.source.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ return &RegexpFile{f: f, re: r.re}, nil
+}
+
+func (r *RegexpFs) Mkdir(n string, p os.FileMode) error {
+ return r.source.Mkdir(n, p)
+}
+
+func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error {
+ return r.source.MkdirAll(n, p)
+}
+
+func (r *RegexpFs) Create(name string) (File, error) {
+ if err := r.matchesName(name); err != nil {
+ return nil, err
+ }
+ return r.source.Create(name)
+}
+
+func (f *RegexpFile) Close() error {
+ return f.f.Close()
+}
+
+func (f *RegexpFile) Read(s []byte) (int, error) {
+ return f.f.Read(s)
+}
+
+func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) {
+ return f.f.ReadAt(s, o)
+}
+
+func (f *RegexpFile) Seek(o int64, w int) (int64, error) {
+ return f.f.Seek(o, w)
+}
+
+func (f *RegexpFile) Write(s []byte) (int, error) {
+ return f.f.Write(s)
+}
+
+func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) {
+ return f.f.WriteAt(s, o)
+}
+
+func (f *RegexpFile) Name() string {
+ return f.f.Name()
+}
+
+func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) {
+ var rfi []os.FileInfo
+ rfi, err = f.f.Readdir(c)
+ if err != nil {
+ return nil, err
+ }
+ for _, i := range rfi {
+ if i.IsDir() || f.re.MatchString(i.Name()) {
+ fi = append(fi, i)
+ }
+ }
+ return fi, nil
+}
+
+func (f *RegexpFile) Readdirnames(c int) (n []string, err error) {
+ fi, err := f.Readdir(c)
+ if err != nil {
+ return nil, err
+ }
+ for _, s := range fi {
+ n = append(n, s.Name())
+ }
+ return n, nil
+}
+
+func (f *RegexpFile) Stat() (os.FileInfo, error) {
+ return f.f.Stat()
+}
+
+func (f *RegexpFile) Sync() error {
+ return f.f.Sync()
+}
+
+func (f *RegexpFile) Truncate(s int64) error {
+ return f.f.Truncate(s)
+}
+
+func (f *RegexpFile) WriteString(s string) (int, error) {
+ return f.f.WriteString(s)
+}
diff --git a/vendor/github.com/spf13/afero/ro_regexp_test.go b/vendor/github.com/spf13/afero/ro_regexp_test.go
new file mode 100644
index 000000000..ef8a35d0d
--- /dev/null
+++ b/vendor/github.com/spf13/afero/ro_regexp_test.go
@@ -0,0 +1,96 @@
+package afero
+
+import (
+ "regexp"
+ "testing"
+)
+
+func TestFilterReadOnly(t *testing.T) {
+ fs := &ReadOnlyFs{source: &MemMapFs{}}
+ _, err := fs.Create("/file.txt")
+ if err == nil {
+ t.Errorf("Did not fail to create file")
+ }
+ // t.Logf("ERR=%s", err)
+}
+
+func TestFilterReadonlyRemoveAndRead(t *testing.T) {
+ mfs := &MemMapFs{}
+ fh, err := mfs.Create("/file.txt")
+ fh.Write([]byte("content here"))
+ fh.Close()
+
+ fs := NewReadOnlyFs(mfs)
+ err = fs.Remove("/file.txt")
+ if err == nil {
+ t.Errorf("Did not fail to remove file")
+ }
+
+ fh, err = fs.Open("/file.txt")
+ if err != nil {
+ t.Errorf("Failed to open file: %s", err)
+ }
+
+ buf := make([]byte, len("content here"))
+ _, err = fh.Read(buf)
+ fh.Close()
+ if string(buf) != "content here" {
+ t.Errorf("Failed to read file: %s", err)
+ }
+
+ err = mfs.Remove("/file.txt")
+ if err != nil {
+ t.Errorf("Failed to remove file")
+ }
+
+ fh, err = fs.Open("/file.txt")
+ if err == nil {
+ fh.Close()
+ t.Errorf("File still present")
+ }
+}
+
+func TestFilterRegexp(t *testing.T) {
+ fs := NewRegexpFs(&MemMapFs{}, regexp.MustCompile(`\.txt$`))
+ _, err := fs.Create("/file.html")
+ if err == nil {
+ t.Errorf("Did not fail to create file")
+ }
+ // t.Logf("ERR=%s", err)
+}
+
+func TestFilterRORegexpChain(t *testing.T) {
+ rofs := &ReadOnlyFs{source: &MemMapFs{}}
+ fs := &RegexpFs{re: regexp.MustCompile(`\.txt$`), source: rofs}
+ _, err := fs.Create("/file.txt")
+ if err == nil {
+ t.Errorf("Did not fail to create file")
+ }
+ // t.Logf("ERR=%s", err)
+}
+
+func TestFilterRegexReadDir(t *testing.T) {
+ mfs := &MemMapFs{}
+ fs1 := &RegexpFs{re: regexp.MustCompile(`\.txt$`), source: mfs}
+ fs := &RegexpFs{re: regexp.MustCompile(`^a`), source: fs1}
+
+ mfs.MkdirAll("/dir/sub", 0777)
+ for _, name := range []string{"afile.txt", "afile.html", "bfile.txt"} {
+ for _, dir := range []string{"/dir/", "/dir/sub/"} {
+ fh, _ := mfs.Create(dir + name)
+ fh.Close()
+ }
+ }
+
+ files, _ := ReadDir(fs, "/dir")
+ if len(files) != 2 { // afile.txt, sub
+ t.Errorf("Got wrong number of files: %#v", files)
+ }
+
+ f, _ := fs.Open("/dir/sub")
+ names, _ := f.Readdirnames(-1)
+ if len(names) != 1 {
+ t.Errorf("Got wrong number of names: %v", names)
+ }
+}
diff --git a/vendor/github.com/spf13/afero/sftpfs/file.go b/vendor/github.com/spf13/afero/sftpfs/file.go
new file mode 100644
index 000000000..e4ccb55c0
--- /dev/null
+++ b/vendor/github.com/spf13/afero/sftpfs/file.go
@@ -0,0 +1,95 @@
+// Copyright © 2015 Jerry Jacobs <jerry.jacobs@xor-gate.org>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sftpfs
+
+import (
+ "github.com/pkg/sftp"
+ "os"
+)
+
+type File struct {
+ fd *sftp.File
+}
+
+func FileOpen(s *sftp.Client, name string) (*File, error) {
+ fd, err := s.Open(name)
+ if err != nil {
+ return &File{}, err
+ }
+ return &File{fd: fd}, nil
+}
+
+func FileCreate(s *sftp.Client, name string) (*File, error) {
+ fd, err := s.Create(name)
+ if err != nil {
+ return &File{}, err
+ }
+ return &File{fd: fd}, nil
+}
+
+func (f *File) Close() error {
+ return f.fd.Close()
+}
+
+func (f *File) Name() string {
+ return f.fd.Name()
+}
+
+func (f *File) Stat() (os.FileInfo, error) {
+ return f.fd.Stat()
+}
+
+func (f *File) Sync() error {
+ return nil
+}
+
+func (f *File) Truncate(size int64) error {
+ return f.fd.Truncate(size)
+}
+
+func (f *File) Read(b []byte) (n int, err error) {
+ return f.fd.Read(b)
+}
+
+// TODO
+func (f *File) ReadAt(b []byte, off int64) (n int, err error) {
+ return 0, nil
+}
+
+// TODO
+func (f *File) Readdir(count int) (res []os.FileInfo, err error) {
+ return nil, nil
+}
+
+// TODO
+func (f *File) Readdirnames(n int) (names []string, err error) {
+ return nil, nil
+}
+
+func (f *File) Seek(offset int64, whence int) (int64, error) {
+ return f.fd.Seek(offset, whence)
+}
+
+func (f *File) Write(b []byte) (n int, err error) {
+ return f.fd.Write(b)
+}
+
+// TODO
+func (f *File) WriteAt(b []byte, off int64) (n int, err error) {
+ return 0, nil
+}
+
+func (f *File) WriteString(s string) (ret int, err error) {
+ return f.fd.Write([]byte(s))
+}
diff --git a/vendor/github.com/spf13/afero/sftpfs/sftp.go b/vendor/github.com/spf13/afero/sftpfs/sftp.go
new file mode 100644
index 000000000..28721da76
--- /dev/null
+++ b/vendor/github.com/spf13/afero/sftpfs/sftp.go
@@ -0,0 +1,129 @@
+// Copyright © 2015 Jerry Jacobs <jerry.jacobs@xor-gate.org>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sftpfs
+
+import (
+ "os"
+ "time"
+
+ "github.com/pkg/sftp"
+ "github.com/spf13/afero"
+)
+
+// Fs is an afero.Fs implementation that uses functions provided by the sftp package.
+//
+// For details in any method, check the documentation of the sftp package
+// (github.com/pkg/sftp).
+type Fs struct {
+ client *sftp.Client
+}
+
+func New(client *sftp.Client) afero.Fs {
+ return &Fs{client: client}
+}
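+
+// A minimal usage sketch (illustrative only; the ssh.ClientConfig setup is
+// assumed and omitted here):
+//
+//   conn, _ := ssh.Dial("tcp", "host:22", sshConfig)
+//   client, _ := sftp.NewClient(conn)
+//   fs := New(client)
+//   f, err := fs.Create("remote.txt")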
+
+func (s Fs) Name() string { return "sftpfs" }
+
+func (s Fs) Create(name string) (afero.File, error) {
+ return FileCreate(s.client, name)
+}
+
+func (s Fs) Mkdir(name string, perm os.FileMode) error {
+ err := s.client.Mkdir(name)
+ if err != nil {
+ return err
+ }
+ return s.client.Chmod(name, perm)
+}
+
+func (s Fs) MkdirAll(path string, perm os.FileMode) error {
+ // Fast path: if we can tell whether path is a directory or file, stop with success or error.
+ dir, err := s.Stat(path)
+ if err == nil {
+ if dir.IsDir() {
+ return nil
+ }
+ return err
+ }
+
+ // Slow path: make sure parent exists and then call Mkdir for path.
+ i := len(path)
+ for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
+ i--
+ }
+
+ j := i
+ for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
+ j--
+ }
+
+ if j > 1 {
+ // Create parent
+ err = s.MkdirAll(path[0:j-1], perm)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Parent now exists; invoke Mkdir and use its result.
+ err = s.Mkdir(path, perm)
+ if err != nil {
+ // Handle arguments like "foo/." by
+ // double-checking that directory doesn't exist.
+ dir, err1 := s.Lstat(path)
+ if err1 == nil && dir.IsDir() {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
+
+func (s Fs) Open(name string) (afero.File, error) {
+ return FileOpen(s.client, name)
+}
+
+func (s Fs) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) {
+ return nil, nil
+}
+
+func (s Fs) Remove(name string) error {
+ return s.client.Remove(name)
+}
+
+func (s Fs) RemoveAll(path string) error {
+ // TODO have a look at os.RemoveAll
+ // https://github.com/golang/go/blob/master/src/os/path.go#L66
+ return nil
+}
+
+func (s Fs) Rename(oldname, newname string) error {
+ return s.client.Rename(oldname, newname)
+}
+
+func (s Fs) Stat(name string) (os.FileInfo, error) {
+ return s.client.Stat(name)
+}
+
+func (s Fs) Lstat(p string) (os.FileInfo, error) {
+ return s.client.Lstat(p)
+}
+
+func (s Fs) Chmod(name string, mode os.FileMode) error {
+ return s.client.Chmod(name, mode)
+}
+
+func (s Fs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+ return s.client.Chtimes(name, atime, mtime)
+}
diff --git a/vendor/github.com/spf13/afero/sftpfs/sftp_test_go b/vendor/github.com/spf13/afero/sftpfs/sftp_test_go
new file mode 100644
index 000000000..bb00535d8
--- /dev/null
+++ b/vendor/github.com/spf13/afero/sftpfs/sftp_test_go
@@ -0,0 +1,286 @@
+// Copyright © 2015 Jerry Jacobs <jerry.jacobs@xor-gate.org>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "testing"
+ "os"
+ "log"
+ "fmt"
+ "net"
+ "flag"
+ "time"
+ "io/ioutil"
+ "crypto/rsa"
+ _rand "crypto/rand"
+ "encoding/pem"
+ "crypto/x509"
+
+ "golang.org/x/crypto/ssh"
+ "github.com/pkg/sftp"
+)
+
+type SftpFsContext struct {
+ sshc *ssh.Client
+ sshcfg *ssh.ClientConfig
+ sftpc *sftp.Client
+}
+
+// TODO we only connect with a hardcoded user+pass for now;
+// it should be possible to use $HOME/.ssh/id_rsa to log in to the stub sftp server
+func SftpConnect(user, password, host string) (*SftpFsContext, error) {
+/*
+ pemBytes, err := ioutil.ReadFile(os.Getenv("HOME") + "/.ssh/id_rsa")
+ if err != nil {
+ return nil,err
+ }
+
+ signer, err := ssh.ParsePrivateKey(pemBytes)
+ if err != nil {
+ return nil,err
+ }
+
+ sshcfg := &ssh.ClientConfig{
+ User: user,
+ Auth: []ssh.AuthMethod{
+ ssh.Password(password),
+ ssh.PublicKeys(signer),
+ },
+ }
+*/
+
+ sshcfg := &ssh.ClientConfig{
+ User: user,
+ Auth: []ssh.AuthMethod{
+ ssh.Password(password),
+ },
+ }
+
+ sshc, err := ssh.Dial("tcp", host, sshcfg)
+ if err != nil {
+ return nil,err
+ }
+
+ sftpc, err := sftp.NewClient(sshc)
+ if err != nil {
+ return nil,err
+ }
+
+ ctx := &SftpFsContext{
+ sshc: sshc,
+ sshcfg: sshcfg,
+ sftpc: sftpc,
+ }
+
+ return ctx,nil
+}
+
+func (ctx *SftpFsContext) Disconnect() error {
+ ctx.sftpc.Close()
+ ctx.sshc.Close()
+ return nil
+}
+
+// TODO for some weird reason rootpath is "." when writing "file1" with the afero sftp backend
+func RunSftpServer(rootpath string) {
+ var (
+ readOnly bool
+ debugLevelStr string
+ debugLevel int
+ debugStderr bool
+ rootDir string
+ )
+
+ flag.BoolVar(&readOnly, "R", false, "read-only server")
+ flag.BoolVar(&debugStderr, "e", true, "debug to stderr")
+ flag.StringVar(&debugLevelStr, "l", "none", "debug level")
+ flag.StringVar(&rootDir, "root", rootpath, "root directory")
+ flag.Parse()
+
+ debugStream := ioutil.Discard
+ if debugStderr {
+ debugStream = os.Stderr
+ debugLevel = 1
+ }
+
+ // An SSH server is represented by a ServerConfig, which holds
+ // certificate details and handles authentication of ServerConns.
+ config := &ssh.ServerConfig{
+ PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
+ // Should use constant-time compare (or better, salt+hash) in
+ // a production setting.
+ fmt.Fprintf(debugStream, "Login: %s\n", c.User())
+ if c.User() == "test" && string(pass) == "test" {
+ return nil, nil
+ }
+ return nil, fmt.Errorf("password rejected for %q", c.User())
+ },
+ }
+
+ privateBytes, err := ioutil.ReadFile("./test/id_rsa")
+ if err != nil {
+ log.Fatal("Failed to load private key", err)
+ }
+
+ private, err := ssh.ParsePrivateKey(privateBytes)
+ if err != nil {
+ log.Fatal("Failed to parse private key", err)
+ }
+
+ config.AddHostKey(private)
+
+ // Once a ServerConfig has been configured, connections can be
+ // accepted.
+ listener, err := net.Listen("tcp", "0.0.0.0:2022")
+ if err != nil {
+ log.Fatal("failed to listen for connection", err)
+ }
+ fmt.Printf("Listening on %v\n", listener.Addr())
+
+ nConn, err := listener.Accept()
+ if err != nil {
+ log.Fatal("failed to accept incoming connection", err)
+ }
+
+ // Before use, a handshake must be performed on the incoming
+ // net.Conn.
+ _, chans, reqs, err := ssh.NewServerConn(nConn, config)
+ if err != nil {
+ log.Fatal("failed to handshake", err)
+ }
+ fmt.Fprintf(debugStream, "SSH server established\n")
+
+ // The incoming Request channel must be serviced.
+ go ssh.DiscardRequests(reqs)
+
+ // Service the incoming Channel channel.
+ for newChannel := range chans {
+ // Channels have a type, depending on the application level
+ // protocol intended. In the case of an SFTP session, this is "subsystem"
+ // with a payload string of "<length=4>sftp"
+ fmt.Fprintf(debugStream, "Incoming channel: %s\n", newChannel.ChannelType())
+ if newChannel.ChannelType() != "session" {
+ newChannel.Reject(ssh.UnknownChannelType, "unknown channel type")
+ fmt.Fprintf(debugStream, "Unknown channel type: %s\n", newChannel.ChannelType())
+ continue
+ }
+ channel, requests, err := newChannel.Accept()
+ if err != nil {
+ log.Fatal("could not accept channel.", err)
+ }
+ fmt.Fprintf(debugStream, "Channel accepted\n")
+
+ // Sessions have out-of-band requests such as "shell",
+ // "pty-req" and "env". Here we handle only the
+ // "subsystem" request.
+ go func(in <-chan *ssh.Request) {
+ for req := range in {
+ fmt.Fprintf(debugStream, "Request: %v\n", req.Type)
+ ok := false
+ switch req.Type {
+ case "subsystem":
+ fmt.Fprintf(debugStream, "Subsystem: %s\n", req.Payload[4:])
+ if string(req.Payload[4:]) == "sftp" {
+ ok = true
+ }
+ }
+ fmt.Fprintf(debugStream, " - accepted: %v\n", ok)
+ req.Reply(ok, nil)
+ }
+ }(requests)
+
+ server, err := sftp.NewServer(channel, channel, debugStream, debugLevel, readOnly, rootpath)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if err := server.Serve(); err != nil {
+ log.Fatal("sftp server completed with error:", err)
+ }
+ }
+}
+
+// MakeSSHKeyPair makes a pair of public and private keys for SSH access.
+// The public key is encoded in the format used in an OpenSSH authorized_keys file.
+// The generated private key is PEM encoded.
+func MakeSSHKeyPair(bits int, pubKeyPath, privateKeyPath string) error {
+ privateKey, err := rsa.GenerateKey(_rand.Reader, bits)
+ if err != nil {
+ return err
+ }
+
+ // generate and write private key as PEM
+ privateKeyFile, err := os.Create(privateKeyPath)
+ defer privateKeyFile.Close()
+ if err != nil {
+ return err
+ }
+
+ privateKeyPEM := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}
+ if err := pem.Encode(privateKeyFile, privateKeyPEM); err != nil {
+ return err
+ }
+
+ // generate and write public key
+ pub, err := ssh.NewPublicKey(&privateKey.PublicKey)
+ if err != nil {
+ return err
+ }
+
+ return ioutil.WriteFile(pubKeyPath, ssh.MarshalAuthorizedKey(pub), 0655)
+}
+
+func TestSftpCreate(t *testing.T) {
+ os.Mkdir("./test", 0777)
+ MakeSSHKeyPair(1024, "./test/id_rsa.pub", "./test/id_rsa")
+
+ go RunSftpServer("./test/")
+ time.Sleep(5 * time.Second)
+
+ ctx, err := SftpConnect("test", "test", "localhost:2022")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ctx.Disconnect()
+
+ var AppFs Fs = SftpFs{
+ SftpClient: ctx.sftpc,
+ }
+
+ AppFs.MkdirAll("test/dir1/dir2/dir3", os.FileMode(0777))
+ AppFs.Mkdir("test/foo", os.FileMode(0000))
+ AppFs.Chmod("test/foo", os.FileMode(0700))
+ AppFs.Mkdir("test/bar", os.FileMode(0777))
+
+ file, err := AppFs.Create("file1")
+ if err != nil {
+ t.Error(err)
+ }
+ defer file.Close()
+
+ file.Write([]byte("hello\t"))
+ file.WriteString("world!\n")
+
+ f1, err := AppFs.Open("file1")
+ if err != nil {
+ log.Fatalf("open: %v", err)
+ }
+ defer f1.Close()
+
+ b := make([]byte, 100)
+
+ _, err = f1.Read(b)
+ fmt.Println(string(b))
+
+ // TODO check here if "hello\tworld\n" is in buffer b
+}
diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go
new file mode 100644
index 000000000..99f9e5db2
--- /dev/null
+++ b/vendor/github.com/spf13/afero/unionFile.go
@@ -0,0 +1,274 @@
+package afero
+
+import (
+ "io"
+ "os"
+ "path/filepath"
+ "syscall"
+)
+
+// The UnionFile implements the afero.File interface and is returned when
+// reading a directory that is present in at least the overlay layer, or when
+// opening a file for writing.
+//
+// Calls to Readdir() and Readdirnames() merge the os.FileInfo entries / names
+// from the base and the overlay; for files present in both layers, only the
+// entry from the overlay is used.
+//
+// When opening files for writing (Create() / OpenFile() with the right flags),
+// the operations are performed in both layers, starting with the overlay. A
+// successful read in the overlay moves the cursor position in the base layer
+// by the number of bytes read.
+type UnionFile struct {
+ base File
+ layer File
+ off int
+ files []os.FileInfo
+}
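+
+// A minimal sketch of how a UnionFile is typically obtained (illustrative
+// only; it assumes the NewCopyOnWriteFs constructor from copyOnWriteFs.go in
+// this package):
+//
+//   base := NewOsFs()
+//   overlay := NewMemMapFs()
+//   ufs := NewCopyOnWriteFs(base, overlay)
+//   dir, _ := ufs.Open("/some/dir")
+//   names, _ := dir.Readdirnames(-1) // merged view of base and overlay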
+
+func (f *UnionFile) Close() error {
+ // first close base, so we have a newer timestamp in the overlay. If we'd close
+ // the overlay first, we'd get a cacheStale the next time we access this file
+ // -> cache would be useless ;-)
+ if f.base != nil {
+ f.base.Close()
+ }
+ if f.layer != nil {
+ return f.layer.Close()
+ }
+ return BADFD
+}
+
+func (f *UnionFile) Read(s []byte) (int, error) {
+ if f.layer != nil {
+ n, err := f.layer.Read(s)
+ if (err == nil || err == io.EOF) && f.base != nil {
+ // advance the file position also in the base file, the next
+ // call may be a write at this position (or a seek with SEEK_CUR)
+ if _, seekErr := f.base.Seek(int64(n), os.SEEK_CUR); seekErr != nil {
+ // only overwrite err in case the seek fails: we need to
+ // report an eventual io.EOF to the caller
+ err = seekErr
+ }
+ }
+ return n, err
+ }
+ if f.base != nil {
+ return f.base.Read(s)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) {
+ if f.layer != nil {
+ n, err := f.layer.ReadAt(s, o)
+ if (err == nil || err == io.EOF) && f.base != nil {
+ _, err = f.base.Seek(o+int64(n), os.SEEK_SET)
+ }
+ return n, err
+ }
+ if f.base != nil {
+ return f.base.ReadAt(s, o)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) {
+ if f.layer != nil {
+ pos, err = f.layer.Seek(o, w)
+ if (err == nil || err == io.EOF) && f.base != nil {
+ _, err = f.base.Seek(o, w)
+ }
+ return pos, err
+ }
+ if f.base != nil {
+ return f.base.Seek(o, w)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) Write(s []byte) (n int, err error) {
+ if f.layer != nil {
+ n, err = f.layer.Write(s)
+ if err == nil && f.base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark?
+ _, err = f.base.Write(s)
+ }
+ return n, err
+ }
+ if f.base != nil {
+ return f.base.Write(s)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) {
+ if f.layer != nil {
+ n, err = f.layer.WriteAt(s, o)
+ if err == nil && f.base != nil {
+ _, err = f.base.WriteAt(s, o)
+ }
+ return n, err
+ }
+ if f.base != nil {
+ return f.base.WriteAt(s, o)
+ }
+ return 0, BADFD
+}
+
+func (f *UnionFile) Name() string {
+ if f.layer != nil {
+ return f.layer.Name()
+ }
+ return f.base.Name()
+}
+
+// Readdir will weave the two directories together and
+// return a single view of the overlayed directories
+func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) {
+ if f.off == 0 {
+ var files = make(map[string]os.FileInfo)
+ var rfi []os.FileInfo
+ if f.layer != nil {
+ rfi, err = f.layer.Readdir(-1)
+ if err != nil {
+ return nil, err
+ }
+ for _, fi := range rfi {
+ files[fi.Name()] = fi
+ }
+ }
+
+ if f.base != nil {
+ rfi, err = f.base.Readdir(-1)
+ if err != nil {
+ return nil, err
+ }
+ for _, fi := range rfi {
+ if _, exists := files[fi.Name()]; !exists {
+ files[fi.Name()] = fi
+ }
+ }
+ }
+ for _, fi := range files {
+ f.files = append(f.files, fi)
+ }
+ }
+ if c == -1 {
+ return f.files[f.off:], nil
+ }
+ defer func() { f.off += c }()
+ return f.files[f.off:c], nil
+}
+
+func (f *UnionFile) Readdirnames(c int) ([]string, error) {
+ rfi, err := f.Readdir(c)
+ if err != nil {
+ return nil, err
+ }
+ var names []string
+ for _, fi := range rfi {
+ names = append(names, fi.Name())
+ }
+ return names, nil
+}
+
+func (f *UnionFile) Stat() (os.FileInfo, error) {
+ if f.layer != nil {
+ return f.layer.Stat()
+ }
+ if f.base != nil {
+ return f.base.Stat()
+ }
+ return nil, BADFD
+}
+
+func (f *UnionFile) Sync() (err error) {
+ if f.layer != nil {
+ err = f.layer.Sync()
+ if err == nil && f.base != nil {
+ err = f.base.Sync()
+ }
+ return err
+ }
+ if f.base != nil {
+ return f.base.Sync()
+ }
+ return BADFD
+}
+
+func (f *UnionFile) Truncate(s int64) (err error) {
+ if f.layer != nil {
+ err = f.layer.Truncate(s)
+ if err == nil && f.base != nil {
+ err = f.base.Truncate(s)
+ }
+ return err
+ }
+ if f.base != nil {
+ return f.base.Truncate(s)
+ }
+ return BADFD
+}
+
+func (f *UnionFile) WriteString(s string) (n int, err error) {
+ if f.layer != nil {
+ n, err = f.layer.WriteString(s)
+ if err == nil && f.base != nil {
+ _, err = f.base.WriteString(s)
+ }
+ return n, err
+ }
+ if f.base != nil {
+ return f.base.WriteString(s)
+ }
+ return 0, BADFD
+}
+
+func copyToLayer(base Fs, layer Fs, name string) error {
+ bfh, err := base.Open(name)
+ if err != nil {
+ return err
+ }
+ defer bfh.Close()
+
+ // First make sure the directory exists
+ exists, err := Exists(layer, filepath.Dir(name))
+ if err != nil {
+ return err
+ }
+ if !exists {
+ err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME?
+ if err != nil {
+ return err
+ }
+ }
+
+ // Create the file on the overlay
+ lfh, err := layer.Create(name)
+ if err != nil {
+ return err
+ }
+ n, err := io.Copy(lfh, bfh)
+ if err != nil {
+ // If anything fails, clean up the file
+ layer.Remove(name)
+ lfh.Close()
+ return err
+ }
+
+ bfi, err := bfh.Stat()
+ if err != nil || bfi.Size() != n {
+ layer.Remove(name)
+ lfh.Close()
+ return syscall.EIO
+ }
+
+ err = lfh.Close()
+ if err != nil {
+ layer.Remove(name)
+ lfh.Close()
+ return err
+ }
+ return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime())
+}
diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go
new file mode 100644
index 000000000..2f44e6a10
--- /dev/null
+++ b/vendor/github.com/spf13/afero/util.go
@@ -0,0 +1,331 @@
+// Copyright ©2015 Steve Francia <spf@spf13.com>
+// Portions Copyright ©2015 The Hugo Authors
+// Portions Copyright 2016-present Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+ "unicode"
+
+ "golang.org/x/text/transform"
+ "golang.org/x/text/unicode/norm"
+)
+
+// FilePathSeparator is filepath.Separator as a string.
+const FilePathSeparator = string(filepath.Separator)
+
+// WriteReader takes a reader and a path and writes the reader's content to
+// the file at that path, creating parent directories as needed.
+func (a Afero) WriteReader(path string, r io.Reader) (err error) {
+ return WriteReader(a.Fs, path, r)
+}
+
+func WriteReader(fs Fs, path string, r io.Reader) (err error) {
+ dir, _ := filepath.Split(path)
+ ospath := filepath.FromSlash(dir)
+
+ if ospath != "" {
+ err = fs.MkdirAll(ospath, 0777) // rwxrwxrwx
+ if err != nil {
+ if !os.IsExist(err) {
+ log.Panicln(err)
+ }
+ }
+ }
+
+ file, err := fs.Create(path)
+ if err != nil {
+ return
+ }
+ defer file.Close()
+
+ _, err = io.Copy(file, r)
+ return
+}
+
+// SafeWriteReader is the same as WriteReader, but it returns an error if the
+// target file already exists.
+func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) {
+ return SafeWriteReader(a.Fs, path, r)
+}
+
+func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) {
+ dir, _ := filepath.Split(path)
+ ospath := filepath.FromSlash(dir)
+
+ if ospath != "" {
+ err = fs.MkdirAll(ospath, 0777) // rwxrwxrwx
+ if err != nil {
+ return
+ }
+ }
+
+ exists, err := Exists(fs, path)
+ if err != nil {
+ return
+ }
+ if exists {
+ return fmt.Errorf("%v already exists", path)
+ }
+
+ file, err := fs.Create(path)
+ if err != nil {
+ return
+ }
+ defer file.Close()
+
+ _, err = io.Copy(file, r)
+ return
+}
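+
+// A minimal usage sketch for the two writers above (illustrative only):
+//
+//   fs := NewMemMapFs()
+//   _ = WriteReader(fs, "/data/out.txt", strings.NewReader("payload"))
+//   err := SafeWriteReader(fs, "/data/out.txt", strings.NewReader("again"))
+//   // err != nil here, because /data/out.txt already exists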
+
+func (a Afero) GetTempDir(subPath string) string {
+ return GetTempDir(a.Fs, subPath)
+}
+
+// GetTempDir returns the default temp directory with a trailing slash.
+// If subPath is not empty, it is created below the temp directory recursively
+// with mode 0777 (rwxrwxrwx).
+func GetTempDir(fs Fs, subPath string) string {
+ addSlash := func(p string) string {
+ if FilePathSeparator != p[len(p)-1:] {
+ p = p + FilePathSeparator
+ }
+ return p
+ }
+ dir := addSlash(os.TempDir())
+
+ if subPath != "" {
+ // preserve windows backslash :-(
+ if FilePathSeparator == "\\" {
+ subPath = strings.Replace(subPath, "\\", "____", -1)
+ }
+ dir = dir + UnicodeSanitize(subPath)
+ if FilePathSeparator == "\\" {
+ dir = strings.Replace(dir, "____", "\\", -1)
+ }
+
+ if exists, _ := Exists(fs, dir); exists {
+ return addSlash(dir)
+ }
+
+ err := fs.MkdirAll(dir, 0777)
+ if err != nil {
+ panic(err)
+ }
+ dir = addSlash(dir)
+ }
+ return dir
+}
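+
+// A minimal usage sketch (illustrative only; the exact directory depends on
+// os.TempDir() for the platform):
+//
+//   dir := GetTempDir(NewMemMapFs(), "myapp")
+//   // e.g. "/tmp/myapp/" on Linux, always with a trailing separator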
+
+// UnicodeSanitize rewrites a string to remove non-standard path characters.
+func UnicodeSanitize(s string) string {
+ source := []rune(s)
+ target := make([]rune, 0, len(source))
+
+ for _, r := range source {
+ if unicode.IsLetter(r) ||
+ unicode.IsDigit(r) ||
+ unicode.IsMark(r) ||
+ r == '.' ||
+ r == '/' ||
+ r == '\\' ||
+ r == '_' ||
+ r == '-' ||
+ r == '%' ||
+ r == ' ' ||
+ r == '#' {
+ target = append(target, r)
+ }
+ }
+
+ return string(target)
+}
+
+// NeuterAccents transforms characters with accents into their plain forms.
+func NeuterAccents(s string) string {
+ t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)
+ result, _, _ := transform.String(t, s)
+
+ return result
+}
+
+func isMn(r rune) bool {
+ return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks
+}
+
+func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) {
+ return FileContainsBytes(a.Fs, filename, subslice)
+}
+
+// Check if a file contains a specified byte slice.
+func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) {
+ f, err := fs.Open(filename)
+ if err != nil {
+ return false, err
+ }
+ defer f.Close()
+
+ return readerContainsAny(f, subslice), nil
+}
+
+func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) {
+ return FileContainsAnyBytes(a.Fs, filename, subslices)
+}
+
+// Check if a file contains any of the specified byte slices.
+func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) {
+ f, err := fs.Open(filename)
+ if err != nil {
+ return false, err
+ }
+ defer f.Close()
+
+ return readerContainsAny(f, subslices...), nil
+}
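+
+// A minimal usage sketch for the content checks above (illustrative only,
+// reusing the fs and file from the WriteReader sketch):
+//
+//   found, _ := FileContainsBytes(fs, "/data/out.txt", []byte("payload"))
+//   any, _ := FileContainsAnyBytes(fs, "/data/out.txt",
+//       [][]byte{[]byte("foo"), []byte("payload")})
+//   // found == true, any == true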
+
+// readerContainsAny reports whether any of the subslices is within r.
+func readerContainsAny(r io.Reader, subslices ...[]byte) bool {
+
+ if r == nil || len(subslices) == 0 {
+ return false
+ }
+
+ largestSlice := 0
+
+ for _, sl := range subslices {
+ if len(sl) > largestSlice {
+ largestSlice = len(sl)
+ }
+ }
+
+ if largestSlice == 0 {
+ return false
+ }
+
+ bufflen := largestSlice * 4
+ halflen := bufflen / 2
+ buff := make([]byte, bufflen)
+ var err error
+ var n, i int
+
+ for {
+ i++
+ if i == 1 {
+ n, err = io.ReadAtLeast(r, buff[:halflen], halflen)
+ } else {
+ if i != 2 {
+ // shift left to catch overlapping matches
+ copy(buff[:], buff[halflen:])
+ }
+ n, err = io.ReadAtLeast(r, buff[halflen:], halflen)
+ }
+
+ if n > 0 {
+ for _, sl := range subslices {
+ if bytes.Contains(buff, sl) {
+ return true
+ }
+ }
+ }
+
+ if err != nil {
+ break
+ }
+ }
+ return false
+}
+
+func (a Afero) DirExists(path string) (bool, error) {
+ return DirExists(a.Fs, path)
+}
+
+// DirExists checks if a path exists and is a directory.
+func DirExists(fs Fs, path string) (bool, error) {
+ fi, err := fs.Stat(path)
+ if err == nil && fi.IsDir() {
+ return true, nil
+ }
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+}
+
+func (a Afero) IsDir(path string) (bool, error) {
+ return IsDir(a.Fs, path)
+}
+
+// IsDir checks if a given path is a directory.
+func IsDir(fs Fs, path string) (bool, error) {
+ fi, err := fs.Stat(path)
+ if err != nil {
+ return false, err
+ }
+ return fi.IsDir(), nil
+}
+
+func (a Afero) IsEmpty(path string) (bool, error) {
+ return IsEmpty(a.Fs, path)
+}
+
+// IsEmpty checks if a given file or directory is empty.
+func IsEmpty(fs Fs, path string) (bool, error) {
+ if b, _ := Exists(fs, path); !b {
+ return false, fmt.Errorf("%q path does not exist", path)
+ }
+ fi, err := fs.Stat(path)
+ if err != nil {
+ return false, err
+ }
+ if fi.IsDir() {
+ f, err := fs.Open(path)
+ if err != nil {
+ return false, err
+ }
+ defer f.Close()
+ list, err := f.Readdir(-1)
+ if err != nil {
+ return false, err
+ }
+ return len(list) == 0, nil
+ }
+ return fi.Size() == 0, nil
+}
+
+func (a Afero) Exists(path string) (bool, error) {
+ return Exists(a.Fs, path)
+}
+
+// Check if a file or directory exists.
+func Exists(fs Fs, path string) (bool, error) {
+ _, err := fs.Stat(path)
+ if err == nil {
+ return true, nil
+ }
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+}
+
+func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string {
+ combinedPath := filepath.Join(basePathFs.path, relativePath)
+ if parent, ok := basePathFs.source.(*BasePathFs); ok {
+ return FullBaseFsPath(parent, combinedPath)
+ }
+
+ return combinedPath
+}
diff --git a/vendor/github.com/spf13/afero/util_test.go b/vendor/github.com/spf13/afero/util_test.go
new file mode 100644
index 000000000..b5852f184
--- /dev/null
+++ b/vendor/github.com/spf13/afero/util_test.go
@@ -0,0 +1,450 @@
+// Copyright ©2015 Steve Francia <spf@spf13.com>
+// Portions Copyright ©2015 The Hugo Authors
+//
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package afero
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+)
+
+var testFS = new(MemMapFs)
+
+func TestDirExists(t *testing.T) {
+ type test struct {
+ input string
+ expected bool
+ }
+
+ // First create a couple directories so there is something in the filesystem
+ //testFS := new(MemMapFs)
+ testFS.MkdirAll("/foo/bar", 0777)
+
+ data := []test{
+ {".", true},
+ {"./", true},
+ {"..", true},
+ {"../", true},
+ {"./..", true},
+ {"./../", true},
+ {"/foo/", true},
+ {"/foo", true},
+ {"/foo/bar", true},
+ {"/foo/bar/", true},
+ {"/", true},
+ {"/some-really-random-directory-name", false},
+ {"/some/really/random/directory/name", false},
+ {"./some-really-random-local-directory-name", false},
+ {"./some/really/random/local/directory/name", false},
+ }
+
+ for i, d := range data {
+ exists, _ := DirExists(testFS, filepath.FromSlash(d.input))
+ if d.expected != exists {
+ t.Errorf("Test %d %q failed. Expected %t got %t", i, d.input, d.expected, exists)
+ }
+ }
+}
+
+func TestIsDir(t *testing.T) {
+ testFS = new(MemMapFs)
+
+ type test struct {
+ input string
+ expected bool
+ }
+ data := []test{
+ {"./", true},
+ {"/", true},
+ {"./this-directory-does-not-existi", false},
+ {"/this-absolute-directory/does-not-exist", false},
+ }
+
+ for i, d := range data {
+
+ exists, _ := IsDir(testFS, d.input)
+ if d.expected != exists {
+ t.Errorf("Test %d failed. Expected %t got %t", i, d.expected, exists)
+ }
+ }
+}
+
+func TestIsEmpty(t *testing.T) {
+ testFS = new(MemMapFs)
+
+ zeroSizedFile, _ := createZeroSizedFileInTempDir()
+ defer deleteFileInTempDir(zeroSizedFile)
+ nonZeroSizedFile, _ := createNonZeroSizedFileInTempDir()
+ defer deleteFileInTempDir(nonZeroSizedFile)
+ emptyDirectory, _ := createEmptyTempDir()
+ defer deleteTempDir(emptyDirectory)
+ nonEmptyZeroLengthFilesDirectory, _ := createTempDirWithZeroLengthFiles()
+ defer deleteTempDir(nonEmptyZeroLengthFilesDirectory)
+ nonEmptyNonZeroLengthFilesDirectory, _ := createTempDirWithNonZeroLengthFiles()
+ defer deleteTempDir(nonEmptyNonZeroLengthFilesDirectory)
+ nonExistentFile := os.TempDir() + "/this-file-does-not-exist.txt"
+ nonExistentDir := os.TempDir() + "/this/directory/does/not/exist/"
+
+ fileDoesNotExist := fmt.Errorf("%q path does not exist", nonExistentFile)
+ dirDoesNotExist := fmt.Errorf("%q path does not exist", nonExistentDir)
+
+ type test struct {
+ input string
+ expectedResult bool
+ expectedErr error
+ }
+
+ data := []test{
+ {zeroSizedFile.Name(), true, nil},
+ {nonZeroSizedFile.Name(), false, nil},
+ {emptyDirectory, true, nil},
+ {nonEmptyZeroLengthFilesDirectory, false, nil},
+ {nonEmptyNonZeroLengthFilesDirectory, false, nil},
+ {nonExistentFile, false, fileDoesNotExist},
+ {nonExistentDir, false, dirDoesNotExist},
+ }
+ for i, d := range data {
+ exists, err := IsEmpty(testFS, d.input)
+ if d.expectedResult != exists {
+ t.Errorf("Test %d %q failed exists. Expected result %t got %t", i, d.input, d.expectedResult, exists)
+ }
+ if d.expectedErr != nil {
+ if d.expectedErr.Error() != err.Error() {
+ t.Errorf("Test %d failed with err. Expected %q(%#v) got %q(%#v)", i, d.expectedErr, d.expectedErr, err, err)
+ }
+ } else {
+ if d.expectedErr != err {
+ t.Errorf("Test %d failed. Expected error %q(%#v) got %q(%#v)", i, d.expectedErr, d.expectedErr, err, err)
+ }
+ }
+ }
+}
+
+func TestReaderContains(t *testing.T) {
+ for i, this := range []struct {
+ v1 string
+ v2 [][]byte
+ expect bool
+ }{
+ {"abc", [][]byte{[]byte("a")}, true},
+ {"abc", [][]byte{[]byte("b")}, true},
+ {"abcdefg", [][]byte{[]byte("efg")}, true},
+ {"abc", [][]byte{[]byte("d")}, false},
+ {"abc", [][]byte{[]byte("d"), []byte("e")}, false},
+ {"abc", [][]byte{[]byte("d"), []byte("a")}, true},
+ {"abc", [][]byte{[]byte("b"), []byte("e")}, true},
+ {"", nil, false},
+ {"", [][]byte{[]byte("a")}, false},
+ {"a", [][]byte{[]byte("")}, false},
+ {"", [][]byte{[]byte("")}, false}} {
+ result := readerContainsAny(strings.NewReader(this.v1), this.v2...)
+ if result != this.expect {
+ t.Errorf("[%d] readerContains: got %t but expected %t", i, result, this.expect)
+ }
+ }
+
+ if readerContainsAny(nil, []byte("a")) {
+ t.Error("readerContains with nil reader")
+ }
+
+ if readerContainsAny(nil, nil) {
+ t.Error("readerContains with nil arguments")
+ }
+}
+
+func createZeroSizedFileInTempDir() (File, error) {
+ filePrefix := "_path_test_"
+ f, e := TempFile(testFS, "", filePrefix) // dir is os.TempDir()
+ if e != nil {
+ // if there was an error no file was created.
+ // => no requirement to delete the file
+ return nil, e
+ }
+ return f, nil
+}
+
+func createNonZeroSizedFileInTempDir() (File, error) {
+	f, err := createZeroSizedFileInTempDir()
+	if err != nil {
+		// no file was created, so there is nothing to clean up
+		return nil, err
+	}
+ byteString := []byte("byteString")
+ err = WriteFile(testFS, f.Name(), byteString, 0644)
+ if err != nil {
+ // delete the file
+ deleteFileInTempDir(f)
+ return nil, err
+ }
+ return f, nil
+}
+
+func deleteFileInTempDir(f File) {
+	// best-effort cleanup in a test helper; there is nothing useful to do with
+	// a failed Remove here, so the error is intentionally ignored
+	_ = testFS.Remove(f.Name())
+}
+
+func createEmptyTempDir() (string, error) {
+ dirPrefix := "_dir_prefix_"
+ d, e := TempDir(testFS, "", dirPrefix) // will be in os.TempDir()
+ if e != nil {
+ // no directory to delete - it was never created
+ return "", e
+ }
+ return d, nil
+}
+
+func createTempDirWithZeroLengthFiles() (string, error) {
+	d, dirErr := createEmptyTempDir()
+	if dirErr != nil {
+		// no directory was created, so there is nothing to clean up
+		return "", dirErr
+	}
+ filePrefix := "_path_test_"
+	_, fileErr := TempFile(testFS, d, filePrefix) // the file is created inside d
+ if fileErr != nil {
+ // if there was an error no file was created.
+ // but we need to remove the directory to clean-up
+ deleteTempDir(d)
+ return "", fileErr
+ }
+	// the dir now has one zero-length file in it
+ return d, nil
+
+}
+
+func createTempDirWithNonZeroLengthFiles() (string, error) {
+	d, dirErr := createEmptyTempDir()
+	if dirErr != nil {
+		// no directory was created, so there is nothing to clean up
+		return "", dirErr
+	}
+ filePrefix := "_path_test_"
+	f, fileErr := TempFile(testFS, d, filePrefix) // the file is created inside d
+ if fileErr != nil {
+ // if there was an error no file was created.
+ // but we need to remove the directory to clean-up
+ deleteTempDir(d)
+ return "", fileErr
+ }
+ byteString := []byte("byteString")
+ fileErr = WriteFile(testFS, f.Name(), byteString, 0644)
+ if fileErr != nil {
+ // delete the file
+ deleteFileInTempDir(f)
+ // also delete the directory
+ deleteTempDir(d)
+ return "", fileErr
+ }
+
+	// the dir now has one non-zero-length file in it
+ return d, nil
+
+}
+
+func TestExists(t *testing.T) {
+ zeroSizedFile, _ := createZeroSizedFileInTempDir()
+ defer deleteFileInTempDir(zeroSizedFile)
+ nonZeroSizedFile, _ := createNonZeroSizedFileInTempDir()
+ defer deleteFileInTempDir(nonZeroSizedFile)
+ emptyDirectory, _ := createEmptyTempDir()
+ defer deleteTempDir(emptyDirectory)
+ nonExistentFile := os.TempDir() + "/this-file-does-not-exist.txt"
+	nonExistentDir := os.TempDir() + "/this/directory/does/not/exist/"
+
+ type test struct {
+ input string
+ expectedResult bool
+ expectedErr error
+ }
+
+ data := []test{
+ {zeroSizedFile.Name(), true, nil},
+ {nonZeroSizedFile.Name(), true, nil},
+ {emptyDirectory, true, nil},
+ {nonExistentFile, false, nil},
+ {nonExistentDir, false, nil},
+ }
+ for i, d := range data {
+ exists, err := Exists(testFS, d.input)
+ if d.expectedResult != exists {
+ t.Errorf("Test %d failed. Expected result %t got %t", i, d.expectedResult, exists)
+ }
+ if d.expectedErr != err {
+ t.Errorf("Test %d failed. Expected %q got %q", i, d.expectedErr, err)
+ }
+ }
+
+}
+
+func TestSafeWriteToDisk(t *testing.T) {
+ emptyFile, _ := createZeroSizedFileInTempDir()
+ defer deleteFileInTempDir(emptyFile)
+ tmpDir, _ := createEmptyTempDir()
+ defer deleteTempDir(tmpDir)
+
+ randomString := "This is a random string!"
+ reader := strings.NewReader(randomString)
+
+ fileExists := fmt.Errorf("%v already exists", emptyFile.Name())
+
+ type test struct {
+ filename string
+ expectedErr error
+ }
+
+ now := time.Now().Unix()
+ nowStr := strconv.FormatInt(now, 10)
+ data := []test{
+ {emptyFile.Name(), fileExists},
+ {tmpDir + "/" + nowStr, nil},
+ }
+
+ for i, d := range data {
+ e := SafeWriteReader(testFS, d.filename, reader)
+ if d.expectedErr != nil {
+ if d.expectedErr.Error() != e.Error() {
+ t.Errorf("Test %d failed. Expected error %q but got %q", i, d.expectedErr.Error(), e.Error())
+ }
+ } else {
+ if d.expectedErr != e {
+ t.Errorf("Test %d failed. Expected %q but got %q", i, d.expectedErr, e)
+ }
+ contents, _ := ReadFile(testFS, d.filename)
+ if randomString != string(contents) {
+ t.Errorf("Test %d failed. Expected contents %q but got %q", i, randomString, string(contents))
+ }
+ }
+ reader.Seek(0, 0)
+ }
+}
+
+func TestWriteToDisk(t *testing.T) {
+ emptyFile, _ := createZeroSizedFileInTempDir()
+ defer deleteFileInTempDir(emptyFile)
+ tmpDir, _ := createEmptyTempDir()
+ defer deleteTempDir(tmpDir)
+
+ randomString := "This is a random string!"
+ reader := strings.NewReader(randomString)
+
+ type test struct {
+ filename string
+ expectedErr error
+ }
+
+ now := time.Now().Unix()
+ nowStr := strconv.FormatInt(now, 10)
+ data := []test{
+ {emptyFile.Name(), nil},
+ {tmpDir + "/" + nowStr, nil},
+ }
+
+ for i, d := range data {
+ e := WriteReader(testFS, d.filename, reader)
+ if d.expectedErr != e {
+ t.Errorf("Test %d failed. WriteToDisk Error Expected %q but got %q", i, d.expectedErr, e)
+ }
+ contents, e := ReadFile(testFS, d.filename)
+ if e != nil {
+ t.Errorf("Test %d failed. Could not read file %s. Reason: %s\n", i, d.filename, e)
+ }
+ if randomString != string(contents) {
+ t.Errorf("Test %d failed. Expected contents %q but got %q", i, randomString, string(contents))
+ }
+ reader.Seek(0, 0)
+ }
+}
+
+func TestGetTempDir(t *testing.T) {
+ dir := os.TempDir()
+ if FilePathSeparator != dir[len(dir)-1:] {
+ dir = dir + FilePathSeparator
+ }
+ testDir := "hugoTestFolder" + FilePathSeparator
+ tests := []struct {
+ input string
+ expected string
+ }{
+ {"", dir},
+ {testDir + " Foo bar ", dir + testDir + " Foo bar " + FilePathSeparator},
+ {testDir + "Foo.Bar/foo_Bar-Foo", dir + testDir + "Foo.Bar/foo_Bar-Foo" + FilePathSeparator},
+ {testDir + "fOO,bar:foo%bAR", dir + testDir + "fOObarfoo%bAR" + FilePathSeparator},
+ {testDir + "FOo/BaR.html", dir + testDir + "FOo/BaR.html" + FilePathSeparator},
+ {testDir + "трям/трям", dir + testDir + "трям/трям" + FilePathSeparator},
+ {testDir + "은행", dir + testDir + "은행" + FilePathSeparator},
+ {testDir + "Банковский кассир", dir + testDir + "Банковский кассир" + FilePathSeparator},
+ }
+
+ for _, test := range tests {
+ output := GetTempDir(new(MemMapFs), test.input)
+ if output != test.expected {
+ t.Errorf("Expected %#v, got %#v\n", test.expected, output)
+ }
+ }
+}
+
+// This function is very dangerous. Don't use it.
+func deleteTempDir(d string) {
+	// best-effort cleanup; the error from RemoveAll is intentionally ignored
+	_ = os.RemoveAll(d)
+}
+
+func TestFullBaseFsPath(t *testing.T) {
+ type dirSpec struct {
+ Dir1, Dir2, Dir3 string
+ }
+ dirSpecs := []dirSpec{
+ dirSpec{Dir1: "/", Dir2: "/", Dir3: "/"},
+ dirSpec{Dir1: "/", Dir2: "/path2", Dir3: "/"},
+ dirSpec{Dir1: "/path1/dir", Dir2: "/path2/dir/", Dir3: "/path3/dir"},
+ dirSpec{Dir1: "C:/path1", Dir2: "path2/dir", Dir3: "/path3/dir/"},
+ }
+
+ for _, ds := range dirSpecs {
+ memFs := NewMemMapFs()
+ level1Fs := NewBasePathFs(memFs, ds.Dir1)
+ level2Fs := NewBasePathFs(level1Fs, ds.Dir2)
+ level3Fs := NewBasePathFs(level2Fs, ds.Dir3)
+
+ type spec struct {
+ BaseFs Fs
+ FileName string
+ ExpectedPath string
+ }
+ specs := []spec{
+ spec{BaseFs: level3Fs, FileName: "f.txt", ExpectedPath: filepath.Join(ds.Dir1, ds.Dir2, ds.Dir3, "f.txt")},
+ spec{BaseFs: level3Fs, FileName: "", ExpectedPath: filepath.Join(ds.Dir1, ds.Dir2, ds.Dir3, "")},
+ spec{BaseFs: level2Fs, FileName: "f.txt", ExpectedPath: filepath.Join(ds.Dir1, ds.Dir2, "f.txt")},
+ spec{BaseFs: level2Fs, FileName: "", ExpectedPath: filepath.Join(ds.Dir1, ds.Dir2, "")},
+ spec{BaseFs: level1Fs, FileName: "f.txt", ExpectedPath: filepath.Join(ds.Dir1, "f.txt")},
+ spec{BaseFs: level1Fs, FileName: "", ExpectedPath: filepath.Join(ds.Dir1, "")},
+ }
+
+ for _, s := range specs {
+ if actualPath := FullBaseFsPath(s.BaseFs.(*BasePathFs), s.FileName); actualPath != s.ExpectedPath {
+ t.Errorf("Expected \n%s got \n%s", s.ExpectedPath, actualPath)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/cast/.gitignore b/vendor/github.com/spf13/cast/.gitignore
new file mode 100644
index 000000000..53053a8ac
--- /dev/null
+++ b/vendor/github.com/spf13/cast/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+
+*.bench
diff --git a/vendor/github.com/spf13/cast/.travis.yml b/vendor/github.com/spf13/cast/.travis.yml
new file mode 100644
index 000000000..4da976684
--- /dev/null
+++ b/vendor/github.com/spf13/cast/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+sudo: required
+go:
+ - 1.7.5
+ - 1.8
+ - tip
+os:
+ - linux
+matrix:
+ allow_failures:
+ - go: tip
+ fast_finish: true
+script:
+ - make check
diff --git a/vendor/github.com/spf13/cast/LICENSE b/vendor/github.com/spf13/cast/LICENSE
new file mode 100644
index 000000000..4527efb9c
--- /dev/null
+++ b/vendor/github.com/spf13/cast/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Steve Francia
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/spf13/cast/Makefile b/vendor/github.com/spf13/cast/Makefile
new file mode 100644
index 000000000..7ccf8930b
--- /dev/null
+++ b/vendor/github.com/spf13/cast/Makefile
@@ -0,0 +1,38 @@
+# A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html
+
+.PHONY: check fmt lint test test-race vet test-cover-html help
+.DEFAULT_GOAL := help
+
+check: test-race fmt vet lint ## Run tests and linters
+
+test: ## Run tests
+ go test ./...
+
+test-race: ## Run tests with race detector
+ go test -race ./...
+
+fmt: ## Run gofmt linter
+ @for d in `go list` ; do \
+ if [ "`gofmt -l -s $$GOPATH/src/$$d | tee /dev/stderr`" ]; then \
+ echo "^ improperly formatted go files" && echo && exit 1; \
+ fi \
+ done
+
+lint: ## Run golint linter
+ @for d in `go list` ; do \
+ if [ "`golint $$d | tee /dev/stderr`" ]; then \
+ echo "^ golint errors!" && echo && exit 1; \
+ fi \
+ done
+
+vet: ## Run go vet linter
+ @if [ "`go vet | tee /dev/stderr`" ]; then \
+ echo "^ go vet errors!" && echo && exit 1; \
+ fi
+
+test-cover-html: ## Generate test coverage report
+ go test -coverprofile=coverage.out -covermode=count
+ go tool cover -func=coverage.out
+
+help:
+ @grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md
new file mode 100644
index 000000000..e6939397d
--- /dev/null
+++ b/vendor/github.com/spf13/cast/README.md
@@ -0,0 +1,75 @@
+cast
+====
+[![GoDoc](https://godoc.org/github.com/spf13/cast?status.svg)](https://godoc.org/github.com/spf13/cast)
+[![Build Status](https://api.travis-ci.org/spf13/cast.svg?branch=master)](https://travis-ci.org/spf13/cast)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast)](https://goreportcard.com/report/github.com/spf13/cast)
+
+Easy and safe casting from one type to another in Go
+
+Don’t Panic! ... Cast
+
+## What is Cast?
+
+Cast is a library to convert between different Go types in a consistent and easy way.
+
+Cast provides simple functions to easily convert a number to a string, an
+interface into a bool, etc. Cast does this intelligently when an obvious
+conversion is possible. It doesn’t make any attempt to guess what you meant;
+for example, you can only convert a string to an int when it is a string
+representation of an int such as “8”. Cast was developed for use in
+[Hugo](http://hugo.spf13.com), a website engine which uses YAML, TOML or JSON
+for metadata.
+
+## Why use Cast?
+
+When working with dynamic data in Go you often need to cast or convert the data
+from one type into another. Cast goes beyond just using type assertion (though
+it uses that when possible) to provide a very straightforward and convenient
+library.
+
+If you are working with interfaces to handle things like dynamic content
+you’ll need an easy way to convert an interface into a given type. This
+is the library for you.
+
+If you are taking in data from YAML, TOML or JSON or other formats which lack
+full types, then Cast is the library for you.
+
+## Usage
+
+Cast provides a handful of To_____ methods. These methods will always return
+the desired type. **If input is provided that will not convert to that type, the
+0 or nil value for that type will be returned**.
+
+Cast also provides matching To_____E methods. These return the same result as
+the To_____ methods, plus an error that tells you whether the conversion
+succeeded. Using these methods you can tell the difference between an input
+that legitimately matched the zero value and a conversion that failed and
+returned the zero value (see the ‘ToIntE’ example below).
+
+The following examples are merely a sample of what is available. Please review
+the code for a complete set.
+
+### Example ‘ToString’:
+
+ cast.ToString("mayonegg") // "mayonegg"
+ cast.ToString(8) // "8"
+ cast.ToString(8.31) // "8.31"
+ cast.ToString([]byte("one time")) // "one time"
+ cast.ToString(nil) // ""
+
+ var foo interface{} = "one more time"
+ cast.ToString(foo) // "one more time"
+
+
+### Example ‘ToInt’:
+
+ cast.ToInt(8) // 8
+ cast.ToInt(8.31) // 8
+ cast.ToInt("8") // 8
+ cast.ToInt(true) // 1
+ cast.ToInt(false) // 0
+
+ var eight interface{} = 8
+ cast.ToInt(eight) // 8
+ cast.ToInt(nil) // 0
+
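+### Example ‘ToIntE’:
+
+A minimal sketch of the error-aware variants described in Usage above; the
+error text shown is taken from this package’s own cast errors:
+
+    cast.ToIntE("8")        // 8, nil
+    cast.ToIntE(8.31)       // 8, nil
+    cast.ToIntE("mayonegg") // 0, unable to cast "mayonegg" of type string to int
+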
diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go
new file mode 100644
index 000000000..dc504b432
--- /dev/null
+++ b/vendor/github.com/spf13/cast/cast.go
@@ -0,0 +1,153 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Package cast provides easy and safe casting in Go.
+package cast
+
+import "time"
+
+// ToBool casts an interface to a bool type.
+func ToBool(i interface{}) bool {
+ v, _ := ToBoolE(i)
+ return v
+}
+
+// ToTime casts an interface to a time.Time type.
+func ToTime(i interface{}) time.Time {
+ v, _ := ToTimeE(i)
+ return v
+}
+
+// ToDuration casts an interface to a time.Duration type.
+func ToDuration(i interface{}) time.Duration {
+ v, _ := ToDurationE(i)
+ return v
+}
+
+// ToFloat64 casts an interface to a float64 type.
+func ToFloat64(i interface{}) float64 {
+ v, _ := ToFloat64E(i)
+ return v
+}
+
+// ToFloat32 casts an interface to a float32 type.
+func ToFloat32(i interface{}) float32 {
+ v, _ := ToFloat32E(i)
+ return v
+}
+
+// ToInt64 casts an interface to an int64 type.
+func ToInt64(i interface{}) int64 {
+ v, _ := ToInt64E(i)
+ return v
+}
+
+// ToInt32 casts an interface to an int32 type.
+func ToInt32(i interface{}) int32 {
+ v, _ := ToInt32E(i)
+ return v
+}
+
+// ToInt16 casts an interface to an int16 type.
+func ToInt16(i interface{}) int16 {
+ v, _ := ToInt16E(i)
+ return v
+}
+
+// ToInt8 casts an interface to an int8 type.
+func ToInt8(i interface{}) int8 {
+ v, _ := ToInt8E(i)
+ return v
+}
+
+// ToInt casts an interface to an int type.
+func ToInt(i interface{}) int {
+ v, _ := ToIntE(i)
+ return v
+}
+
+// ToUint casts an interface to a uint type.
+func ToUint(i interface{}) uint {
+ v, _ := ToUintE(i)
+ return v
+}
+
+// ToUint64 casts an interface to a uint64 type.
+func ToUint64(i interface{}) uint64 {
+ v, _ := ToUint64E(i)
+ return v
+}
+
+// ToUint32 casts an interface to a uint32 type.
+func ToUint32(i interface{}) uint32 {
+ v, _ := ToUint32E(i)
+ return v
+}
+
+// ToUint16 casts an interface to a uint16 type.
+func ToUint16(i interface{}) uint16 {
+ v, _ := ToUint16E(i)
+ return v
+}
+
+// ToUint8 casts an interface to a uint8 type.
+func ToUint8(i interface{}) uint8 {
+ v, _ := ToUint8E(i)
+ return v
+}
+
+// ToString casts an interface to a string type.
+func ToString(i interface{}) string {
+ v, _ := ToStringE(i)
+ return v
+}
+
+// ToStringMapString casts an interface to a map[string]string type.
+func ToStringMapString(i interface{}) map[string]string {
+ v, _ := ToStringMapStringE(i)
+ return v
+}
+
+// ToStringMapStringSlice casts an interface to a map[string][]string type.
+func ToStringMapStringSlice(i interface{}) map[string][]string {
+ v, _ := ToStringMapStringSliceE(i)
+ return v
+}
+
+// ToStringMapBool casts an interface to a map[string]bool type.
+func ToStringMapBool(i interface{}) map[string]bool {
+ v, _ := ToStringMapBoolE(i)
+ return v
+}
+
+// ToStringMap casts an interface to a map[string]interface{} type.
+func ToStringMap(i interface{}) map[string]interface{} {
+ v, _ := ToStringMapE(i)
+ return v
+}
+
+// ToSlice casts an interface to a []interface{} type.
+func ToSlice(i interface{}) []interface{} {
+ v, _ := ToSliceE(i)
+ return v
+}
+
+// ToBoolSlice casts an interface to a []bool type.
+func ToBoolSlice(i interface{}) []bool {
+ v, _ := ToBoolSliceE(i)
+ return v
+}
+
+// ToStringSlice casts an interface to a []string type.
+func ToStringSlice(i interface{}) []string {
+ v, _ := ToStringSliceE(i)
+ return v
+}
+
+// ToIntSlice casts an interface to a []int type.
+func ToIntSlice(i interface{}) []int {
+ v, _ := ToIntSliceE(i)
+ return v
+}
diff --git a/vendor/github.com/spf13/cast/cast_test.go b/vendor/github.com/spf13/cast/cast_test.go
new file mode 100644
index 000000000..2bb8c5f54
--- /dev/null
+++ b/vendor/github.com/spf13/cast/cast_test.go
@@ -0,0 +1,1151 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package cast
+
+import (
+ "fmt"
+ "html/template"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestToUintE(t *testing.T) {
+ tests := []struct {
+ input interface{}
+ expect uint
+ iserr bool
+ }{
+ {int(8), 8, false},
+ {int8(8), 8, false},
+ {int16(8), 8, false},
+ {int32(8), 8, false},
+ {int64(8), 8, false},
+ {uint(8), 8, false},
+ {uint8(8), 8, false},
+ {uint16(8), 8, false},
+ {uint32(8), 8, false},
+ {uint64(8), 8, false},
+ {float32(8.31), 8, false},
+ {float64(8.31), 8, false},
+ {true, 1, false},
+ {false, 0, false},
+ {"8", 8, false},
+ {nil, 0, false},
+ // errors
+ {int(-8), 0, true},
+ {int8(-8), 0, true},
+ {int16(-8), 0, true},
+ {int32(-8), 0, true},
+ {int64(-8), 0, true},
+ {float32(-8.31), 0, true},
+ {float64(-8.31), 0, true},
+ {"-8", 0, true},
+ {"test", 0, true},
+ {testing.T{}, 0, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToUintE(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test:
+ v = ToUint(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+func TestToUint64E(t *testing.T) {
+ tests := []struct {
+ input interface{}
+ expect uint64
+ iserr bool
+ }{
+ {int(8), 8, false},
+ {int8(8), 8, false},
+ {int16(8), 8, false},
+ {int32(8), 8, false},
+ {int64(8), 8, false},
+ {uint(8), 8, false},
+ {uint8(8), 8, false},
+ {uint16(8), 8, false},
+ {uint32(8), 8, false},
+ {uint64(8), 8, false},
+ {float32(8.31), 8, false},
+ {float64(8.31), 8, false},
+ {true, 1, false},
+ {false, 0, false},
+ {"8", 8, false},
+ {nil, 0, false},
+ // errors
+ {int(-8), 0, true},
+ {int8(-8), 0, true},
+ {int16(-8), 0, true},
+ {int32(-8), 0, true},
+ {int64(-8), 0, true},
+ {float32(-8.31), 0, true},
+ {float64(-8.31), 0, true},
+ {"-8", 0, true},
+ {"test", 0, true},
+ {testing.T{}, 0, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToUint64E(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test:
+ v = ToUint64(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+func TestToUint32E(t *testing.T) {
+ tests := []struct {
+ input interface{}
+ expect uint32
+ iserr bool
+ }{
+ {int(8), 8, false},
+ {int8(8), 8, false},
+ {int16(8), 8, false},
+ {int32(8), 8, false},
+ {int64(8), 8, false},
+ {uint(8), 8, false},
+ {uint8(8), 8, false},
+ {uint16(8), 8, false},
+ {uint32(8), 8, false},
+ {uint64(8), 8, false},
+ {float32(8.31), 8, false},
+ {float64(8.31), 8, false},
+ {true, 1, false},
+ {false, 0, false},
+ {"8", 8, false},
+ {nil, 0, false},
+ {int(-8), 0, true},
+ {int8(-8), 0, true},
+ {int16(-8), 0, true},
+ {int32(-8), 0, true},
+ {int64(-8), 0, true},
+ {float32(-8.31), 0, true},
+ {float64(-8.31), 0, true},
+ {"-8", 0, true},
+ // errors
+ {"test", 0, true},
+ {testing.T{}, 0, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToUint32E(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test:
+ v = ToUint32(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+func TestToUint16E(t *testing.T) {
+ tests := []struct {
+ input interface{}
+ expect uint16
+ iserr bool
+ }{
+ {int(8), 8, false},
+ {int8(8), 8, false},
+ {int16(8), 8, false},
+ {int32(8), 8, false},
+ {int64(8), 8, false},
+ {uint(8), 8, false},
+ {uint8(8), 8, false},
+ {uint16(8), 8, false},
+ {uint32(8), 8, false},
+ {uint64(8), 8, false},
+ {float32(8.31), 8, false},
+ {float64(8.31), 8, false},
+ {true, 1, false},
+ {false, 0, false},
+ {"8", 8, false},
+ {nil, 0, false},
+ // errors
+ {int(-8), 0, true},
+ {int8(-8), 0, true},
+ {int16(-8), 0, true},
+ {int32(-8), 0, true},
+ {int64(-8), 0, true},
+ {float32(-8.31), 0, true},
+ {float64(-8.31), 0, true},
+ {"-8", 0, true},
+ {"test", 0, true},
+ {testing.T{}, 0, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToUint16E(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test
+ v = ToUint16(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+func TestToUint8E(t *testing.T) {
+ tests := []struct {
+ input interface{}
+ expect uint8
+ iserr bool
+ }{
+ {int(8), 8, false},
+ {int8(8), 8, false},
+ {int16(8), 8, false},
+ {int32(8), 8, false},
+ {int64(8), 8, false},
+ {uint(8), 8, false},
+ {uint8(8), 8, false},
+ {uint16(8), 8, false},
+ {uint32(8), 8, false},
+ {uint64(8), 8, false},
+ {float32(8.31), 8, false},
+ {float64(8.31), 8, false},
+ {true, 1, false},
+ {false, 0, false},
+ {"8", 8, false},
+ {nil, 0, false},
+ // errors
+ {int(-8), 0, true},
+ {int8(-8), 0, true},
+ {int16(-8), 0, true},
+ {int32(-8), 0, true},
+ {int64(-8), 0, true},
+ {float32(-8.31), 0, true},
+ {float64(-8.31), 0, true},
+ {"-8", 0, true},
+ {"test", 0, true},
+ {testing.T{}, 0, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToUint8E(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test
+ v = ToUint8(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+func TestToIntE(t *testing.T) {
+ tests := []struct {
+ input interface{}
+ expect int
+ iserr bool
+ }{
+ {int(8), 8, false},
+ {int8(8), 8, false},
+ {int16(8), 8, false},
+ {int32(8), 8, false},
+ {int64(8), 8, false},
+ {uint(8), 8, false},
+ {uint8(8), 8, false},
+ {uint16(8), 8, false},
+ {uint32(8), 8, false},
+ {uint64(8), 8, false},
+ {float32(8.31), 8, false},
+ {float64(8.31), 8, false},
+ {true, 1, false},
+ {false, 0, false},
+ {"8", 8, false},
+ {nil, 0, false},
+ // errors
+ {"test", 0, true},
+ {testing.T{}, 0, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToIntE(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test
+ v = ToInt(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+func TestToInt64E(t *testing.T) {
+ tests := []struct {
+ input interface{}
+ expect int64
+ iserr bool
+ }{
+ {int(8), 8, false},
+ {int8(8), 8, false},
+ {int16(8), 8, false},
+ {int32(8), 8, false},
+ {int64(8), 8, false},
+ {uint(8), 8, false},
+ {uint8(8), 8, false},
+ {uint16(8), 8, false},
+ {uint32(8), 8, false},
+ {uint64(8), 8, false},
+ {float32(8.31), 8, false},
+ {float64(8.31), 8, false},
+ {true, 1, false},
+ {false, 0, false},
+ {"8", 8, false},
+ {nil, 0, false},
+ // errors
+ {"test", 0, true},
+ {testing.T{}, 0, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToInt64E(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test
+ v = ToInt64(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+func TestToInt32E(t *testing.T) {
+ tests := []struct {
+ input interface{}
+ expect int32
+ iserr bool
+ }{
+ {int(8), 8, false},
+ {int8(8), 8, false},
+ {int16(8), 8, false},
+ {int32(8), 8, false},
+ {int64(8), 8, false},
+ {uint(8), 8, false},
+ {uint8(8), 8, false},
+ {uint16(8), 8, false},
+ {uint32(8), 8, false},
+ {uint64(8), 8, false},
+ {float32(8.31), 8, false},
+ {float64(8.31), 8, false},
+ {true, 1, false},
+ {false, 0, false},
+ {"8", 8, false},
+ {nil, 0, false},
+ // errors
+ {"test", 0, true},
+ {testing.T{}, 0, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToInt32E(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test
+ v = ToInt32(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+func TestToInt16E(t *testing.T) {
+ tests := []struct {
+ input interface{}
+ expect int16
+ iserr bool
+ }{
+ {int(8), 8, false},
+ {int8(8), 8, false},
+ {int16(8), 8, false},
+ {int32(8), 8, false},
+ {int64(8), 8, false},
+ {uint(8), 8, false},
+ {uint8(8), 8, false},
+ {uint16(8), 8, false},
+ {uint32(8), 8, false},
+ {uint64(8), 8, false},
+ {float32(8.31), 8, false},
+ {float64(8.31), 8, false},
+ {true, 1, false},
+ {false, 0, false},
+ {"8", 8, false},
+ {nil, 0, false},
+ // errors
+ {"test", 0, true},
+ {testing.T{}, 0, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToInt16E(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test
+ v = ToInt16(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+func TestToInt8E(t *testing.T) {
+ tests := []struct {
+ input interface{}
+ expect int8
+ iserr bool
+ }{
+ {int(8), 8, false},
+ {int8(8), 8, false},
+ {int16(8), 8, false},
+ {int32(8), 8, false},
+ {int64(8), 8, false},
+ {uint(8), 8, false},
+ {uint8(8), 8, false},
+ {uint16(8), 8, false},
+ {uint32(8), 8, false},
+ {uint64(8), 8, false},
+ {float32(8.31), 8, false},
+ {float64(8.31), 8, false},
+ {true, 1, false},
+ {false, 0, false},
+ {"8", 8, false},
+ {nil, 0, false},
+ // errors
+ {"test", 0, true},
+ {testing.T{}, 0, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToInt8E(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test
+ v = ToInt8(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+func TestToFloat64E(t *testing.T) {
+ tests := []struct {
+ input interface{}
+ expect float64
+ iserr bool
+ }{
+ {int(8), 8, false},
+ {int8(8), 8, false},
+ {int16(8), 8, false},
+ {int32(8), 8, false},
+ {int64(8), 8, false},
+ {uint(8), 8, false},
+ {uint8(8), 8, false},
+ {uint16(8), 8, false},
+ {uint32(8), 8, false},
+ {uint64(8), 8, false},
+ {float32(8), 8, false},
+ {float64(8.31), 8.31, false},
+ {"8", 8, false},
+ {true, 1, false},
+ {false, 0, false},
+ // errors
+ {"test", 0, true},
+ {testing.T{}, 0, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToFloat64E(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test
+ v = ToFloat64(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+func TestToFloat32E(t *testing.T) {
+ tests := []struct {
+ input interface{}
+ expect float32
+ iserr bool
+ }{
+ {int(8), 8, false},
+ {int8(8), 8, false},
+ {int16(8), 8, false},
+ {int32(8), 8, false},
+ {int64(8), 8, false},
+ {uint(8), 8, false},
+ {uint8(8), 8, false},
+ {uint16(8), 8, false},
+ {uint32(8), 8, false},
+ {uint64(8), 8, false},
+ {float32(8.31), 8.31, false},
+ {float64(8.31), 8.31, false},
+ {"8", 8, false},
+ {true, 1, false},
+ {false, 0, false},
+ // errors
+ {"test", 0, true},
+ {testing.T{}, 0, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToFloat32E(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test
+ v = ToFloat32(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+func TestToStringE(t *testing.T) {
+ type Key struct {
+ k string
+ }
+ key := &Key{"foo"}
+
+ tests := []struct {
+ input interface{}
+ expect string
+ iserr bool
+ }{
+ {int(8), "8", false},
+ {int8(8), "8", false},
+ {int16(8), "8", false},
+ {int32(8), "8", false},
+ {int64(8), "8", false},
+ {uint(8), "8", false},
+ {uint8(8), "8", false},
+ {uint16(8), "8", false},
+ {uint32(8), "8", false},
+ {uint64(8), "8", false},
+ {float32(8.31), "8.31", false},
+ {float64(8.31), "8.31", false},
+ {true, "true", false},
+ {false, "false", false},
+ {nil, "", false},
+ {[]byte("one time"), "one time", false},
+ {"one more time", "one more time", false},
+ {template.HTML("one time"), "one time", false},
+ {template.URL("http://somehost.foo"), "http://somehost.foo", false},
+ {template.JS("(1+2)"), "(1+2)", false},
+ {template.CSS("a"), "a", false},
+ {template.HTMLAttr("a"), "a", false},
+ // errors
+ {testing.T{}, "", true},
+ {key, "", true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToStringE(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test
+ v = ToString(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+type foo struct {
+ val string
+}
+
+func (x foo) String() string {
+ return x.val
+}
+
+func TestStringerToString(t *testing.T) {
+ var x foo
+ x.val = "bar"
+ assert.Equal(t, "bar", ToString(x))
+}
+
+type fu struct {
+ val string
+}
+
+func (x fu) Error() string {
+ return x.val
+}
+
+func TestErrorToString(t *testing.T) {
+ var x fu
+ x.val = "bar"
+ assert.Equal(t, "bar", ToString(x))
+}
+
+func TestStringMapStringSliceE(t *testing.T) {
+ // ToStringMapString inputs/outputs
+ var stringMapString = map[string]string{"key 1": "value 1", "key 2": "value 2", "key 3": "value 3"}
+ var stringMapInterface = map[string]interface{}{"key 1": "value 1", "key 2": "value 2", "key 3": "value 3"}
+ var interfaceMapString = map[interface{}]string{"key 1": "value 1", "key 2": "value 2", "key 3": "value 3"}
+ var interfaceMapInterface = map[interface{}]interface{}{"key 1": "value 1", "key 2": "value 2", "key 3": "value 3"}
+
+ // ToStringMapStringSlice inputs/outputs
+ var stringMapStringSlice = map[string][]string{"key 1": {"value 1", "value 2", "value 3"}, "key 2": {"value 1", "value 2", "value 3"}, "key 3": {"value 1", "value 2", "value 3"}}
+ var stringMapInterfaceSlice = map[string][]interface{}{"key 1": {"value 1", "value 2", "value 3"}, "key 2": {"value 1", "value 2", "value 3"}, "key 3": {"value 1", "value 2", "value 3"}}
+ var stringMapInterfaceInterfaceSlice = map[string]interface{}{"key 1": []interface{}{"value 1", "value 2", "value 3"}, "key 2": []interface{}{"value 1", "value 2", "value 3"}, "key 3": []interface{}{"value 1", "value 2", "value 3"}}
+ var stringMapStringSingleSliceFieldsResult = map[string][]string{"key 1": {"value", "1"}, "key 2": {"value", "2"}, "key 3": {"value", "3"}}
+ var interfaceMapStringSlice = map[interface{}][]string{"key 1": {"value 1", "value 2", "value 3"}, "key 2": {"value 1", "value 2", "value 3"}, "key 3": {"value 1", "value 2", "value 3"}}
+ var interfaceMapInterfaceSlice = map[interface{}][]interface{}{"key 1": {"value 1", "value 2", "value 3"}, "key 2": {"value 1", "value 2", "value 3"}, "key 3": {"value 1", "value 2", "value 3"}}
+
+ var stringMapStringSliceMultiple = map[string][]string{"key 1": {"value 1", "value 2", "value 3"}, "key 2": {"value 1", "value 2", "value 3"}, "key 3": {"value 1", "value 2", "value 3"}}
+ var stringMapStringSliceSingle = map[string][]string{"key 1": {"value 1"}, "key 2": {"value 2"}, "key 3": {"value 3"}}
+
+ var stringMapInterface1 = map[string]interface{}{"key 1": []string{"value 1"}, "key 2": []string{"value 2"}}
+ var stringMapInterfaceResult1 = map[string][]string{"key 1": {"value 1"}, "key 2": {"value 2"}}
+
+ type Key struct {
+ k string
+ }
+
+ tests := []struct {
+ input interface{}
+ expect map[string][]string
+ iserr bool
+ }{
+ {stringMapStringSlice, stringMapStringSlice, false},
+ {stringMapInterfaceSlice, stringMapStringSlice, false},
+ {stringMapInterfaceInterfaceSlice, stringMapStringSlice, false},
+ {stringMapStringSliceMultiple, stringMapStringSlice, false},
+ {stringMapStringSliceMultiple, stringMapStringSlice, false},
+ {stringMapString, stringMapStringSliceSingle, false},
+ {stringMapInterface, stringMapStringSliceSingle, false},
+ {stringMapInterface1, stringMapInterfaceResult1, false},
+ {interfaceMapStringSlice, stringMapStringSlice, false},
+ {interfaceMapInterfaceSlice, stringMapStringSlice, false},
+ {interfaceMapString, stringMapStringSingleSliceFieldsResult, false},
+ {interfaceMapInterface, stringMapStringSingleSliceFieldsResult, false},
+ // errors
+ {nil, nil, true},
+ {testing.T{}, nil, true},
+ {map[interface{}]interface{}{"foo": testing.T{}}, nil, true},
+ {map[interface{}]interface{}{Key{"foo"}: "bar"}, nil, true}, // ToStringE(Key{"foo"}) should fail
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToStringMapStringSliceE(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test
+ v = ToStringMapStringSlice(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+func TestToStringMapE(t *testing.T) {
+ tests := []struct {
+ input interface{}
+ expect map[string]interface{}
+ iserr bool
+ }{
+ {map[interface{}]interface{}{"tag": "tags", "group": "groups"}, map[string]interface{}{"tag": "tags", "group": "groups"}, false},
+ {map[string]interface{}{"tag": "tags", "group": "groups"}, map[string]interface{}{"tag": "tags", "group": "groups"}, false},
+ // errors
+ {nil, nil, true},
+ {testing.T{}, nil, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToStringMapE(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test
+ v = ToStringMap(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+func TestToStringMapBoolE(t *testing.T) {
+ tests := []struct {
+ input interface{}
+ expect map[string]bool
+ iserr bool
+ }{
+ {map[interface{}]interface{}{"v1": true, "v2": false}, map[string]bool{"v1": true, "v2": false}, false},
+ {map[string]interface{}{"v1": true, "v2": false}, map[string]bool{"v1": true, "v2": false}, false},
+ {map[string]bool{"v1": true, "v2": false}, map[string]bool{"v1": true, "v2": false}, false},
+ // errors
+ {nil, nil, true},
+ {testing.T{}, nil, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToStringMapBoolE(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test
+ v = ToStringMapBool(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+func TestToStringMapStringE(t *testing.T) {
+ var stringMapString = map[string]string{"key 1": "value 1", "key 2": "value 2", "key 3": "value 3"}
+ var stringMapInterface = map[string]interface{}{"key 1": "value 1", "key 2": "value 2", "key 3": "value 3"}
+ var interfaceMapString = map[interface{}]string{"key 1": "value 1", "key 2": "value 2", "key 3": "value 3"}
+ var interfaceMapInterface = map[interface{}]interface{}{"key 1": "value 1", "key 2": "value 2", "key 3": "value 3"}
+
+ tests := []struct {
+ input interface{}
+ expect map[string]string
+ iserr bool
+ }{
+ {stringMapString, stringMapString, false},
+ {stringMapInterface, stringMapString, false},
+ {interfaceMapString, stringMapString, false},
+ {interfaceMapInterface, stringMapString, false},
+ // errors
+ {nil, nil, true},
+ {testing.T{}, nil, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToStringMapStringE(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test
+ v = ToStringMapString(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+func TestToBoolSliceE(t *testing.T) {
+ tests := []struct {
+ input interface{}
+ expect []bool
+ iserr bool
+ }{
+ {[]bool{true, false, true}, []bool{true, false, true}, false},
+ {[]interface{}{true, false, true}, []bool{true, false, true}, false},
+ {[]int{1, 0, 1}, []bool{true, false, true}, false},
+ {[]string{"true", "false", "true"}, []bool{true, false, true}, false},
+ // errors
+ {nil, nil, true},
+ {testing.T{}, nil, true},
+ {[]string{"foo", "bar"}, nil, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToBoolSliceE(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test
+ v = ToBoolSlice(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+func TestToIntSliceE(t *testing.T) {
+ tests := []struct {
+ input interface{}
+ expect []int
+ iserr bool
+ }{
+ {[]int{1, 3}, []int{1, 3}, false},
+ {[]interface{}{1.2, 3.2}, []int{1, 3}, false},
+ {[]string{"2", "3"}, []int{2, 3}, false},
+ {[2]string{"2", "3"}, []int{2, 3}, false},
+ // errors
+ {nil, nil, true},
+ {testing.T{}, nil, true},
+ {[]string{"foo", "bar"}, nil, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToIntSliceE(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test
+ v = ToIntSlice(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+func TestToSliceE(t *testing.T) {
+ tests := []struct {
+ input interface{}
+ expect []interface{}
+ iserr bool
+ }{
+ {[]interface{}{1, 3}, []interface{}{1, 3}, false},
+ {[]map[string]interface{}{{"k1": 1}, {"k2": 2}}, []interface{}{map[string]interface{}{"k1": 1}, map[string]interface{}{"k2": 2}}, false},
+ // errors
+ {nil, nil, true},
+ {testing.T{}, nil, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToSliceE(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test
+ v = ToSlice(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+func TestToStringSliceE(t *testing.T) {
+ tests := []struct {
+ input interface{}
+ expect []string
+ iserr bool
+ }{
+ {[]string{"a", "b"}, []string{"a", "b"}, false},
+ {[]interface{}{1, 3}, []string{"1", "3"}, false},
+ {interface{}(1), []string{"1"}, false},
+ // errors
+ {nil, nil, true},
+ {testing.T{}, nil, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToStringSliceE(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test
+ v = ToStringSlice(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+func TestToBoolE(t *testing.T) {
+ tests := []struct {
+ input interface{}
+ expect bool
+ iserr bool
+ }{
+ {0, false, false},
+ {nil, false, false},
+ {"false", false, false},
+ {"FALSE", false, false},
+ {"False", false, false},
+ {"f", false, false},
+ {"F", false, false},
+ {false, false, false},
+
+ {"true", true, false},
+ {"TRUE", true, false},
+ {"True", true, false},
+ {"t", true, false},
+ {"T", true, false},
+ {1, true, false},
+ {true, true, false},
+ {-1, true, false},
+
+ // errors
+ {"test", false, true},
+ {testing.T{}, false, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToBoolE(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test
+ v = ToBool(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
+
+func BenchmarkToBool(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ if !ToBool(true) {
+ b.Fatal("ToBool returned false")
+ }
+ }
+}
+
+func TestIndirectPointers(t *testing.T) {
+ x := 13
+ y := &x
+ z := &y
+
+ assert.Equal(t, ToInt(y), 13)
+ assert.Equal(t, ToInt(z), 13)
+}
+
+func TestToTimeEE(t *testing.T) {
+ tests := []struct {
+ input interface{}
+ expect time.Time
+ iserr bool
+ }{
+ {"2009-11-10 23:00:00 +0000 UTC", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // Time.String()
+ {"Tue Nov 10 23:00:00 2009", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // ANSIC
+ {"Tue Nov 10 23:00:00 UTC 2009", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // UnixDate
+ {"Tue Nov 10 23:00:00 +0000 2009", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // RubyDate
+ {"10 Nov 09 23:00 UTC", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // RFC822
+ {"10 Nov 09 23:00 +0000", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // RFC822Z
+ {"Tuesday, 10-Nov-09 23:00:00 UTC", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // RFC850
+ {"Tue, 10 Nov 2009 23:00:00 UTC", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // RFC1123
+ {"Tue, 10 Nov 2009 23:00:00 +0000", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // RFC1123Z
+ {"2009-11-10T23:00:00Z", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // RFC3339
+ {"2009-11-10T23:00:00Z", time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), false}, // RFC3339Nano
+ {"11:00PM", time.Date(0, 1, 1, 23, 0, 0, 0, time.UTC), false}, // Kitchen
+ {"Nov 10 23:00:00", time.Date(0, 11, 10, 23, 0, 0, 0, time.UTC), false}, // Stamp
+ {"Nov 10 23:00:00.000", time.Date(0, 11, 10, 23, 0, 0, 0, time.UTC), false}, // StampMilli
+ {"Nov 10 23:00:00.000000", time.Date(0, 11, 10, 23, 0, 0, 0, time.UTC), false}, // StampMicro
+ {"Nov 10 23:00:00.000000000", time.Date(0, 11, 10, 23, 0, 0, 0, time.UTC), false}, // StampNano
+ {"2016-03-06 15:28:01-00:00", time.Date(2016, 3, 6, 15, 28, 1, 0, time.UTC), false}, // RFC3339 without T
+ {"2016-03-06 15:28:01", time.Date(2016, 3, 6, 15, 28, 1, 0, time.UTC), false},
+ {"2016-03-06 15:28:01 -0000", time.Date(2016, 3, 6, 15, 28, 1, 0, time.UTC), false},
+ {"2016-03-06 15:28:01 -00:00", time.Date(2016, 3, 6, 15, 28, 1, 0, time.UTC), false},
+ {"2006-01-02", time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC), false},
+ {"02 Jan 2006", time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC), false},
+ {1472574600, time.Date(2016, 8, 30, 16, 30, 0, 0, time.UTC), false},
+ {int(1482597504), time.Date(2016, 12, 24, 16, 38, 24, 0, time.UTC), false},
+ {int64(1234567890), time.Date(2009, 2, 13, 23, 31, 30, 0, time.UTC), false},
+ {int32(1234567890), time.Date(2009, 2, 13, 23, 31, 30, 0, time.UTC), false},
+ {uint(1482597504), time.Date(2016, 12, 24, 16, 38, 24, 0, time.UTC), false},
+ {uint64(1234567890), time.Date(2009, 2, 13, 23, 31, 30, 0, time.UTC), false},
+ {uint32(1234567890), time.Date(2009, 2, 13, 23, 31, 30, 0, time.UTC), false},
+ {time.Date(2009, 2, 13, 23, 31, 30, 0, time.UTC), time.Date(2009, 2, 13, 23, 31, 30, 0, time.UTC), false},
+ // errors
+ {"2006", time.Time{}, true},
+ {testing.T{}, time.Time{}, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToTimeE(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v.UTC(), errmsg)
+
+ // Non-E test
+ v = ToTime(test.input)
+ assert.Equal(t, test.expect, v.UTC(), errmsg)
+ }
+}
+
+func TestToDurationE(t *testing.T) {
+ var td time.Duration = 5
+
+ tests := []struct {
+ input interface{}
+ expect time.Duration
+ iserr bool
+ }{
+ {time.Duration(5), td, false},
+ {int(5), td, false},
+ {int64(5), td, false},
+ {int32(5), td, false},
+ {int16(5), td, false},
+ {int8(5), td, false},
+ {uint(5), td, false},
+ {uint64(5), td, false},
+ {uint32(5), td, false},
+ {uint16(5), td, false},
+ {uint8(5), td, false},
+ {float64(5), td, false},
+ {float32(5), td, false},
+ {string("5"), td, false},
+ {string("5ns"), td, false},
+ {string("5us"), time.Microsecond * td, false},
+ {string("5µs"), time.Microsecond * td, false},
+ {string("5ms"), time.Millisecond * td, false},
+ {string("5s"), time.Second * td, false},
+ {string("5m"), time.Minute * td, false},
+ {string("5h"), time.Hour * td, false},
+ // errors
+ {"test", 0, true},
+ {testing.T{}, 0, true},
+ }
+
+ for i, test := range tests {
+ errmsg := fmt.Sprintf("i = %d", i) // assert helper message
+
+ v, err := ToDurationE(test.input)
+ if test.iserr {
+ assert.Error(t, err, errmsg)
+ continue
+ }
+
+ assert.NoError(t, err, errmsg)
+ assert.Equal(t, test.expect, v, errmsg)
+
+ // Non-E test
+ v = ToDuration(test.input)
+ assert.Equal(t, test.expect, v, errmsg)
+ }
+}
diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go
new file mode 100644
index 000000000..4e75f64ba
--- /dev/null
+++ b/vendor/github.com/spf13/cast/caste.go
@@ -0,0 +1,1117 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package cast
+
+import (
+ "errors"
+ "fmt"
+ "html/template"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var errNegativeNotAllowed = errors.New("unable to cast negative value")
+
+// ToTimeE casts an interface to a time.Time type.
+func ToTimeE(i interface{}) (tim time.Time, err error) {
+ i = indirect(i)
+
+ switch v := i.(type) {
+ case time.Time:
+ return v, nil
+ case string:
+ return StringToDate(v)
+ case int:
+ return time.Unix(int64(v), 0), nil
+ case int64:
+ return time.Unix(v, 0), nil
+ case int32:
+ return time.Unix(int64(v), 0), nil
+ case uint:
+ return time.Unix(int64(v), 0), nil
+ case uint64:
+ return time.Unix(int64(v), 0), nil
+ case uint32:
+ return time.Unix(int64(v), 0), nil
+ default:
+ return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i)
+ }
+}
+
+// ToDurationE casts an interface to a time.Duration type.
+func ToDurationE(i interface{}) (d time.Duration, err error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case time.Duration:
+ return s, nil
+ case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8:
+ d = time.Duration(ToInt64(s))
+ return
+ case float32, float64:
+ d = time.Duration(ToFloat64(s))
+ return
+ case string:
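+		// A bare number such as "5" contains none of the duration unit
+		// characters, so it is parsed as nanoseconds by appending "ns";
+		// anything with a unit character is handed to time.ParseDuration as-is.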
+ if strings.ContainsAny(s, "nsuµmh") {
+ d, err = time.ParseDuration(s)
+ } else {
+ d, err = time.ParseDuration(s + "ns")
+ }
+ return
+ default:
+ err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i)
+ return
+ }
+}
+
+// ToBoolE casts an interface to a bool type.
+func ToBoolE(i interface{}) (bool, error) {
+ i = indirect(i)
+
+ switch b := i.(type) {
+ case bool:
+ return b, nil
+ case nil:
+ return false, nil
+ case int:
+		if b != 0 {
+ return true, nil
+ }
+ return false, nil
+ case string:
+		return strconv.ParseBool(b)
+ default:
+ return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i)
+ }
+}
+
+// ToFloat64E casts an interface to a float64 type.
+func ToFloat64E(i interface{}) (float64, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case float64:
+ return s, nil
+ case float32:
+ return float64(s), nil
+ case int:
+ return float64(s), nil
+ case int64:
+ return float64(s), nil
+ case int32:
+ return float64(s), nil
+ case int16:
+ return float64(s), nil
+ case int8:
+ return float64(s), nil
+ case uint:
+ return float64(s), nil
+ case uint64:
+ return float64(s), nil
+ case uint32:
+ return float64(s), nil
+ case uint16:
+ return float64(s), nil
+ case uint8:
+ return float64(s), nil
+ case string:
+ v, err := strconv.ParseFloat(s, 64)
+ if err == nil {
+ return v, nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
+ }
+}
+
+// ToFloat32E casts an interface to a float32 type.
+func ToFloat32E(i interface{}) (float32, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case float64:
+ return float32(s), nil
+ case float32:
+ return s, nil
+ case int:
+ return float32(s), nil
+ case int64:
+ return float32(s), nil
+ case int32:
+ return float32(s), nil
+ case int16:
+ return float32(s), nil
+ case int8:
+ return float32(s), nil
+ case uint:
+ return float32(s), nil
+ case uint64:
+ return float32(s), nil
+ case uint32:
+ return float32(s), nil
+ case uint16:
+ return float32(s), nil
+ case uint8:
+ return float32(s), nil
+ case string:
+ v, err := strconv.ParseFloat(s, 32)
+ if err == nil {
+ return float32(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
+ }
+}
+
+// ToInt64E casts an interface to an int64 type.
+func ToInt64E(i interface{}) (int64, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return int64(s), nil
+ case int64:
+ return s, nil
+ case int32:
+ return int64(s), nil
+ case int16:
+ return int64(s), nil
+ case int8:
+ return int64(s), nil
+ case uint:
+ return int64(s), nil
+ case uint64:
+ return int64(s), nil
+ case uint32:
+ return int64(s), nil
+ case uint16:
+ return int64(s), nil
+ case uint8:
+ return int64(s), nil
+ case float64:
+ return int64(s), nil
+ case float32:
+ return int64(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return v, nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i)
+ }
+}
+
+// ToInt32E casts an interface to an int32 type.
+func ToInt32E(i interface{}) (int32, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return int32(s), nil
+ case int64:
+ return int32(s), nil
+ case int32:
+ return s, nil
+ case int16:
+ return int32(s), nil
+ case int8:
+ return int32(s), nil
+ case uint:
+ return int32(s), nil
+ case uint64:
+ return int32(s), nil
+ case uint32:
+ return int32(s), nil
+ case uint16:
+ return int32(s), nil
+ case uint8:
+ return int32(s), nil
+ case float64:
+ return int32(s), nil
+ case float32:
+ return int32(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return int32(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i)
+ }
+}
+
+// ToInt16E casts an interface to an int16 type.
+func ToInt16E(i interface{}) (int16, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return int16(s), nil
+ case int64:
+ return int16(s), nil
+ case int32:
+ return int16(s), nil
+ case int16:
+ return s, nil
+ case int8:
+ return int16(s), nil
+ case uint:
+ return int16(s), nil
+ case uint64:
+ return int16(s), nil
+ case uint32:
+ return int16(s), nil
+ case uint16:
+ return int16(s), nil
+ case uint8:
+ return int16(s), nil
+ case float64:
+ return int16(s), nil
+ case float32:
+ return int16(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return int16(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i)
+ }
+}
+
+// ToInt8E casts an interface to an int8 type.
+func ToInt8E(i interface{}) (int8, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return int8(s), nil
+ case int64:
+ return int8(s), nil
+ case int32:
+ return int8(s), nil
+ case int16:
+ return int8(s), nil
+ case int8:
+ return s, nil
+ case uint:
+ return int8(s), nil
+ case uint64:
+ return int8(s), nil
+ case uint32:
+ return int8(s), nil
+ case uint16:
+ return int8(s), nil
+ case uint8:
+ return int8(s), nil
+ case float64:
+ return int8(s), nil
+ case float32:
+ return int8(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return int8(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i)
+ }
+}
+
+// ToIntE casts an interface to an int type.
+func ToIntE(i interface{}) (int, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return s, nil
+ case int64:
+ return int(s), nil
+ case int32:
+ return int(s), nil
+ case int16:
+ return int(s), nil
+ case int8:
+ return int(s), nil
+ case uint:
+ return int(s), nil
+ case uint64:
+ return int(s), nil
+ case uint32:
+ return int(s), nil
+ case uint16:
+ return int(s), nil
+ case uint8:
+ return int(s), nil
+ case float64:
+ return int(s), nil
+ case float32:
+ return int(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return int(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i)
+ }
+}
+
+// ToUintE casts an interface to a uint type.
+func ToUintE(i interface{}) (uint, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 0)
+ if err == nil {
+ return uint(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case uint:
+ return s, nil
+ case uint64:
+ return uint(s), nil
+ case uint32:
+ return uint(s), nil
+ case uint16:
+ return uint(s), nil
+ case uint8:
+ return uint(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i)
+ }
+}
+
+// ToUint64E casts an interface to a uint64 type.
+func ToUint64E(i interface{}) (uint64, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 64)
+ if err == nil {
+ return v, nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint64: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case uint:
+ return uint64(s), nil
+ case uint64:
+ return s, nil
+ case uint32:
+ return uint64(s), nil
+ case uint16:
+ return uint64(s), nil
+ case uint8:
+ return uint64(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i)
+ }
+}
+
+// ToUint32E casts an interface to a uint32 type.
+func ToUint32E(i interface{}) (uint32, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 32)
+ if err == nil {
+ return uint32(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint32: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case uint:
+ return uint32(s), nil
+ case uint64:
+ return uint32(s), nil
+ case uint32:
+ return s, nil
+ case uint16:
+ return uint32(s), nil
+ case uint8:
+ return uint32(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i)
+ }
+}
+
+// ToUint16E casts an interface to a uint16 type.
+func ToUint16E(i interface{}) (uint16, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 16)
+ if err == nil {
+ return uint16(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint16: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case uint:
+ return uint16(s), nil
+ case uint64:
+ return uint16(s), nil
+ case uint32:
+ return uint16(s), nil
+ case uint16:
+ return s, nil
+ case uint8:
+ return uint16(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i)
+ }
+}
+
+// ToUint8E casts an interface to a uint8 type.
+func ToUint8E(i interface{}) (uint8, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 8)
+ if err == nil {
+ return uint8(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint8: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case uint:
+ return uint8(s), nil
+ case uint64:
+ return uint8(s), nil
+ case uint32:
+ return uint8(s), nil
+ case uint16:
+ return uint8(s), nil
+ case uint8:
+ return s, nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i)
+ }
+}
+
+// From html/template/content.go
+// Copyright 2011 The Go Authors. All rights reserved.
+// indirect returns the value, after dereferencing as many times
+// as necessary to reach the base type (or nil).
+func indirect(a interface{}) interface{} {
+ if a == nil {
+ return nil
+ }
+ if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr {
+ // Avoid creating a reflect.Value if it's not a pointer.
+ return a
+ }
+ v := reflect.ValueOf(a)
+ for v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v.Interface()
+}
+
+// From html/template/content.go
+// Copyright 2011 The Go Authors. All rights reserved.
+// indirectToStringerOrError returns the value, after dereferencing as many times
+// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer
+// or error.
+func indirectToStringerOrError(a interface{}) interface{} {
+ if a == nil {
+ return nil
+ }
+
+ var errorType = reflect.TypeOf((*error)(nil)).Elem()
+ var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+
+ v := reflect.ValueOf(a)
+ for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v.Interface()
+}
+
+// ToStringE casts an interface to a string type.
+func ToStringE(i interface{}) (string, error) {
+ i = indirectToStringerOrError(i)
+
+ switch s := i.(type) {
+ case string:
+ return s, nil
+ case bool:
+ return strconv.FormatBool(s), nil
+ case float64:
+ return strconv.FormatFloat(s, 'f', -1, 64), nil
+ case float32:
+ return strconv.FormatFloat(float64(s), 'f', -1, 32), nil
+ case int:
+ return strconv.Itoa(s), nil
+ case int64:
+ return strconv.FormatInt(s, 10), nil
+ case int32:
+ return strconv.Itoa(int(s)), nil
+ case int16:
+ return strconv.FormatInt(int64(s), 10), nil
+ case int8:
+ return strconv.FormatInt(int64(s), 10), nil
+ case uint:
+ return strconv.FormatInt(int64(s), 10), nil
+ case uint64:
+ return strconv.FormatInt(int64(s), 10), nil
+ case uint32:
+ return strconv.FormatInt(int64(s), 10), nil
+ case uint16:
+ return strconv.FormatInt(int64(s), 10), nil
+ case uint8:
+ return strconv.FormatInt(int64(s), 10), nil
+ case []byte:
+ return string(s), nil
+ case template.HTML:
+ return string(s), nil
+ case template.URL:
+ return string(s), nil
+ case template.JS:
+ return string(s), nil
+ case template.CSS:
+ return string(s), nil
+ case template.HTMLAttr:
+ return string(s), nil
+ case nil:
+ return "", nil
+ case fmt.Stringer:
+ return s.String(), nil
+ case error:
+ return s.Error(), nil
+ default:
+ return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i)
+ }
+}
+
+// ToStringMapStringE casts an interface to a map[string]string type.
+func ToStringMapStringE(i interface{}) (map[string]string, error) {
+ var m = map[string]string{}
+
+ switch v := i.(type) {
+ case map[string]string:
+ return v, nil
+ case map[string]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToString(val)
+ }
+ return m, nil
+ case map[interface{}]string:
+ for k, val := range v {
+ m[ToString(k)] = ToString(val)
+ }
+ return m, nil
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToString(val)
+ }
+ return m, nil
+ default:
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i)
+ }
+}
+
+// ToStringMapStringSliceE casts an interface to a map[string][]string type.
+func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) {
+ var m = map[string][]string{}
+
+ switch v := i.(type) {
+ case map[string][]string:
+ return v, nil
+ case map[string][]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToStringSlice(val)
+ }
+ return m, nil
+ case map[string]string:
+ for k, val := range v {
+ m[ToString(k)] = []string{val}
+ }
+ case map[string]interface{}:
+ for k, val := range v {
+ switch vt := val.(type) {
+ case []interface{}:
+ m[ToString(k)] = ToStringSlice(vt)
+ case []string:
+ m[ToString(k)] = vt
+ default:
+ m[ToString(k)] = []string{ToString(val)}
+ }
+ }
+ return m, nil
+ case map[interface{}][]string:
+ for k, val := range v {
+ m[ToString(k)] = ToStringSlice(val)
+ }
+ return m, nil
+ case map[interface{}]string:
+ for k, val := range v {
+ m[ToString(k)] = ToStringSlice(val)
+ }
+ return m, nil
+ case map[interface{}][]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToStringSlice(val)
+ }
+ return m, nil
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ key, err := ToStringE(k)
+ if err != nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+ }
+ value, err := ToStringSliceE(val)
+ if err != nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+ }
+ m[key] = value
+ }
+ default:
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+ }
+ return m, nil
+}
+
+// ToStringMapBoolE casts an interface to a map[string]bool type.
+func ToStringMapBoolE(i interface{}) (map[string]bool, error) {
+ var m = map[string]bool{}
+
+ switch v := i.(type) {
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToBool(val)
+ }
+ return m, nil
+ case map[string]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToBool(val)
+ }
+ return m, nil
+ case map[string]bool:
+ return v, nil
+ default:
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i)
+ }
+}
+
+// ToStringMapE casts an interface to a map[string]interface{} type.
+func ToStringMapE(i interface{}) (map[string]interface{}, error) {
+ var m = map[string]interface{}{}
+
+ switch v := i.(type) {
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = val
+ }
+ return m, nil
+ case map[string]interface{}:
+ return v, nil
+ default:
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i)
+ }
+}
+
+// ToSliceE casts an interface to a []interface{} type.
+func ToSliceE(i interface{}) ([]interface{}, error) {
+ var s []interface{}
+
+ switch v := i.(type) {
+ case []interface{}:
+ return append(s, v...), nil
+ case []map[string]interface{}:
+ for _, u := range v {
+ s = append(s, u)
+ }
+ return s, nil
+ default:
+ return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i)
+ }
+}
+
+// ToBoolSliceE casts an interface to a []bool type.
+func ToBoolSliceE(i interface{}) ([]bool, error) {
+ if i == nil {
+ return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+ }
+
+ switch v := i.(type) {
+ case []bool:
+ return v, nil
+ }
+
+ kind := reflect.TypeOf(i).Kind()
+ switch kind {
+ case reflect.Slice, reflect.Array:
+ s := reflect.ValueOf(i)
+ a := make([]bool, s.Len())
+ for j := 0; j < s.Len(); j++ {
+ val, err := ToBoolE(s.Index(j).Interface())
+ if err != nil {
+ return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+ }
+ a[j] = val
+ }
+ return a, nil
+ default:
+ return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+ }
+}
+
+// ToStringSliceE casts an interface to a []string type.
+func ToStringSliceE(i interface{}) ([]string, error) {
+ var a []string
+
+ switch v := i.(type) {
+ case []interface{}:
+ for _, u := range v {
+ a = append(a, ToString(u))
+ }
+ return a, nil
+ case []string:
+ return v, nil
+ case string:
+ return strings.Fields(v), nil
+ case interface{}:
+ str, err := ToStringE(v)
+ if err != nil {
+ return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i)
+ }
+ return []string{str}, nil
+ default:
+ return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i)
+ }
+}
+
+// ToIntSliceE casts an interface to a []int type.
+func ToIntSliceE(i interface{}) ([]int, error) {
+ if i == nil {
+ return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
+ }
+
+ switch v := i.(type) {
+ case []int:
+ return v, nil
+ }
+
+ kind := reflect.TypeOf(i).Kind()
+ switch kind {
+ case reflect.Slice, reflect.Array:
+ s := reflect.ValueOf(i)
+ a := make([]int, s.Len())
+ for j := 0; j < s.Len(); j++ {
+ val, err := ToIntE(s.Index(j).Interface())
+ if err != nil {
+ return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
+ }
+ a[j] = val
+ }
+ return a, nil
+ default:
+ return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
+ }
+}
+
+// StringToDate attempts to parse a string into a time.Time type using a
+// predefined list of formats. If no suitable format is found, an error is
+// returned.
+func StringToDate(s string) (time.Time, error) {
+ return parseDateWith(s, []string{
+ time.RFC3339,
+ "2006-01-02T15:04:05", // iso8601 without timezone
+ time.RFC1123Z,
+ time.RFC1123,
+ time.RFC822Z,
+ time.RFC822,
+ time.RFC850,
+ time.ANSIC,
+ time.UnixDate,
+ time.RubyDate,
+ "2006-01-02 15:04:05.999999999 -0700 MST", // Time.String()
+ "2006-01-02",
+ "02 Jan 2006",
+ "2006-01-02 15:04:05 -07:00",
+ "2006-01-02 15:04:05 -0700",
+ "2006-01-02 15:04:05Z07:00", // RFC3339 without T
+ "2006-01-02 15:04:05",
+ time.Kitchen,
+ time.Stamp,
+ time.StampMilli,
+ time.StampMicro,
+ time.StampNano,
+ })
+}
+
+func parseDateWith(s string, dates []string) (d time.Time, e error) {
+ for _, dateType := range dates {
+ if d, e = time.Parse(dateType, s); e == nil {
+ return
+ }
+ }
+ return d, fmt.Errorf("unable to parse date: %s", s)
+}
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
index ae3930dfc..664bf5aa5 100644
--- a/vendor/github.com/spf13/cobra/command.go
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -788,6 +788,7 @@ func (c *Command) initHelpCmd() {
},
}
}
+ c.RemoveCommand(c.helpCommand)
c.AddCommand(c.helpCommand)
}
diff --git a/vendor/github.com/spf13/jwalterweatherman/.gitignore b/vendor/github.com/spf13/jwalterweatherman/.gitignore
new file mode 100644
index 000000000..00268614f
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/.gitignore
@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/vendor/github.com/spf13/jwalterweatherman/LICENSE b/vendor/github.com/spf13/jwalterweatherman/LICENSE
new file mode 100644
index 000000000..4527efb9c
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Steve Francia
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE. \ No newline at end of file
diff --git a/vendor/github.com/spf13/jwalterweatherman/README.md b/vendor/github.com/spf13/jwalterweatherman/README.md
new file mode 100644
index 000000000..350a9683d
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/README.md
@@ -0,0 +1,148 @@
+jWalterWeatherman
+=================
+
+Seamless printing to the terminal (stdout) and logging to an io.Writer
+(file) that’s as easy to use as fmt.Println.
+
+![and_that__s_why_you_always_leave_a_note_by_jonnyetc-d57q7um](https://cloud.githubusercontent.com/assets/173412/11002937/ccd01654-847d-11e5-828e-12ebaf582eaf.jpg)
+Graphic by [JonnyEtc](http://jonnyetc.deviantart.com/art/And-That-s-Why-You-Always-Leave-a-Note-315311422)
+
+JWW is primarily a wrapper around the excellent standard log library. It
+provides a few advantages over using the standard log library alone.
+
+1. Ready to go out of the box.
+2. One library for both printing to the terminal and logging (to files).
+3. Really easy to log to either a temp file or a file you specify.
+
+
+I really wanted a very straightforward library that could seamlessly do
+the following things.
+
+1. Replace all the println, printf, etc. statements throughout my code with
+ something more useful
+2. Allow the user to easily control what levels are printed to stdout
+3. Allow the user to easily control what levels are logged
+4. Provide an easy mechanism (like fmt.Println) to print info to the user
+ which can be easily logged as well
+5. Due to 2 & 3, provide an easy verbose mode for output and logs
+6. Not have any unnecessary initialization cruft. Just use it.
+
+# Usage
+
+## Step 1. Use it
+Put calls throughout your source based on type of feedback.
+No initialization or setup needs to happen. Just start calling things.
+
+Available Loggers are:
+
+ * TRACE
+ * DEBUG
+ * INFO
+ * WARN
+ * ERROR
+ * CRITICAL
+ * FATAL
+
+Each of these is a logger based on the standard log library and follows the
+standard usage. E.g.
+
+```go
+ import (
+ jww "github.com/spf13/jwalterweatherman"
+ )
+
+ ...
+
+ if err != nil {
+
+ // This is a pretty serious error and the user should know about
+ // it. It will be printed to the terminal as well as logged under the
+ // default thresholds.
+
+ jww.ERROR.Println(err)
+ }
+
+ if err2 != nil {
+ // This error isn’t going to materially change the behavior of the
+ // application, but it’s something that may not be what the user
+ // expects. Under the default thresholds, Warn will be logged, but
+ // not printed to the terminal.
+
+ jww.WARN.Println(err2)
+ }
+
+ // Information that’s relevant to what’s happening, but not very
+ // important for the user. Under the default thresholds this will be
+ // discarded.
+
+ jww.INFO.Printf("information %q", response)
+
+```
+
+NOTE: You can also use the library in a non-global setting by creating an instance of a Notepad:
+
+```go
+notepad = jww.NewNotepad(jww.LevelInfo, jww.LevelTrace, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime)
+notepad.WARN.Println("Some warning")
+```
+
+_Why 7 levels?_
+
+Maybe you think that 7 levels are too many for any application... and you
+are probably correct. Just because there are seven levels doesn’t mean
+that you should be using all 7 levels. Pick the right set for your needs.
+Remember they only have to mean something to your project.
+
+## Step 2. Optionally configure JWW
+
+Under the default thresholds:
+
+ * Debug, Trace & Info go to /dev/null
+ * Warn and above is logged (when a log file/io.Writer is provided)
+ * Error and above is printed to the terminal (stdout)
+
+### Changing the thresholds
+
+The threshold can be changed at any time, but will only affect calls that
+execute after the change was made.
+
+This is very useful if your application has a verbose mode. Of course you
+can decide what verbose means to you or even have multiple levels of
+verbosity.
+
+
+```go
+ import (
+ jww "github.com/spf13/jwalterweatherman"
+ )
+
+ if Verbose {
+ jww.SetLogThreshold(jww.LevelTrace)
+ jww.SetStdoutThreshold(jww.LevelInfo)
+ }
+```
+
+Note that JWW's own internal output uses log levels as well, so set the log
+level before making any other calls if you want to see what it's up to.
+
+
+### Setting a log file
+
+JWW can log to any `io.Writer`:
+
+
+```go
+
+ jww.SetLogOutput(customWriter)
+
+```
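+
+For example, to send the log to a file (a minimal sketch; the file name, flags, and permissions are illustrative, and a real program should handle the error):
+
+```go
+logFile, err := os.OpenFile("app.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+if err != nil {
+	// could not open the file; keep the default (discard) log output
+} else {
+	jww.SetLogOutput(logFile)
+}
+```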
+
+
+# More information
+
+This is an early release. I’ve been using it for a while and this is the
+third interface I’ve tried. I like this one pretty well, but no guarantees
+that it won’t change a bit.
+
+I wrote this for use in [hugo](http://hugo.spf13.com). If you are looking
+for a static website engine that’s super fast, please check out Hugo.
diff --git a/vendor/github.com/spf13/jwalterweatherman/default_notepad.go b/vendor/github.com/spf13/jwalterweatherman/default_notepad.go
new file mode 100644
index 000000000..bcb763403
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/default_notepad.go
@@ -0,0 +1,113 @@
+// Copyright © 2016 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package jwalterweatherman
+
+import (
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+)
+
+var (
+ TRACE *log.Logger
+ DEBUG *log.Logger
+ INFO *log.Logger
+ WARN *log.Logger
+ ERROR *log.Logger
+ CRITICAL *log.Logger
+ FATAL *log.Logger
+
+ LOG *log.Logger
+ FEEDBACK *Feedback
+
+ defaultNotepad *Notepad
+)
+
+func reloadDefaultNotepad() {
+ TRACE = defaultNotepad.TRACE
+ DEBUG = defaultNotepad.DEBUG
+ INFO = defaultNotepad.INFO
+ WARN = defaultNotepad.WARN
+ ERROR = defaultNotepad.ERROR
+ CRITICAL = defaultNotepad.CRITICAL
+ FATAL = defaultNotepad.FATAL
+
+ LOG = defaultNotepad.LOG
+ FEEDBACK = defaultNotepad.FEEDBACK
+}
+
+func init() {
+ defaultNotepad = NewNotepad(LevelError, LevelWarn, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime)
+ reloadDefaultNotepad()
+}
+
+// SetLogThreshold sets the log threshold for the default notepad. Warn by default.
+func SetLogThreshold(threshold Threshold) {
+ defaultNotepad.SetLogThreshold(threshold)
+ reloadDefaultNotepad()
+}
+
+// SetLogOutput sets the log output for the default notepad. Discarded by default.
+func SetLogOutput(handle io.Writer) {
+ defaultNotepad.SetLogOutput(handle)
+ reloadDefaultNotepad()
+}
+
+// SetStdoutThreshold sets the standard output threshold for the default notepad.
+// Error by default.
+func SetStdoutThreshold(threshold Threshold) {
+ defaultNotepad.SetStdoutThreshold(threshold)
+ reloadDefaultNotepad()
+}
+
+// SetPrefix sets the prefix for the default logger. Empty by default.
+func SetPrefix(prefix string) {
+ defaultNotepad.SetPrefix(prefix)
+ reloadDefaultNotepad()
+}
+
+// SetFlags sets the flags for the default logger. "log.Ldate | log.Ltime" by default.
+func SetFlags(flags int) {
+ defaultNotepad.SetFlags(flags)
+ reloadDefaultNotepad()
+}
+
+// LogThreshold returns the current global log threshold.
+func LogThreshold() Threshold {
+ return defaultNotepad.logThreshold
+}
+
+// StdoutThreshold returns the current global output threshold.
+func StdoutThreshold() Threshold {
+ return defaultNotepad.stdoutThreshold
+}
+
+// GetLogThreshold returns the threshold defined for the log logger.
+func GetLogThreshold() Threshold {
+ return defaultNotepad.GetLogThreshold()
+}
+
+// GetStdoutThreshold returns the threshold for the stdout logger.
+func GetStdoutThreshold() Threshold {
+ return defaultNotepad.GetStdoutThreshold()
+}
+
+// LogCountForLevel returns the number of log invocations for a given threshold.
+func LogCountForLevel(l Threshold) uint64 {
+ return defaultNotepad.LogCountForLevel(l)
+}
+
+// LogCountForLevelsGreaterThanorEqualTo returns the number of log invocations
+// greater than or equal to a given threshold.
+func LogCountForLevelsGreaterThanorEqualTo(threshold Threshold) uint64 {
+ return defaultNotepad.LogCountForLevelsGreaterThanorEqualTo(threshold)
+}
+
+// ResetLogCounters resets the invocation counters for all levels.
+func ResetLogCounters() {
+ defaultNotepad.ResetLogCounters()
+}
diff --git a/vendor/github.com/spf13/jwalterweatherman/default_notepad_test.go b/vendor/github.com/spf13/jwalterweatherman/default_notepad_test.go
new file mode 100644
index 000000000..2670c8d96
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/default_notepad_test.go
@@ -0,0 +1,102 @@
+// Copyright © 2016 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package jwalterweatherman
+
+import (
+ "bytes"
+ "io/ioutil"
+ "sync"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestThresholds(t *testing.T) {
+ SetStdoutThreshold(LevelError)
+ require.Equal(t, StdoutThreshold(), LevelError)
+ SetLogThreshold(LevelCritical)
+ require.Equal(t, LogThreshold(), LevelCritical)
+ require.NotEqual(t, StdoutThreshold(), LevelCritical)
+ SetStdoutThreshold(LevelWarn)
+ require.Equal(t, StdoutThreshold(), LevelWarn)
+}
+
+func TestDefaultLogging(t *testing.T) {
+ var outputBuf, logBuf bytes.Buffer
+
+ defaultNotepad.logHandle = &logBuf
+ defaultNotepad.outHandle = &outputBuf
+
+ SetLogThreshold(LevelWarn)
+ SetStdoutThreshold(LevelError)
+
+ FATAL.Println("fatal err")
+ CRITICAL.Println("critical err")
+ ERROR.Println("an error")
+ WARN.Println("a warning")
+ INFO.Println("information")
+ DEBUG.Println("debugging info")
+ TRACE.Println("trace")
+
+ require.Contains(t, logBuf.String(), "fatal err")
+ require.Contains(t, logBuf.String(), "critical err")
+ require.Contains(t, logBuf.String(), "an error")
+ require.Contains(t, logBuf.String(), "a warning")
+ require.NotContains(t, logBuf.String(), "information")
+ require.NotContains(t, logBuf.String(), "debugging info")
+ require.NotContains(t, logBuf.String(), "trace")
+
+ require.Contains(t, outputBuf.String(), "fatal err")
+ require.Contains(t, outputBuf.String(), "critical err")
+ require.Contains(t, outputBuf.String(), "an error")
+ require.NotContains(t, outputBuf.String(), "a warning")
+ require.NotContains(t, outputBuf.String(), "information")
+ require.NotContains(t, outputBuf.String(), "debugging info")
+ require.NotContains(t, outputBuf.String(), "trace")
+}
+
+func TestLogCounter(t *testing.T) {
+ defaultNotepad.logHandle = ioutil.Discard
+ defaultNotepad.outHandle = ioutil.Discard
+
+ SetLogThreshold(LevelTrace)
+ SetStdoutThreshold(LevelTrace)
+
+ FATAL.Println("fatal err")
+ CRITICAL.Println("critical err")
+ WARN.Println("a warning")
+ WARN.Println("another warning")
+ INFO.Println("information")
+ DEBUG.Println("debugging info")
+ TRACE.Println("trace")
+
+ wg := &sync.WaitGroup{}
+
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for j := 0; j < 10; j++ {
+ ERROR.Println("error", j)
+ // check for data races
+ require.True(t, LogCountForLevel(LevelError) > uint64(j))
+ require.True(t, LogCountForLevelsGreaterThanorEqualTo(LevelError) > uint64(j))
+ }
+ }()
+
+ }
+
+ wg.Wait()
+
+ require.Equal(t, uint64(1), LogCountForLevel(LevelFatal))
+ require.Equal(t, uint64(1), LogCountForLevel(LevelCritical))
+ require.Equal(t, uint64(2), LogCountForLevel(LevelWarn))
+ require.Equal(t, uint64(1), LogCountForLevel(LevelInfo))
+ require.Equal(t, uint64(1), LogCountForLevel(LevelDebug))
+ require.Equal(t, uint64(1), LogCountForLevel(LevelTrace))
+ require.Equal(t, uint64(100), LogCountForLevel(LevelError))
+ require.Equal(t, uint64(102), LogCountForLevelsGreaterThanorEqualTo(LevelError))
+}
diff --git a/vendor/github.com/spf13/jwalterweatherman/log_counter.go b/vendor/github.com/spf13/jwalterweatherman/log_counter.go
new file mode 100644
index 000000000..570db1d4c
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/log_counter.go
@@ -0,0 +1,56 @@
+// Copyright © 2016 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package jwalterweatherman
+
+import (
+ "sync/atomic"
+)
+
+type logCounter struct {
+ counter uint64
+}
+
+func (c *logCounter) incr() {
+ atomic.AddUint64(&c.counter, 1)
+}
+
+func (c *logCounter) resetCounter() {
+ atomic.StoreUint64(&c.counter, 0)
+}
+
+func (c *logCounter) getCount() uint64 {
+ return atomic.LoadUint64(&c.counter)
+}
+
+func (c *logCounter) Write(p []byte) (n int, err error) {
+ c.incr()
+
+ return len(p), nil
+}
+
+// LogCountForLevel returns the number of log invocations for a given threshold.
+func (n *Notepad) LogCountForLevel(l Threshold) uint64 {
+ return n.logCounters[l].getCount()
+}
+
+// LogCountForLevelsGreaterThanorEqualTo returns the number of log invocations
+// greater than or equal to a given threshold.
+func (n *Notepad) LogCountForLevelsGreaterThanorEqualTo(threshold Threshold) uint64 {
+ var cnt uint64
+
+ for i := int(threshold); i < len(n.logCounters); i++ {
+ cnt += n.LogCountForLevel(Threshold(i))
+ }
+
+ return cnt
+}
+
+// ResetLogCounters resets the invocation counters for all levels.
+func (n *Notepad) ResetLogCounters() {
+ for _, np := range n.logCounters {
+ np.resetCounter()
+ }
+}
diff --git a/vendor/github.com/spf13/jwalterweatherman/notepad.go b/vendor/github.com/spf13/jwalterweatherman/notepad.go
new file mode 100644
index 000000000..5a623f487
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/notepad.go
@@ -0,0 +1,195 @@
+// Copyright © 2016 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package jwalterweatherman
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "os"
+)
+
+type Threshold int
+
+func (t Threshold) String() string {
+ return prefixes[t]
+}
+
+const (
+ LevelTrace Threshold = iota
+ LevelDebug
+ LevelInfo
+ LevelWarn
+ LevelError
+ LevelCritical
+ LevelFatal
+)
+
+var prefixes map[Threshold]string = map[Threshold]string{
+ LevelTrace: "TRACE",
+ LevelDebug: "DEBUG",
+ LevelInfo: "INFO",
+ LevelWarn: "WARN",
+ LevelError: "ERROR",
+ LevelCritical: "CRITICAL",
+ LevelFatal: "FATAL",
+}
+
+func prefix(t Threshold) string {
+ return t.String() + " "
+}
+
+// Notepad is where you leave a note!
+type Notepad struct {
+ TRACE *log.Logger
+ DEBUG *log.Logger
+ INFO *log.Logger
+ WARN *log.Logger
+ ERROR *log.Logger
+ CRITICAL *log.Logger
+ FATAL *log.Logger
+
+ LOG *log.Logger
+ FEEDBACK *Feedback
+
+ loggers []**log.Logger
+ logHandle io.Writer
+ outHandle io.Writer
+ logThreshold Threshold
+ stdoutThreshold Threshold
+ prefix string
+ flags int
+
+ // One per Threshold
+ logCounters [7]*logCounter
+}
+
+// NewNotepad creates a new notepad.
+func NewNotepad(outThreshold Threshold, logThreshold Threshold, outHandle, logHandle io.Writer, prefix string, flags int) *Notepad {
+ n := &Notepad{}
+
+ n.loggers = append(n.loggers, &n.TRACE, &n.DEBUG, &n.INFO, &n.WARN, &n.ERROR, &n.CRITICAL, &n.FATAL)
+ n.logHandle = logHandle
+ n.outHandle = outHandle
+ n.logThreshold = logThreshold
+ n.stdoutThreshold = outThreshold
+
+ if len(prefix) != 0 {
+ n.prefix = "[" + prefix + "] "
+ } else {
+ n.prefix = ""
+ }
+
+ n.flags = flags
+
+ n.LOG = log.New(n.logHandle,
+ "LOG: ",
+ n.flags)
+
+ n.FEEDBACK = &Feedback{n}
+
+ n.init()
+
+ return n
+}
+
+// Feedback is special. It writes plainly to the output while
+// logging with the standard extra information (date, file, etc.).
+// Only Println and Printf are currently provided for this.
+type Feedback struct {
+ *Notepad
+}
+
+// init creates the loggers for each level depending on the notepad thresholds.
+func (n *Notepad) init() {
+ bothHandle := io.MultiWriter(n.outHandle, n.logHandle)
+
+ for t, logger := range n.loggers {
+ threshold := Threshold(t)
+ counter := &logCounter{}
+ n.logCounters[t] = counter
+
+ switch {
+ case threshold >= n.logThreshold && threshold >= n.stdoutThreshold:
+ *logger = log.New(io.MultiWriter(counter, bothHandle), n.prefix+prefix(threshold), n.flags)
+
+ case threshold >= n.logThreshold:
+ *logger = log.New(io.MultiWriter(counter, n.logHandle), n.prefix+prefix(threshold), n.flags)
+
+ case threshold >= n.stdoutThreshold:
+ *logger = log.New(io.MultiWriter(counter, os.Stdout), n.prefix+prefix(threshold), n.flags)
+
+ default:
+ *logger = log.New(counter, n.prefix+prefix(threshold), n.flags)
+ }
+ }
+}
+
+// SetLogThreshold changes the threshold above which messages are written to the
+// log file.
+func (n *Notepad) SetLogThreshold(threshold Threshold) {
+ n.logThreshold = threshold
+ n.init()
+}
+
+// SetLogOutput changes the writer to which log messages are written.
+func (n *Notepad) SetLogOutput(handle io.Writer) {
+ n.logHandle = handle
+ n.init()
+}
+
+// GetLogThreshold returns the threshold defined for the log logger.
+func (n *Notepad) GetLogThreshold() Threshold {
+ return n.logThreshold
+}
+
+// SetStdoutThreshold changes the threshold above which messages are written to the
+// standard output.
+func (n *Notepad) SetStdoutThreshold(threshold Threshold) {
+ n.stdoutThreshold = threshold
+ n.init()
+}
+
+// GetStdoutThreshold returns the threshold for the stdout logger.
+func (n *Notepad) GetStdoutThreshold() Threshold {
+ return n.stdoutThreshold
+}
+
+// SetPrefix changes the prefix used by the notepad. Prefixes are displayed between
+// brackets at the beginning of the line. An empty prefix won't be displayed at all.
+func (n *Notepad) SetPrefix(prefix string) {
+ if len(prefix) != 0 {
+ n.prefix = "[" + prefix + "] "
+ } else {
+ n.prefix = ""
+ }
+ n.init()
+}
+
+// SetFlags chooses which flags the logger will display (after prefix and message
+// level). See the log package for more information on this.
+func (n *Notepad) SetFlags(flags int) {
+ n.flags = flags
+ n.init()
+}
+
+// Println writes plainly to the output while logging with the standard
+// extra information (date, file, etc.).
+func (fb *Feedback) Println(v ...interface{}) {
+ s := fmt.Sprintln(v...)
+ fmt.Print(s)
+ fb.LOG.Output(2, s)
+}
+
+// Printf writes plainly to the output while logging with the standard
+// extra information (date, file, etc.).
+func (fb *Feedback) Printf(format string, v ...interface{}) {
+ s := fmt.Sprintf(format, v...)
+ fmt.Print(s)
+ fb.LOG.Output(2, s)
+}
diff --git a/vendor/github.com/spf13/jwalterweatherman/notepad_test.go b/vendor/github.com/spf13/jwalterweatherman/notepad_test.go
new file mode 100644
index 000000000..d0e3ab04b
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/notepad_test.go
@@ -0,0 +1,41 @@
+// Copyright © 2016 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package jwalterweatherman
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestNotepad(t *testing.T) {
+
+ var logHandle, outHandle bytes.Buffer
+
+ n := NewNotepad(LevelCritical, LevelError, &outHandle, &logHandle, "TestNotePad", 0)
+
+ require.Equal(t, LevelCritical, n.GetStdoutThreshold())
+ require.Equal(t, LevelError, n.GetLogThreshold())
+
+ n.DEBUG.Println("Some debug")
+ n.ERROR.Println("Some error")
+ n.CRITICAL.Println("Some critical error")
+
+ require.Contains(t, logHandle.String(), "[TestNotePad] ERROR Some error")
+ require.NotContains(t, logHandle.String(), "Some debug")
+ require.NotContains(t, outHandle.String(), "Some error")
+ require.Contains(t, outHandle.String(), "Some critical error")
+
+ require.Equal(t, n.LogCountForLevel(LevelError), uint64(1))
+ require.Equal(t, n.LogCountForLevel(LevelDebug), uint64(1))
+ require.Equal(t, n.LogCountForLevel(LevelTrace), uint64(0))
+}
+
+func TestThresholdString(t *testing.T) {
+ require.Equal(t, LevelError.String(), "ERROR")
+ require.Equal(t, LevelTrace.String(), "TRACE")
+}
diff --git a/vendor/github.com/spf13/viper/.gitignore b/vendor/github.com/spf13/viper/.gitignore
new file mode 100644
index 000000000..352a34a56
--- /dev/null
+++ b/vendor/github.com/spf13/viper/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.bench \ No newline at end of file
diff --git a/vendor/github.com/spf13/viper/.travis.yml b/vendor/github.com/spf13/viper/.travis.yml
new file mode 100644
index 000000000..d4c2559c2
--- /dev/null
+++ b/vendor/github.com/spf13/viper/.travis.yml
@@ -0,0 +1,26 @@
+go_import_path: github.com/spf13/viper
+
+language: go
+go:
+ - 1.7.5
+ - 1.8
+ - tip
+
+os:
+ - linux
+ - osx
+
+matrix:
+ allow_failures:
+ - go: tip
+ fast_finish: true
+
+script:
+ - go install ./...
+ - go test -v ./...
+
+after_success:
+ - go get -u -d github.com/spf13/hugo
+ - cd $GOPATH/src/github.com/spf13/hugo && make && ./hugo -s docs && cd -
+
+sudo: false
diff --git a/vendor/github.com/spf13/viper/LICENSE b/vendor/github.com/spf13/viper/LICENSE
new file mode 100644
index 000000000..4527efb9c
--- /dev/null
+++ b/vendor/github.com/spf13/viper/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Steve Francia
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE. \ No newline at end of file
diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md
new file mode 100644
index 000000000..25181dff1
--- /dev/null
+++ b/vendor/github.com/spf13/viper/README.md
@@ -0,0 +1,621 @@
+![viper logo](https://cloud.githubusercontent.com/assets/173412/10886745/998df88a-8151-11e5-9448-4736db51020d.png)
+
+Go configuration with fangs!
+
+Many Go projects are built using Viper including:
+
+* [Hugo](http://gohugo.io)
+* [EMC RexRay](http://rexray.readthedocs.org/en/stable/)
+* [Imgur's Incus](https://github.com/Imgur/incus)
+* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
+* [Docker Notary](https://github.com/docker/Notary)
+* [BloomApi](https://www.bloomapi.com/)
+* [doctl](https://github.com/digitalocean/doctl)
+
+[![Build Status](https://travis-ci.org/spf13/viper.svg)](https://travis-ci.org/spf13/viper) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![GoDoc](https://godoc.org/github.com/spf13/viper?status.svg)](https://godoc.org/github.com/spf13/viper)
+
+
+## What is Viper?
+
+Viper is a complete configuration solution for Go applications, including 12-factor apps. It is designed
+to work within an application, and can handle all types of configuration needs
+and formats. It supports:
+
+* setting defaults
+* reading from JSON, TOML, YAML, HCL, and Java properties config files
+* live watching and re-reading of config files (optional)
+* reading from environment variables
+* reading from remote config systems (etcd or Consul), and watching changes
+* reading from command line flags
+* reading from buffer
+* setting explicit values
+
+Viper can be thought of as a registry for all of your application's
+configuration needs.
+
+## Why Viper?
+
+When building a modern application, you don’t want to worry about
+configuration file formats; you want to focus on building awesome software.
+Viper is here to help with that.
+
+Viper does the following for you:
+
+1. Find, load, and unmarshal a configuration file in JSON, TOML, YAML, HCL, or Java properties formats.
+2. Provide a mechanism to set default values for your different
+ configuration options.
+3. Provide a mechanism to set override values for options specified through
+ command line flags.
+4. Provide an alias system to easily rename parameters without breaking existing
+ code.
+5. Make it easy to tell the difference between when a user has explicitly provided a
+   command line or config file value that happens to be the same as the default.
+
+Viper uses the following precedence order. Each item takes precedence over the
+item below it:
+
+ * explicit call to Set
+ * flag
+ * env
+ * config
+ * key/value store
+ * default
+
+Viper configuration keys are case insensitive.
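+
+For example (a small illustrative sketch of the precedence rules and of case-insensitive keys):
+
+```go
+viper.SetDefault("Verbose", false)
+viper.Set("Verbose", true)
+
+viper.GetBool("verbose") // true: the explicit Set wins over the default, and the lowercase key still matches
+```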
+
+## Putting Values into Viper
+
+### Establishing Defaults
+
+A good configuration system will support default values. A default value is not
+required for a key, but it's useful in the event that a key hasn’t been set via
+config file, environment variable, remote configuration or flag.
+
+Examples:
+
+```go
+viper.SetDefault("ContentDir", "content")
+viper.SetDefault("LayoutDir", "layouts")
+viper.SetDefault("Taxonomies", map[string]string{"tag": "tags", "category": "categories"})
+```
+
+### Reading Config Files
+
+Viper requires minimal configuration so it knows where to look for config files.
+Viper supports JSON, TOML, YAML, HCL, and Java Properties files. Viper can search multiple paths, but
+currently a single Viper instance only supports a single configuration file.
+Viper does not default to any configuration search paths, leaving that decision
+to the application.
+
+Here is an example of how to use Viper to search for and read a configuration file.
+None of the specific paths are required, but at least one path should be provided
+where a configuration file is expected.
+
+```go
+viper.SetConfigName("config") // name of config file (without extension)
+viper.AddConfigPath("/etc/appname/") // path to look for the config file in
+viper.AddConfigPath("$HOME/.appname") // call multiple times to add many search paths
+viper.AddConfigPath(".") // optionally look for config in the working directory
+err := viper.ReadInConfig() // Find and read the config file
+if err != nil { // Handle errors reading the config file
+ panic(fmt.Errorf("Fatal error config file: %s \n", err))
+}
+```
+
+### Watching and re-reading config files
+
+Viper supports the ability to have your application live read a config file while running.
+
+Gone are the days of needing to restart a server to have a config take effect;
+viper-powered applications can read an update to a config file while running and
+not miss a beat.
+
+Simply tell the viper instance to `WatchConfig()`.
+Optionally you can provide a function for Viper to run each time a change occurs.
+
+**Make sure you add all of the configPaths prior to calling `WatchConfig()`**
+
+```go
+ viper.WatchConfig()
+ viper.OnConfigChange(func(e fsnotify.Event) {
+ fmt.Println("Config file changed:", e.Name)
+ })
+```
+
+### Reading Config from io.Reader
+
+Viper predefines many configuration sources such as files, environment
+variables, flags, and remote K/V stores, but you are not bound to them. You can
+also implement your own configuration source and feed it to Viper.
+
+```go
+viper.SetConfigType("yaml") // or viper.SetConfigType("YAML")
+
+// any approach to require this configuration into your program.
+var yamlExample = []byte(`
+Hacker: true
+name: steve
+hobbies:
+- skateboarding
+- snowboarding
+- go
+clothing:
+ jacket: leather
+ trousers: denim
+age: 35
+eyes : brown
+beard: true
+`)
+
+viper.ReadConfig(bytes.NewBuffer(yamlExample))
+
+viper.Get("name") // this would be "steve"
+```
+
+### Setting Overrides
+
+These could be from a command line flag, or from your own application logic.
+
+```go
+viper.Set("Verbose", true)
+viper.Set("LogFile", LogFile)
+```
+
+### Registering and Using Aliases
+
+Aliases permit a single value to be referenced by multiple keys.
+
+```go
+viper.RegisterAlias("loud", "Verbose")
+
+viper.Set("verbose", true) // same result as next line
+viper.Set("loud", true) // same result as prior line
+
+viper.GetBool("loud") // true
+viper.GetBool("verbose") // true
+```
+
+### Working with Environment Variables
+
+Viper has full support for environment variables. This enables 12 factor
+applications out of the box. There are four methods that exist to aid working
+with ENV:
+
+ * `AutomaticEnv()`
+ * `BindEnv(string...) : error`
+ * `SetEnvPrefix(string)`
+ * `SetEnvKeyReplacer(*strings.Replacer)`
+
+_When working with ENV variables, it’s important to recognize that Viper
+treats ENV variables as case sensitive._
+
+Viper provides a mechanism to try to ensure that ENV variables are unique. By
+using `SetEnvPrefix`, you can tell Viper to add a prefix while reading from
+the environment variables. Both `BindEnv` and `AutomaticEnv` will use this
+prefix.
+
+`BindEnv` takes one or two parameters. The first parameter is the key name, the
+second is the name of the environment variable. The name of the environment
+variable is case sensitive. If the ENV variable name is not provided, then
+Viper will automatically assume that the key name matches the ENV variable name,
+but the ENV variable is IN ALL CAPS. When you explicitly provide the ENV
+variable name, it **does not** automatically add the prefix.
+
+One important thing to recognize when working with ENV variables is that the
+value will be read each time it is accessed. Viper does not fix the value when
+the `BindEnv` is called.
+
+`AutomaticEnv` is a powerful helper especially when combined with
+`SetEnvPrefix`. When called, Viper will check for an environment variable any
+time a `viper.Get` request is made. It will apply the following rules. It will
+check for a environment variable with a name matching the key uppercased and
+prefixed with the `EnvPrefix` if set.
+
+`SetEnvKeyReplacer` allows you to use a `strings.Replacer` object to rewrite Env
+keys to an extent. This is useful if you want to use `-` or something in your
+`Get()` calls, but want your environment variables to use `_` delimiters. An
+example of using it can be found in `viper_test.go`.
+
+#### Env example
+
+```go
+SetEnvPrefix("spf") // will be uppercased automatically
+BindEnv("id")
+
+os.Setenv("SPF_ID", "13") // typically done outside of the app
+
+id := Get("id") // 13
+```
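+
+For completeness, a small sketch combining `AutomaticEnv` with `SetEnvKeyReplacer` (the `server-port` key and its value are only examples):
+
+```go
+SetEnvPrefix("spf") // will be uppercased automatically
+SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
+AutomaticEnv()
+
+os.Setenv("SPF_SERVER_PORT", "8080") // typically done outside of the app
+
+port := GetInt("server-port") // 8080
+```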
+
+### Working with Flags
+
+Viper has the ability to bind to flags. Specifically, Viper supports `Pflags`
+as used in the [Cobra](https://github.com/spf13/cobra) library.
+
+Like `BindEnv`, the value is not set when the binding method is called, but when
+it is accessed. This means you can bind as early as you want, even in an
+`init()` function.
+
+The `BindPFlag()` method provides this functionality.
+
+Example:
+
+```go
+serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
+viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
+```
+
+The use of [pflag](https://github.com/spf13/pflag/) in Viper does not preclude
+the use of other packages that use the [flag](https://golang.org/pkg/flag/)
+package from the standard library. The pflag package can handle the flags
+defined for the flag package by importing these flags. This is accomplished
+by calling a convenience function provided by the pflag package called
+AddGoFlagSet().
+
+Example:
+
+```go
+package main
+
+import (
+ "flag"
+ "github.com/spf13/pflag"
+)
+
+func main() {
+ pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+ pflag.Parse()
+ ...
+}
+```
+
+#### Flag interfaces
+
+Viper provides two Go interfaces to bind other flag systems if you don't use `Pflags`.
+
+`FlagValue` represents a single flag. This is a very simple example of how to implement this interface:
+
+```go
+type myFlag struct {}
+func (f myFlag) HasChanged() bool { return false }
+func (f myFlag) Name() string { return "my-flag-name" }
+func (f myFlag) ValueString() string { return "my-flag-value" }
+func (f myFlag) ValueType() string { return "string" }
+```
+
+Once your flag implements this interface, you can simply tell Viper to bind it:
+
+```go
+viper.BindFlagValue("my-flag-name", myFlag{})
+```
+
+`FlagValueSet` represents a group of flags. This is a very simple example of how to implement this interface:
+
+```go
+type myFlagSet struct {
+ flags []myFlag
+}
+
+func (f myFlagSet) VisitAll(fn func(FlagValue)) {
+	for _, flag := range f.flags {
+		fn(flag)
+	}
+}
+```
+
+Once your flag set implements this interface, you can simply tell Viper to bind it:
+
+```go
+fSet := myFlagSet{
+ flags: []myFlag{myFlag{}, myFlag{}},
+}
+viper.BindFlagValues("my-flags", fSet)
+```
+
+### Remote Key/Value Store Support
+
+To enable remote support in Viper, do a blank import of the `viper/remote`
+package:
+
+`import _ "github.com/spf13/viper/remote"`
+
+Viper will read a config string (as JSON, TOML, YAML or HCL) retrieved from a path
+in a Key/Value store such as etcd or Consul. These values take precedence over
+default values, but are overridden by configuration values retrieved from disk,
+flags, or environment variables.
+
+Viper uses [crypt](https://github.com/xordataexchange/crypt) to retrieve
+configuration from the K/V store, which means that you can store your
+configuration values encrypted and have them automatically decrypted if you have
+the correct gpg keyring. Encryption is optional.
+
+You can use remote configuration in conjunction with local configuration, or
+independently of it.
+
+`crypt` has a command-line helper that you can use to put configurations in your
+K/V store. `crypt` defaults to etcd on http://127.0.0.1:4001.
+
+```bash
+$ go get github.com/xordataexchange/crypt/bin/crypt
+$ crypt set -plaintext /config/hugo.json /Users/hugo/settings/config.json
+```
+
+Confirm that your value was set:
+
+```bash
+$ crypt get -plaintext /config/hugo.json
+```
+
+See the `crypt` documentation for examples of how to set encrypted values, or
+how to use Consul.
+
+### Remote Key/Value Store Example - Unencrypted
+
+```go
+viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001","/config/hugo.json")
+viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop"
+err := viper.ReadRemoteConfig()
+```
+
+### Remote Key/Value Store Example - Encrypted
+
+```go
+viper.AddSecureRemoteProvider("etcd","http://127.0.0.1:4001","/config/hugo.json","/etc/secrets/mykeyring.gpg")
+viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop"
+err := viper.ReadRemoteConfig()
+```
+
+### Watching Changes in etcd - Unencrypted
+
+```go
+// alternatively, you can create a new viper instance.
+var runtime_viper = viper.New()
+
+runtime_viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/hugo.yml")
+runtime_viper.SetConfigType("yaml") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop"
+
+// read from remote config the first time.
+err := runtime_viper.ReadRemoteConfig()
+
+// unmarshal config
+runtime_viper.Unmarshal(&runtime_conf)
+
+// open a goroutine to watch remote changes forever
+go func(){
+ for {
+ time.Sleep(time.Second * 5) // delay after each request
+
+ // currently, only tested with etcd support
+ err := runtime_viper.WatchRemoteConfig()
+ if err != nil {
+ log.Errorf("unable to read remote config: %v", err)
+ continue
+ }
+
+ // unmarshal new config into our runtime config struct. you can also use channel
+ // to implement a signal to notify the system of the changes
+ runtime_viper.Unmarshal(&runtime_conf)
+ }
+}()
+```
+
+## Getting Values From Viper
+
+In Viper, there are a few ways to get a value depending on the value's type.
+The following functions and methods exist:
+
+ * `Get(key string) : interface{}`
+ * `GetBool(key string) : bool`
+ * `GetFloat64(key string) : float64`
+ * `GetInt(key string) : int`
+ * `GetString(key string) : string`
+ * `GetStringMap(key string) : map[string]interface{}`
+ * `GetStringMapString(key string) : map[string]string`
+ * `GetStringSlice(key string) : []string`
+ * `GetTime(key string) : time.Time`
+ * `GetDuration(key string) : time.Duration`
+ * `IsSet(key string) : bool`
+
+One important thing to recognize is that each Get function will return a zero
+value if the key is not found. To check if a given key exists, the `IsSet()` method
+has been provided.
+
+Example:
+```go
+viper.GetString("logfile") // case-insensitive Setting & Getting
+if viper.GetBool("verbose") {
+ fmt.Println("verbose enabled")
+}
+```
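+
+Because a zero value and a missing key look the same through the Get functions,
+`IsSet()` is how you tell them apart. A minimal sketch (the `"port"` key is just a
+hypothetical example):
+
+```go
+if viper.IsSet("port") {
+	fmt.Println("port is configured:", viper.GetInt("port"))
+} else {
+	fmt.Println("port has not been set by any source")
+}
+```
+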
+### Accessing nested keys
+
+The accessor methods also accept formatted paths to deeply nested keys. For
+example, if the following JSON file is loaded:
+
+```json
+{
+ "host": {
+ "address": "localhost",
+ "port": 5799
+ },
+ "datastore": {
+ "metric": {
+ "host": "127.0.0.1",
+ "port": 3099
+ },
+ "warehouse": {
+ "host": "198.0.0.1",
+ "port": 2112
+ }
+ }
+}
+
+```
+
+Viper can access a nested field by passing a `.` delimited path of keys:
+
+```go
+GetString("datastore.metric.host") // (returns "127.0.0.1")
+```
+
+This obeys the precedence rules established above; the search for the path
+will cascade through the remaining configuration registries until found.
+
+For example, given this configuration file, both `datastore.metric.host` and
+`datastore.metric.port` are already defined (and may be overridden). If in addition
+`datastore.metric.protocol` was defined in the defaults, Viper would also find it.
+
+However, if `datastore.metric` was overridden (by a flag, an environment variable,
+the `Set()` method, …) with an immediate value, then all sub-keys of
+`datastore.metric` become undefined; they are “shadowed” by the higher-priority
+configuration level, as sketched below.
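+
+A minimal sketch of this shadowing behavior, using the keys from the JSON file
+above (the overriding value is a hypothetical example):
+
+```go
+viper.GetString("datastore.metric.host") // returns "127.0.0.1" (from the config file)
+
+viper.Set("datastore.metric", "in-memory") // override the whole sub-tree with a single value
+
+viper.GetString("datastore.metric.host") // returns "" because the sub-key is now shadowed
+viper.IsSet("datastore.metric.host")     // returns false
+```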
+
+Lastly, if there exists a key that matches the delimited key path, its value
+will be returned instead. E.g.
+
+```json
+{
+ "datastore.metric.host": "0.0.0.0",
+ "host": {
+ "address": "localhost",
+ "port": 5799
+ },
+ "datastore": {
+ "metric": {
+ "host": "127.0.0.1",
+ "port": 3099
+ },
+ "warehouse": {
+ "host": "198.0.0.1",
+ "port": 2112
+ }
+ }
+}
+```
+
+```go
+GetString("datastore.metric.host") // returns "0.0.0.0"
+```
+
+### Extract sub-tree
+
+Extract sub-tree from Viper.
+
+For example, `viper` represents:
+
+```yaml
+app:
+ cache1:
+ max-items: 100
+ item-size: 64
+ cache2:
+ max-items: 200
+ item-size: 80
+```
+
+After executing:
+
+```go
+subv := viper.Sub("app.cache1")
+```
+
+`subv` represents:
+
+```yaml
+max-items: 100
+item-size: 64
+```
+
+Suppose we have:
+
+```go
+func NewCache(cfg *Viper) *Cache {...}
+```
+
+which creates a cache based on config information formatted like `subv`.
+Now it is easy to create these two caches separately:
+
+```go
+cfg1 := viper.Sub("app.cache1")
+cache1 := NewCache(cfg1)
+
+cfg2 := viper.Sub("app.cache2")
+cache2 := NewCache(cfg2)
+```
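+
+Note that `Sub` returns `nil` when the key is missing or does not resolve to a
+map of values, so it is worth checking the result before use. A minimal sketch:
+
+```go
+cfg := viper.Sub("app.cache1")
+if cfg == nil {
+	log.Fatal("app.cache1 configuration is missing or not a map")
+}
+cache1 := NewCache(cfg)
+```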
+
+### Unmarshaling
+
+You also have the option of Unmarshaling all or a specific value to a struct, map,
+etc.
+
+There are two methods to do this:
+
+ * `Unmarshal(rawVal interface{}) : error`
+ * `UnmarshalKey(key string, rawVal interface{}) : error`
+
+Example:
+
+```go
+type config struct {
+ Port int
+ Name string
+ PathMap string `mapstructure:"path_map"`
+}
+
+var C config
+
+err := viper.Unmarshal(&C)
+if err != nil {
+	log.Fatalf("unable to decode into struct, %v", err)
+}
+```
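+
+`UnmarshalKey` works the same way but decodes only the sub-tree under the given
+key. A minimal sketch, reusing the `datastore.metric` keys from the nested-key
+example above:
+
+```go
+type metricConfig struct {
+	Host string
+	Port int
+}
+
+var mc metricConfig
+
+if err := viper.UnmarshalKey("datastore.metric", &mc); err != nil {
+	log.Fatalf("unable to decode metric config: %v", err)
+}
+```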
+
+## Viper or Vipers?
+
+Viper comes ready to use out of the box. There is no configuration or
+initialization needed to begin using Viper. Since most applications will want
+to use a single central repository for their configuration, the viper package
+provides this. It is similar to a singleton.
+
+All of the examples above demonstrate using Viper in its singleton-style
+approach.
+
+### Working with multiple vipers
+
+You can also create many different vipers for use in your application. Each will
+have its own unique set of configurations and values. Each can read from a
+different config file, key/value store, etc. All of the functions that the viper
+package supports are mirrored as methods on a viper.
+
+Example:
+
+```go
+x := viper.New()
+y := viper.New()
+
+x.SetDefault("ContentDir", "content")
+y.SetDefault("ContentDir", "foobar")
+
+//...
+```
+
+When working with multiple vipers, it is up to the user to keep track of the
+different vipers.
+
+## Q & A
+
+Q: Why not INI files?
+
+A: INI files are pretty awful. There’s no standard format, and they are hard to
+validate. Viper is designed to work with JSON, TOML or YAML files. If someone
+really wants to add this feature, I’d be happy to merge it. It’s easy to specify
+which formats your application will permit.
+
+Q: Why is it called “Viper”?
+
+A: Viper is designed to be a [companion](http://en.wikipedia.org/wiki/Viper_(G.I._Joe))
+to [Cobra](https://github.com/spf13/cobra). While both can operate completely
+independently, together they make a powerful pair to handle much of your
+application foundation needs.
+
+Q: Why is it called “Cobra”?
+
+A: Is there a better name for a [commander](http://en.wikipedia.org/wiki/Cobra_Commander)?
diff --git a/vendor/github.com/spf13/viper/flags.go b/vendor/github.com/spf13/viper/flags.go
new file mode 100644
index 000000000..dd32f4e1c
--- /dev/null
+++ b/vendor/github.com/spf13/viper/flags.go
@@ -0,0 +1,57 @@
+package viper
+
+import "github.com/spf13/pflag"
+
+// FlagValueSet is an interface that users can implement
+// to bind a set of flags to viper.
+type FlagValueSet interface {
+ VisitAll(fn func(FlagValue))
+}
+
+// FlagValue is an interface that users can implement
+// to bind different flags to viper.
+type FlagValue interface {
+ HasChanged() bool
+ Name() string
+ ValueString() string
+ ValueType() string
+}
+
+// pflagValueSet is a wrapper around *pflag.FlagSet
+// that implements FlagValueSet.
+type pflagValueSet struct {
+ flags *pflag.FlagSet
+}
+
+// VisitAll iterates over all *pflag.Flag inside the *pflag.FlagSet.
+func (p pflagValueSet) VisitAll(fn func(flag FlagValue)) {
+ p.flags.VisitAll(func(flag *pflag.Flag) {
+ fn(pflagValue{flag})
+ })
+}
+
+// pflagValue is a wrapper around *pflag.Flag
+// that implements FlagValue.
+type pflagValue struct {
+ flag *pflag.Flag
+}
+
+// HasChanged reports whether the flag value has been changed.
+func (p pflagValue) HasChanged() bool {
+ return p.flag.Changed
+}
+
+// Name returns the name of the flag.
+func (p pflagValue) Name() string {
+ return p.flag.Name
+}
+
+// ValueString returns the value of the flag as a string.
+func (p pflagValue) ValueString() string {
+ return p.flag.Value.String()
+}
+
+// ValueType returns the type of the flag as a string.
+func (p pflagValue) ValueType() string {
+ return p.flag.Value.Type()
+}
diff --git a/vendor/github.com/spf13/viper/flags_test.go b/vendor/github.com/spf13/viper/flags_test.go
new file mode 100644
index 000000000..5bffca36c
--- /dev/null
+++ b/vendor/github.com/spf13/viper/flags_test.go
@@ -0,0 +1,66 @@
+package viper
+
+import (
+ "testing"
+
+ "github.com/spf13/pflag"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestBindFlagValueSet(t *testing.T) {
+ flagSet := pflag.NewFlagSet("test", pflag.ContinueOnError)
+
+ var testValues = map[string]*string{
+ "host": nil,
+ "port": nil,
+ "endpoint": nil,
+ }
+
+ var mutatedTestValues = map[string]string{
+ "host": "localhost",
+ "port": "6060",
+ "endpoint": "/public",
+ }
+
+ for name := range testValues {
+ testValues[name] = flagSet.String(name, "", "test")
+ }
+
+ flagValueSet := pflagValueSet{flagSet}
+
+ err := BindFlagValues(flagValueSet)
+ if err != nil {
+ t.Fatalf("error binding flag set, %v", err)
+ }
+
+ flagSet.VisitAll(func(flag *pflag.Flag) {
+ flag.Value.Set(mutatedTestValues[flag.Name])
+ flag.Changed = true
+ })
+
+ for name, expected := range mutatedTestValues {
+ assert.Equal(t, Get(name), expected)
+ }
+}
+
+func TestBindFlagValue(t *testing.T) {
+ var testString = "testing"
+ var testValue = newStringValue(testString, &testString)
+
+ flag := &pflag.Flag{
+ Name: "testflag",
+ Value: testValue,
+ Changed: false,
+ }
+
+ flagValue := pflagValue{flag}
+ BindFlagValue("testvalue", flagValue)
+
+ assert.Equal(t, testString, Get("testvalue"))
+
+ flag.Value.Set("testing_mutate")
+ flag.Changed = true //hack for pflag usage
+
+ assert.Equal(t, "testing_mutate", Get("testvalue"))
+
+}
diff --git a/vendor/github.com/spf13/viper/nohup.out b/vendor/github.com/spf13/viper/nohup.out
new file mode 100644
index 000000000..8973bf27b
--- /dev/null
+++ b/vendor/github.com/spf13/viper/nohup.out
@@ -0,0 +1 @@
+QProcess::start: Process is already running
diff --git a/vendor/github.com/spf13/viper/overrides_test.go b/vendor/github.com/spf13/viper/overrides_test.go
new file mode 100644
index 000000000..dd2aa9b0d
--- /dev/null
+++ b/vendor/github.com/spf13/viper/overrides_test.go
@@ -0,0 +1,173 @@
+package viper
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/spf13/cast"
+ "github.com/stretchr/testify/assert"
+)
+
+type layer int
+
+const (
+ defaultLayer layer = iota + 1
+ overrideLayer
+)
+
+func TestNestedOverrides(t *testing.T) {
+ assert := assert.New(t)
+ var v *Viper
+
+ // Case 0: value overridden by a value
+ overrideDefault(assert, "tom", 10, "tom", 20) // "tom" is first given 10 as default value, then overridden by 20
+ override(assert, "tom", 10, "tom", 20) // "tom" is first given value 10, then overridden by 20
+ overrideDefault(assert, "tom.age", 10, "tom.age", 20)
+ override(assert, "tom.age", 10, "tom.age", 20)
+ overrideDefault(assert, "sawyer.tom.age", 10, "sawyer.tom.age", 20)
+ override(assert, "sawyer.tom.age", 10, "sawyer.tom.age", 20)
+
+ // Case 1: key:value overridden by a value
+ v = overrideDefault(assert, "tom.age", 10, "tom", "boy") // "tom.age" is first given 10 as default value, then "tom" is overridden by "boy"
+ assert.Nil(v.Get("tom.age")) // "tom.age" should not exist anymore
+ v = override(assert, "tom.age", 10, "tom", "boy")
+ assert.Nil(v.Get("tom.age"))
+
+ // Case 2: value overridden by a key:value
+ overrideDefault(assert, "tom", "boy", "tom.age", 10) // "tom" is first given "boy" as default value, then "tom" is overridden by map{"age":10}
+ override(assert, "tom.age", 10, "tom", "boy")
+
+ // Case 3: key:value overridden by a key:value
+ v = overrideDefault(assert, "tom.size", 4, "tom.age", 10)
+ assert.Equal(4, v.Get("tom.size")) // value should still be reachable
+ v = override(assert, "tom.size", 4, "tom.age", 10)
+ assert.Equal(4, v.Get("tom.size"))
+ deepCheckValue(assert, v, overrideLayer, []string{"tom", "size"}, 4)
+
+ // Case 4: key:value overridden by a map
+ v = overrideDefault(assert, "tom.size", 4, "tom", map[string]interface{}{"age": 10}) // "tom.size" is first given "4" as default value, then "tom" is overridden by map{"age":10}
+ assert.Equal(4, v.Get("tom.size")) // "tom.size" should still be reachable
+ assert.Equal(10, v.Get("tom.age")) // new value should be there
+ deepCheckValue(assert, v, overrideLayer, []string{"tom", "age"}, 10) // new value should be there
+ v = override(assert, "tom.size", 4, "tom", map[string]interface{}{"age": 10})
+ assert.Nil(v.Get("tom.size"))
+ assert.Equal(10, v.Get("tom.age"))
+ deepCheckValue(assert, v, overrideLayer, []string{"tom", "age"}, 10)
+
+ // Case 5: array overridden by a value
+ overrideDefault(assert, "tom", []int{10, 20}, "tom", 30)
+ override(assert, "tom", []int{10, 20}, "tom", 30)
+ overrideDefault(assert, "tom.age", []int{10, 20}, "tom.age", 30)
+ override(assert, "tom.age", []int{10, 20}, "tom.age", 30)
+
+ // Case 6: array overridden by an array
+ overrideDefault(assert, "tom", []int{10, 20}, "tom", []int{30, 40})
+ override(assert, "tom", []int{10, 20}, "tom", []int{30, 40})
+ overrideDefault(assert, "tom.age", []int{10, 20}, "tom.age", []int{30, 40})
+ v = override(assert, "tom.age", []int{10, 20}, "tom.age", []int{30, 40})
+ // explicit array merge:
+ s, ok := v.Get("tom.age").([]int)
+ if assert.True(ok, "tom[\"age\"] is not a slice") {
+ v.Set("tom.age", append(s, []int{50, 60}...))
+ assert.Equal([]int{30, 40, 50, 60}, v.Get("tom.age"))
+ deepCheckValue(assert, v, overrideLayer, []string{"tom", "age"}, []int{30, 40, 50, 60})
+ }
+}
+
+func overrideDefault(assert *assert.Assertions, firstPath string, firstValue interface{}, secondPath string, secondValue interface{}) *Viper {
+ return overrideFromLayer(defaultLayer, assert, firstPath, firstValue, secondPath, secondValue)
+}
+func override(assert *assert.Assertions, firstPath string, firstValue interface{}, secondPath string, secondValue interface{}) *Viper {
+ return overrideFromLayer(overrideLayer, assert, firstPath, firstValue, secondPath, secondValue)
+}
+
+// overrideFromLayer performs the sequential override and low-level checks.
+//
+// First assignment is made on layer l for path firstPath with value firstValue,
+// the second one on the override layer (i.e., with the Set() function)
+// for path secondPath with value secondValue.
+//
+// firstPath and secondPath can include an arbitrary number of dots to indicate
+// a nested element.
+//
+// After each assignment, the value is checked, retrieved both by its full path
+// and by its key sequence (successive maps).
+func overrideFromLayer(l layer, assert *assert.Assertions, firstPath string, firstValue interface{}, secondPath string, secondValue interface{}) *Viper {
+ v := New()
+ firstKeys := strings.Split(firstPath, v.keyDelim)
+ if assert == nil ||
+ len(firstKeys) == 0 || len(firstKeys[0]) == 0 {
+ return v
+ }
+
+ // Set and check first value
+ switch l {
+ case defaultLayer:
+ v.SetDefault(firstPath, firstValue)
+ case overrideLayer:
+ v.Set(firstPath, firstValue)
+ default:
+ return v
+ }
+ assert.Equal(firstValue, v.Get(firstPath))
+ deepCheckValue(assert, v, l, firstKeys, firstValue)
+
+ // Override and check new value
+ secondKeys := strings.Split(secondPath, v.keyDelim)
+ if len(secondKeys) == 0 || len(secondKeys[0]) == 0 {
+ return v
+ }
+ v.Set(secondPath, secondValue)
+ assert.Equal(secondValue, v.Get(secondPath))
+ deepCheckValue(assert, v, overrideLayer, secondKeys, secondValue)
+
+ return v
+}
+
+// deepCheckValue checks that all given keys correspond to a valid path in the
+// configuration map of the given layer, and that the final value equals the one given
+func deepCheckValue(assert *assert.Assertions, v *Viper, l layer, keys []string, value interface{}) {
+ if assert == nil || v == nil ||
+ len(keys) == 0 || len(keys[0]) == 0 {
+ return
+ }
+
+ // init
+ var val interface{}
+ var ms string
+ switch l {
+ case defaultLayer:
+ val = v.defaults
+ ms = "v.defaults"
+ case overrideLayer:
+ val = v.override
+ ms = "v.override"
+ }
+
+ // loop through map
+ var m map[string]interface{}
+ for _, k := range keys {
+ if val == nil {
+ assert.Fail(fmt.Sprintf("%s is not a map[string]interface{}", ms))
+ return
+ }
+
+ // deep scan of the map to get the final value
+ switch val.(type) {
+ case map[interface{}]interface{}:
+ m = cast.ToStringMap(val)
+ case map[string]interface{}:
+ m = val.(map[string]interface{})
+ default:
+ assert.Fail(fmt.Sprintf("%s is not a map[string]interface{}", ms))
+ return
+ }
+ ms = ms + "[\"" + k + "\"]"
+ val = m[k]
+ }
+	assert.Equal(value, val)
+}
diff --git a/vendor/github.com/spf13/viper/remote/remote.go b/vendor/github.com/spf13/viper/remote/remote.go
new file mode 100644
index 000000000..f100a9c7f
--- /dev/null
+++ b/vendor/github.com/spf13/viper/remote/remote.go
@@ -0,0 +1,107 @@
+// Copyright © 2015 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Package remote integrates the remote features of Viper.
+package remote
+
+import (
+	"bytes"
+	"io"
+	"os"
+
+	"github.com/spf13/viper"
+	crypt "github.com/xordataexchange/crypt/config"
+)
+
+type remoteConfigProvider struct{}
+
+func (rc remoteConfigProvider) Get(rp viper.RemoteProvider) (io.Reader, error) {
+ cm, err := getConfigManager(rp)
+ if err != nil {
+ return nil, err
+ }
+ b, err := cm.Get(rp.Path())
+ if err != nil {
+ return nil, err
+ }
+ return bytes.NewReader(b), nil
+}
+
+func (rc remoteConfigProvider) Watch(rp viper.RemoteProvider) (io.Reader, error) {
+ cm, err := getConfigManager(rp)
+ if err != nil {
+ return nil, err
+ }
+	resp, err := cm.Get(rp.Path())
+ if err != nil {
+ return nil, err
+ }
+
+ return bytes.NewReader(resp), nil
+}
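+
+// WatchChannel watches the remote configuration path and returns a channel of
+// responses together with a quit channel; sending on the quit channel stops the watch.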
+func (rc remoteConfigProvider) WatchChannel(rp viper.RemoteProvider) (<-chan *viper.RemoteResponse, chan bool) {
+ cm, err := getConfigManager(rp)
+ if err != nil {
+ return nil, nil
+ }
+ quit := make(chan bool)
+ quitwc := make(chan bool)
+ viperResponsCh := make(chan *viper.RemoteResponse)
+ cryptoResponseCh := cm.Watch(rp.Path(), quit)
+	// this goroutine converts the channel responses from crypt.Response to viper.RemoteResponse
+	go func(cr <-chan *crypt.Response, vr chan<- *viper.RemoteResponse, quitwc <-chan bool, quit chan<- bool) {
+		for {
+			select {
+			case <-quitwc:
+				quit <- true
+				return
+			case resp := <-cr:
+				vr <- &viper.RemoteResponse{
+					Error: resp.Error,
+					Value: resp.Value,
+				}
+			}
+		}
+	}(cryptoResponseCh, viperResponsCh, quitwc, quit)
+
+	return viperResponsCh, quitwc
+}
+
+func getConfigManager(rp viper.RemoteProvider) (crypt.ConfigManager, error) {
+	var cm crypt.ConfigManager
+	var err error
+
+	if rp.SecretKeyring() != "" {
+		// check the open error before deferring Close, and assign to the outer
+		// err so failures from the crypt constructors below are not dropped
+		var kr *os.File
+		kr, err = os.Open(rp.SecretKeyring())
+		if err != nil {
+			return nil, err
+		}
+		defer kr.Close()
+		if rp.Provider() == "etcd" {
+			cm, err = crypt.NewEtcdConfigManager([]string{rp.Endpoint()}, kr)
+		} else {
+			cm, err = crypt.NewConsulConfigManager([]string{rp.Endpoint()}, kr)
+		}
+	} else {
+		if rp.Provider() == "etcd" {
+			cm, err = crypt.NewStandardEtcdConfigManager([]string{rp.Endpoint()})
+		} else {
+			cm, err = crypt.NewStandardConsulConfigManager([]string{rp.Endpoint()})
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	return cm, nil
+}
+
+func init() {
+ viper.RemoteConfig = &remoteConfigProvider{}
+}
diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go
new file mode 100644
index 000000000..3ebada91a
--- /dev/null
+++ b/vendor/github.com/spf13/viper/util.go
@@ -0,0 +1,282 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Viper is an application configuration system.
+// It believes that applications can be configured in a variety of ways
+// via flags, ENVIRONMENT variables, configuration files retrieved
+// from the file system, or a remote key/value store.
+
+package viper
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "unicode"
+
+ "github.com/hashicorp/hcl"
+ "github.com/magiconair/properties"
+ toml "github.com/pelletier/go-toml"
+ "github.com/spf13/cast"
+ jww "github.com/spf13/jwalterweatherman"
+ "gopkg.in/yaml.v2"
+)
+
+// ConfigParseError denotes failing to parse configuration file.
+type ConfigParseError struct {
+ err error
+}
+
+// Error returns the formatted configuration error.
+func (pe ConfigParseError) Error() string {
+ return fmt.Sprintf("While parsing config: %s", pe.err.Error())
+}
+
+// toCaseInsensitiveValue checks if the value is a map;
+// if so, it creates a copy and lower-cases the keys recursively.
+func toCaseInsensitiveValue(value interface{}) interface{} {
+ switch v := value.(type) {
+ case map[interface{}]interface{}:
+ value = copyAndInsensitiviseMap(cast.ToStringMap(v))
+ case map[string]interface{}:
+ value = copyAndInsensitiviseMap(v)
+ }
+
+ return value
+}
+
+// copyAndInsensitiviseMap behaves like insensitiviseMap, but creates a copy of
+// any map it makes case insensitive.
+func copyAndInsensitiviseMap(m map[string]interface{}) map[string]interface{} {
+ nm := make(map[string]interface{})
+
+ for key, val := range m {
+ lkey := strings.ToLower(key)
+ switch v := val.(type) {
+ case map[interface{}]interface{}:
+ nm[lkey] = copyAndInsensitiviseMap(cast.ToStringMap(v))
+ case map[string]interface{}:
+ nm[lkey] = copyAndInsensitiviseMap(v)
+ default:
+ nm[lkey] = v
+ }
+ }
+
+ return nm
+}
+
+func insensitiviseMap(m map[string]interface{}) {
+ for key, val := range m {
+ switch val.(type) {
+ case map[interface{}]interface{}:
+ // nested map: cast and recursively insensitivise
+ val = cast.ToStringMap(val)
+ insensitiviseMap(val.(map[string]interface{}))
+ case map[string]interface{}:
+ // nested map: recursively insensitivise
+ insensitiviseMap(val.(map[string]interface{}))
+ }
+
+ lower := strings.ToLower(key)
+ if key != lower {
+ // remove old key (not lower-cased)
+ delete(m, key)
+ }
+ // update map
+ m[lower] = val
+ }
+}
+
+func absPathify(inPath string) string {
+ jww.INFO.Println("Trying to resolve absolute path to", inPath)
+
+ if strings.HasPrefix(inPath, "$HOME") {
+ inPath = userHomeDir() + inPath[5:]
+ }
+
+ if strings.HasPrefix(inPath, "$") {
+ end := strings.Index(inPath, string(os.PathSeparator))
+ inPath = os.Getenv(inPath[1:end]) + inPath[end:]
+ }
+
+ if filepath.IsAbs(inPath) {
+ return filepath.Clean(inPath)
+ }
+
+ p, err := filepath.Abs(inPath)
+ if err == nil {
+ return filepath.Clean(p)
+ }
+
+ jww.ERROR.Println("Couldn't discover absolute path")
+ jww.ERROR.Println(err)
+ return ""
+}
+
+// Check if File / Directory Exists
+func exists(path string) (bool, error) {
+ _, err := v.fs.Stat(path)
+ if err == nil {
+ return true, nil
+ }
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+}
+
+func stringInSlice(a string, list []string) bool {
+ for _, b := range list {
+ if b == a {
+ return true
+ }
+ }
+ return false
+}
+
+func userHomeDir() string {
+ if runtime.GOOS == "windows" {
+ home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
+ if home == "" {
+ home = os.Getenv("USERPROFILE")
+ }
+ return home
+ }
+ return os.Getenv("HOME")
+}
+
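+// unmarshallConfigReader reads the configuration from in and decodes it into c
+// according to configType ("yaml", "json", "hcl", "toml", "properties" and their aliases).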
+func unmarshallConfigReader(in io.Reader, c map[string]interface{}, configType string) error {
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(in)
+
+ switch strings.ToLower(configType) {
+ case "yaml", "yml":
+ if err := yaml.Unmarshal(buf.Bytes(), &c); err != nil {
+ return ConfigParseError{err}
+ }
+
+ case "json":
+ if err := json.Unmarshal(buf.Bytes(), &c); err != nil {
+ return ConfigParseError{err}
+ }
+
+ case "hcl":
+ obj, err := hcl.Parse(string(buf.Bytes()))
+ if err != nil {
+ return ConfigParseError{err}
+ }
+ if err = hcl.DecodeObject(&c, obj); err != nil {
+ return ConfigParseError{err}
+ }
+
+ case "toml":
+ tree, err := toml.LoadReader(buf)
+ if err != nil {
+ return ConfigParseError{err}
+ }
+ tmap := tree.ToMap()
+ for k, v := range tmap {
+ c[k] = v
+ }
+
+ case "properties", "props", "prop":
+ var p *properties.Properties
+ var err error
+ if p, err = properties.Load(buf.Bytes(), properties.UTF8); err != nil {
+ return ConfigParseError{err}
+ }
+ for _, key := range p.Keys() {
+ value, _ := p.Get(key)
+ // recursively build nested maps
+ path := strings.Split(key, ".")
+ lastKey := strings.ToLower(path[len(path)-1])
+ deepestMap := deepSearch(c, path[0:len(path)-1])
+ // set innermost value
+ deepestMap[lastKey] = value
+ }
+ }
+
+ insensitiviseMap(c)
+ return nil
+}
+
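+// safeMul multiplies a and b, returning 0 if the unsigned multiplication overflows.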
+func safeMul(a, b uint) uint {
+ c := a * b
+ if a > 1 && b > 1 && c/b != a {
+ return 0
+ }
+ return c
+}
+
+// parseSizeInBytes converts strings like 1GB or 12 mb into an unsigned integer number of bytes
+func parseSizeInBytes(sizeStr string) uint {
+ sizeStr = strings.TrimSpace(sizeStr)
+ lastChar := len(sizeStr) - 1
+ multiplier := uint(1)
+
+ if lastChar > 0 {
+ if sizeStr[lastChar] == 'b' || sizeStr[lastChar] == 'B' {
+ if lastChar > 1 {
+ switch unicode.ToLower(rune(sizeStr[lastChar-1])) {
+ case 'k':
+ multiplier = 1 << 10
+ sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+ case 'm':
+ multiplier = 1 << 20
+ sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+ case 'g':
+ multiplier = 1 << 30
+ sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+ default:
+ multiplier = 1
+ sizeStr = strings.TrimSpace(sizeStr[:lastChar])
+ }
+ }
+ }
+ }
+
+ size := cast.ToInt(sizeStr)
+ if size < 0 {
+ size = 0
+ }
+
+ return safeMul(uint(size), multiplier)
+}
+
+// deepSearch scans deep maps, following the key indexes listed in the
+// sequence "path".
+// The last value is expected to be another map, and is returned.
+//
+// In case intermediate keys do not exist, or map to a non-map value,
+// a new map is created and inserted, and the search continues from there:
+// the initial map "m" may be modified!
+func deepSearch(m map[string]interface{}, path []string) map[string]interface{} {
+ for _, k := range path {
+ m2, ok := m[k]
+ if !ok {
+ // intermediate key does not exist
+ // => create it and continue from there
+ m3 := make(map[string]interface{})
+ m[k] = m3
+ m = m3
+ continue
+ }
+ m3, ok := m2.(map[string]interface{})
+ if !ok {
+ // intermediate key is a value
+ // => replace with a new map
+ m3 = make(map[string]interface{})
+ m[k] = m3
+ }
+ // continue search from here
+ m = m3
+ }
+ return m
+}
diff --git a/vendor/github.com/spf13/viper/util_test.go b/vendor/github.com/spf13/viper/util_test.go
new file mode 100644
index 000000000..5949e09e4
--- /dev/null
+++ b/vendor/github.com/spf13/viper/util_test.go
@@ -0,0 +1,55 @@
+// Copyright © 2016 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Viper is an application configuration system.
+// It believes that applications can be configured in a variety of ways
+// via flags, ENVIRONMENT variables, configuration files retrieved
+// from the file system, or a remote key/value store.
+
+package viper
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestCopyAndInsensitiviseMap(t *testing.T) {
+
+ var (
+ given = map[string]interface{}{
+ "Foo": 32,
+			"Bar": map[interface{}]interface{}{
+				"ABc": "A",
+				"cDE": "B",
+			},
+		}
+		expected = map[string]interface{}{
+			"foo": 32,
+			"bar": map[string]interface{}{
+				"abc": "A",
+				"cde": "B",
+			},
+ }
+ )
+
+ got := copyAndInsensitiviseMap(given)
+
+ if !reflect.DeepEqual(got, expected) {
+ t.Fatalf("Got %q\nexpected\n%q", got, expected)
+ }
+
+ if _, ok := given["foo"]; ok {
+ t.Fatal("Input map changed")
+ }
+
+ if _, ok := given["bar"]; ok {
+ t.Fatal("Input map changed")
+ }
+
+ m := given["Bar"].(map[interface{}]interface{})
+ if _, ok := m["ABc"]; !ok {
+ t.Fatal("Input map changed")
+ }
+}
diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go
new file mode 100644
index 000000000..22a2ed8ad
--- /dev/null
+++ b/vendor/github.com/spf13/viper/viper.go
@@ -0,0 +1,1550 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Viper is an application configuration system.
+// It believes that applications can be configured in a variety of ways
+// via flags, ENVIRONMENT variables, configuration files retrieved
+// from the file system, or a remote key/value store.
+
+// Each item takes precedence over the item below it:
+
+// overrides
+// flag
+// env
+// config
+// key/value store
+// default
+
+package viper
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/fsnotify/fsnotify"
+ "github.com/mitchellh/mapstructure"
+ "github.com/spf13/afero"
+ "github.com/spf13/cast"
+ jww "github.com/spf13/jwalterweatherman"
+ "github.com/spf13/pflag"
+)
+
+var v *Viper
+
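+// RemoteResponse is the response received when watching a remote
+// key/value store for changes.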
+type RemoteResponse struct {
+ Value []byte
+ Error error
+}
+
+func init() {
+ v = New()
+}
+
+type remoteConfigFactory interface {
+ Get(rp RemoteProvider) (io.Reader, error)
+ Watch(rp RemoteProvider) (io.Reader, error)
+	WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool)
+}
+
+// RemoteConfig is optional, see the remote package
+var RemoteConfig remoteConfigFactory
+
+// UnsupportedConfigError denotes encountering an unsupported
+// configuration filetype.
+type UnsupportedConfigError string
+
+// Error returns the formatted configuration error.
+func (str UnsupportedConfigError) Error() string {
+ return fmt.Sprintf("Unsupported Config Type %q", string(str))
+}
+
+// UnsupportedRemoteProviderError denotes encountering an unsupported remote
+// provider. Currently only etcd and Consul are
+// supported.
+type UnsupportedRemoteProviderError string
+
+// Error returns the formatted remote provider error.
+func (str UnsupportedRemoteProviderError) Error() string {
+ return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str))
+}
+
+// RemoteConfigError denotes encountering an error while trying to
+// pull the configuration from the remote provider.
+type RemoteConfigError string
+
+// Error returns the formatted remote provider error
+func (rce RemoteConfigError) Error() string {
+ return fmt.Sprintf("Remote Configurations Error: %s", string(rce))
+}
+
+// ConfigFileNotFoundError denotes failing to find configuration file.
+type ConfigFileNotFoundError struct {
+ name, locations string
+}
+
+// Error returns the formatted configuration error.
+func (fnfe ConfigFileNotFoundError) Error() string {
+ return fmt.Sprintf("Config File %q Not Found in %q", fnfe.name, fnfe.locations)
+}
+
+// Viper is a prioritized configuration registry. It
+// maintains a set of configuration sources, fetches
+// values to populate those, and provides them according
+// to the source's priority.
+// The priority of the sources is the following:
+// 1. overrides
+// 2. flags
+// 3. env. variables
+// 4. config file
+// 5. key/value store
+// 6. defaults
+//
+// For example, if values from the following sources were loaded:
+//
+// Defaults : {
+// "secret": "",
+// "user": "default",
+// "endpoint": "https://localhost"
+// }
+// Config : {
+// "user": "root"
+// "secret": "defaultsecret"
+// }
+// Env : {
+// "secret": "somesecretkey"
+// }
+//
+// The resulting config will have the following values:
+//
+// {
+// "secret": "somesecretkey",
+// "user": "root",
+// "endpoint": "https://localhost"
+// }
+type Viper struct {
+ // Delimiter that separates a list of keys
+ // used to access a nested value in one go
+ keyDelim string
+
+ // A set of paths to look for the config file in
+ configPaths []string
+
+ // The filesystem to read config from.
+ fs afero.Fs
+
+ // A set of remote providers to search for the configuration
+ remoteProviders []*defaultRemoteProvider
+
+ // Name of file to look for inside the path
+ configName string
+ configFile string
+ configType string
+ envPrefix string
+
+ automaticEnvApplied bool
+ envKeyReplacer *strings.Replacer
+
+ config map[string]interface{}
+ override map[string]interface{}
+ defaults map[string]interface{}
+ kvstore map[string]interface{}
+ pflags map[string]FlagValue
+ env map[string]string
+ aliases map[string]string
+ typeByDefValue bool
+
+ onConfigChange func(fsnotify.Event)
+}
+
+// New returns an initialized Viper instance.
+func New() *Viper {
+ v := new(Viper)
+ v.keyDelim = "."
+ v.configName = "config"
+ v.fs = afero.NewOsFs()
+ v.config = make(map[string]interface{})
+ v.override = make(map[string]interface{})
+ v.defaults = make(map[string]interface{})
+ v.kvstore = make(map[string]interface{})
+ v.pflags = make(map[string]FlagValue)
+ v.env = make(map[string]string)
+ v.aliases = make(map[string]string)
+ v.typeByDefValue = false
+
+ return v
+}
+
+// Reset is intended for testing; it resets all settings to their defaults.
+// It is in the public interface for the viper package so applications
+// can use it in their testing as well.
+func Reset() {
+ v = New()
+ SupportedExts = []string{"json", "toml", "yaml", "yml", "hcl"}
+ SupportedRemoteProviders = []string{"etcd", "consul"}
+}
+
+type defaultRemoteProvider struct {
+ provider string
+ endpoint string
+ path string
+ secretKeyring string
+}
+
+func (rp defaultRemoteProvider) Provider() string {
+ return rp.provider
+}
+
+func (rp defaultRemoteProvider) Endpoint() string {
+ return rp.endpoint
+}
+
+func (rp defaultRemoteProvider) Path() string {
+ return rp.path
+}
+
+func (rp defaultRemoteProvider) SecretKeyring() string {
+ return rp.secretKeyring
+}
+
+// RemoteProvider stores the configuration necessary
+// to connect to a remote key/value store.
+// Optional secretKeyring to decrypt encrypted values
+// can be provided.
+type RemoteProvider interface {
+ Provider() string
+ Endpoint() string
+ Path() string
+ SecretKeyring() string
+}
+
+// SupportedExts are universally supported extensions.
+var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl"}
+
+// SupportedRemoteProviders are universally supported remote providers.
+var SupportedRemoteProviders = []string{"etcd", "consul"}
+
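+// OnConfigChange registers a callback to be invoked when the watched
+// configuration file changes (see WatchConfig).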
+func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) }
+func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) {
+ v.onConfigChange = run
+}
+
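+// WatchConfig watches the configuration file for changes and, on write or
+// create events, re-reads it and invokes the OnConfigChange callback.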
+func WatchConfig() { v.WatchConfig() }
+func (v *Viper) WatchConfig() {
+ go func() {
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer watcher.Close()
+
+ // we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way
+ filename, err := v.getConfigFile()
+ if err != nil {
+ log.Println("error:", err)
+ return
+ }
+
+ configFile := filepath.Clean(filename)
+ configDir, _ := filepath.Split(configFile)
+
+ done := make(chan bool)
+ go func() {
+ for {
+ select {
+ case event := <-watcher.Events:
+ // we only care about the config file
+ if filepath.Clean(event.Name) == configFile {
+ if event.Op&fsnotify.Write == fsnotify.Write || event.Op&fsnotify.Create == fsnotify.Create {
+ err := v.ReadInConfig()
+ if err != nil {
+ log.Println("error:", err)
+ }
+ v.onConfigChange(event)
+ }
+ }
+ case err := <-watcher.Errors:
+ log.Println("error:", err)
+ }
+ }
+ }()
+
+ watcher.Add(configDir)
+ <-done
+ }()
+}
+
+// SetConfigFile explicitly defines the path, name and extension of the config file
+// Viper will use this and not check any of the config paths
+func SetConfigFile(in string) { v.SetConfigFile(in) }
+func (v *Viper) SetConfigFile(in string) {
+ if in != "" {
+ v.configFile = in
+ }
+}
+
+// SetEnvPrefix defines a prefix that ENVIRONMENT variables will use.
+// E.g. if your prefix is "spf", the env registry
+// will look for env. variables that start with "SPF_"
+func SetEnvPrefix(in string) { v.SetEnvPrefix(in) }
+func (v *Viper) SetEnvPrefix(in string) {
+ if in != "" {
+ v.envPrefix = in
+ }
+}
+
+func (v *Viper) mergeWithEnvPrefix(in string) string {
+ if v.envPrefix != "" {
+ return strings.ToUpper(v.envPrefix + "_" + in)
+ }
+
+ return strings.ToUpper(in)
+}
+
+// TODO: should the getEnv logic be moved into find()? Key rewriting could be
+// generalized to many things, e.g. Get("someKey") -> some_key
+// (camel case to snake case for JSON keys, perhaps).
+
+// getEnv is a wrapper around os.Getenv which replaces characters in the original
+// key. This allows env vars which have different keys than the config object
+// keys.
+func (v *Viper) getEnv(key string) string {
+ if v.envKeyReplacer != nil {
+ key = v.envKeyReplacer.Replace(key)
+ }
+ return os.Getenv(key)
+}
+
+// ConfigFileUsed returns the file used to populate the config registry
+func ConfigFileUsed() string { return v.ConfigFileUsed() }
+func (v *Viper) ConfigFileUsed() string { return v.configFile }
+
+// AddConfigPath adds a path for Viper to search for the config file in.
+// Can be called multiple times to define multiple search paths.
+func AddConfigPath(in string) { v.AddConfigPath(in) }
+func (v *Viper) AddConfigPath(in string) {
+ if in != "" {
+ absin := absPathify(in)
+ jww.INFO.Println("adding", absin, "to paths to search")
+ if !stringInSlice(absin, v.configPaths) {
+ v.configPaths = append(v.configPaths, absin)
+ }
+ }
+}
+
+// AddRemoteProvider adds a remote configuration source.
+// Remote Providers are searched in the order they are added.
+// provider is a string value, "etcd" or "consul" are currently supported.
+// endpoint is the url. etcd requires http://ip:port, consul requires ip:port
+// path is the path in the k/v store to retrieve configuration
+// To retrieve a config file called myapp.json from /configs/myapp.json
+// you should set path to /configs and set config name (SetConfigName()) to
+// "myapp"
+func AddRemoteProvider(provider, endpoint, path string) error {
+ return v.AddRemoteProvider(provider, endpoint, path)
+}
+func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error {
+ if !stringInSlice(provider, SupportedRemoteProviders) {
+ return UnsupportedRemoteProviderError(provider)
+ }
+ if provider != "" && endpoint != "" {
+ jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint)
+ rp := &defaultRemoteProvider{
+ endpoint: endpoint,
+ provider: provider,
+ path: path,
+ }
+ if !v.providerPathExists(rp) {
+ v.remoteProviders = append(v.remoteProviders, rp)
+ }
+ }
+ return nil
+}
+
+// AddSecureRemoteProvider adds a remote configuration source.
+// Secure Remote Providers are searched in the order they are added.
+// provider is a string value, "etcd" or "consul" are currently supported.
+// endpoint is the url. etcd requires http://ip:port, consul requires ip:port
+// secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg
+// path is the path in the k/v store to retrieve configuration
+// To retrieve a config file called myapp.json from /configs/myapp.json
+// you should set path to /configs and set config name (SetConfigName()) to
+// "myapp"
+// Secure Remote Providers are implemented with github.com/xordataexchange/crypt
+func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error {
+ return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring)
+}
+
+func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error {
+ if !stringInSlice(provider, SupportedRemoteProviders) {
+ return UnsupportedRemoteProviderError(provider)
+ }
+ if provider != "" && endpoint != "" {
+ jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint)
+ rp := &defaultRemoteProvider{
+ endpoint: endpoint,
+ provider: provider,
+ path: path,
+ secretKeyring: secretkeyring,
+ }
+ if !v.providerPathExists(rp) {
+ v.remoteProviders = append(v.remoteProviders, rp)
+ }
+ }
+ return nil
+}
+
+func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool {
+ for _, y := range v.remoteProviders {
+ if reflect.DeepEqual(y, p) {
+ return true
+ }
+ }
+ return false
+}
+
+// searchMap recursively searches for a value for path in source map.
+// Returns nil if not found.
+// Note: This assumes that the path entries and map keys are lower cased.
+func (v *Viper) searchMap(source map[string]interface{}, path []string) interface{} {
+ if len(path) == 0 {
+ return source
+ }
+
+ next, ok := source[path[0]]
+ if ok {
+ // Fast path
+ if len(path) == 1 {
+ return next
+ }
+
+ // Nested case
+ switch next.(type) {
+ case map[interface{}]interface{}:
+ return v.searchMap(cast.ToStringMap(next), path[1:])
+ case map[string]interface{}:
+ // Type assertion is safe here since it is only reached
+ // if the type of `next` is the same as the type being asserted
+ return v.searchMap(next.(map[string]interface{}), path[1:])
+ default:
+ // got a value but nested key expected, return "nil" for not found
+ return nil
+ }
+ }
+ return nil
+}
+
+// searchMapWithPathPrefixes recursively searches for a value for path in source map.
+//
+// While searchMap() considers each path element as a single map key, this
+// function searches for, and prioritizes, merged path elements.
+// e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar"
+// is also defined, this latter value is returned for path ["foo", "bar"].
+//
+// This should be useful only at config level (other maps may not contain dots
+// in their keys).
+//
+// Note: This assumes that the path entries and map keys are lower cased.
+func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path []string) interface{} {
+ if len(path) == 0 {
+ return source
+ }
+
+ // search for path prefixes, starting from the longest one
+ for i := len(path); i > 0; i-- {
+ prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim))
+
+ next, ok := source[prefixKey]
+ if ok {
+ // Fast path
+ if i == len(path) {
+ return next
+ }
+
+ // Nested case
+ var val interface{}
+ switch next.(type) {
+ case map[interface{}]interface{}:
+ val = v.searchMapWithPathPrefixes(cast.ToStringMap(next), path[i:])
+ case map[string]interface{}:
+ // Type assertion is safe here since it is only reached
+ // if the type of `next` is the same as the type being asserted
+ val = v.searchMapWithPathPrefixes(next.(map[string]interface{}), path[i:])
+ default:
+ // got a value but nested key expected, do nothing and look for next prefix
+ }
+ if val != nil {
+ return val
+ }
+ }
+ }
+
+ // not found
+ return nil
+}
+
+// isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere
+// on its path in the map.
+// e.g., if "foo.bar" has a value in the given map, it “shadows”
+// "foo.bar.baz" in a lower-priority map
+func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) string {
+ var parentVal interface{}
+ for i := 1; i < len(path); i++ {
+ parentVal = v.searchMap(m, path[0:i])
+ if parentVal == nil {
+ // not found, no need to add more path elements
+ return ""
+ }
+ switch parentVal.(type) {
+ case map[interface{}]interface{}:
+ continue
+ case map[string]interface{}:
+ continue
+ default:
+ // parentVal is a regular value which shadows "path"
+ return strings.Join(path[0:i], v.keyDelim)
+ }
+ }
+ return ""
+}
+
+// isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere
+// in a sub-path of the map.
+// e.g., if "foo.bar" has a value in the given map, it “shadows”
+// "foo.bar.baz" in a lower-priority map
+func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string {
+ // unify input map
+ var m map[string]interface{}
+ switch mi.(type) {
+ case map[string]string, map[string]FlagValue:
+ m = cast.ToStringMap(mi)
+ default:
+ return ""
+ }
+
+ // scan paths
+ var parentKey string
+ for i := 1; i < len(path); i++ {
+ parentKey = strings.Join(path[0:i], v.keyDelim)
+ if _, ok := m[parentKey]; ok {
+ return parentKey
+ }
+ }
+ return ""
+}
+
+// isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere
+// in the environment, when automatic env is on.
+// e.g., if "foo.bar" has a value in the environment, it “shadows”
+// "foo.bar.baz" in a lower-priority map
+func (v *Viper) isPathShadowedInAutoEnv(path []string) string {
+ var parentKey string
+ var val string
+ for i := 1; i < len(path); i++ {
+ parentKey = strings.Join(path[0:i], v.keyDelim)
+ if val = v.getEnv(v.mergeWithEnvPrefix(parentKey)); val != "" {
+ return parentKey
+ }
+ }
+ return ""
+}
+
+// SetTypeByDefaultValue enables or disables the inference of a key value's
+// type when the Get function is used based upon a key's default value as
+// opposed to the value returned based on the normal fetch logic.
+//
+// For example, if a key has a default value of []string{} and the same key
+// is set via an environment variable to "a b c", a call to the Get function
+// would return a string slice for the key if the key's type is inferred by
+// the default value and the Get function would return:
+//
+// []string {"a", "b", "c"}
+//
+// Otherwise the Get function would return:
+//
+// "a b c"
+func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) }
+func (v *Viper) SetTypeByDefaultValue(enable bool) {
+ v.typeByDefValue = enable
+}
+
+// GetViper gets the global Viper instance.
+func GetViper() *Viper {
+ return v
+}
+
+// Get can retrieve any value given the key to use.
+// Get is case-insensitive for a key.
+// Get has the behavior of returning the value associated with the first
+// place from where it is set. Viper will check in the following order:
+// override, flag, env, config file, key/value store, default
+//
+// Get returns an interface. For a specific value use one of the Get____ methods.
+func Get(key string) interface{} { return v.Get(key) }
+func (v *Viper) Get(key string) interface{} {
+ lcaseKey := strings.ToLower(key)
+ val := v.find(lcaseKey)
+ if val == nil {
+ return nil
+ }
+
+ valType := val
+ if v.typeByDefValue {
+ // TODO(bep) this branch isn't covered by a single test.
+ path := strings.Split(lcaseKey, v.keyDelim)
+ defVal := v.searchMap(v.defaults, path)
+ if defVal != nil {
+ valType = defVal
+ }
+ }
+
+ switch valType.(type) {
+ case bool:
+ return cast.ToBool(val)
+ case string:
+ return cast.ToString(val)
+ case int64, int32, int16, int8, int:
+ return cast.ToInt(val)
+ case float64, float32:
+ return cast.ToFloat64(val)
+ case time.Time:
+ return cast.ToTime(val)
+ case time.Duration:
+ return cast.ToDuration(val)
+ case []string:
+ return cast.ToStringSlice(val)
+ }
+ return val
+}
+
+// Sub returns new Viper instance representing a sub tree of this instance.
+// Sub is case-insensitive for a key.
+func Sub(key string) *Viper { return v.Sub(key) }
+func (v *Viper) Sub(key string) *Viper {
+ subv := New()
+ data := v.Get(key)
+ if data == nil {
+ return nil
+ }
+
+ if reflect.TypeOf(data).Kind() == reflect.Map {
+ subv.config = cast.ToStringMap(data)
+ return subv
+ }
+ return nil
+}
+
+// GetString returns the value associated with the key as a string.
+func GetString(key string) string { return v.GetString(key) }
+func (v *Viper) GetString(key string) string {
+ return cast.ToString(v.Get(key))
+}
+
+// GetBool returns the value associated with the key as a boolean.
+func GetBool(key string) bool { return v.GetBool(key) }
+func (v *Viper) GetBool(key string) bool {
+ return cast.ToBool(v.Get(key))
+}
+
+// GetInt returns the value associated with the key as an integer.
+func GetInt(key string) int { return v.GetInt(key) }
+func (v *Viper) GetInt(key string) int {
+ return cast.ToInt(v.Get(key))
+}
+
+// GetInt64 returns the value associated with the key as an integer.
+func GetInt64(key string) int64 { return v.GetInt64(key) }
+func (v *Viper) GetInt64(key string) int64 {
+ return cast.ToInt64(v.Get(key))
+}
+
+// GetFloat64 returns the value associated with the key as a float64.
+func GetFloat64(key string) float64 { return v.GetFloat64(key) }
+func (v *Viper) GetFloat64(key string) float64 {
+ return cast.ToFloat64(v.Get(key))
+}
+
+// GetTime returns the value associated with the key as time.
+func GetTime(key string) time.Time { return v.GetTime(key) }
+func (v *Viper) GetTime(key string) time.Time {
+ return cast.ToTime(v.Get(key))
+}
+
+// GetDuration returns the value associated with the key as a duration.
+func GetDuration(key string) time.Duration { return v.GetDuration(key) }
+func (v *Viper) GetDuration(key string) time.Duration {
+ return cast.ToDuration(v.Get(key))
+}
+
+// GetStringSlice returns the value associated with the key as a slice of strings.
+func GetStringSlice(key string) []string { return v.GetStringSlice(key) }
+func (v *Viper) GetStringSlice(key string) []string {
+ return cast.ToStringSlice(v.Get(key))
+}
+
+// GetStringMap returns the value associated with the key as a map of interfaces.
+func GetStringMap(key string) map[string]interface{} { return v.GetStringMap(key) }
+func (v *Viper) GetStringMap(key string) map[string]interface{} {
+ return cast.ToStringMap(v.Get(key))
+}
+
+// GetStringMapString returns the value associated with the key as a map of strings.
+func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) }
+func (v *Viper) GetStringMapString(key string) map[string]string {
+ return cast.ToStringMapString(v.Get(key))
+}
+
+// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings.
+func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) }
+func (v *Viper) GetStringMapStringSlice(key string) map[string][]string {
+ return cast.ToStringMapStringSlice(v.Get(key))
+}
+
+// GetSizeInBytes returns the size of the value associated with the given key
+// in bytes.
+func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) }
+func (v *Viper) GetSizeInBytes(key string) uint {
+ sizeStr := cast.ToString(v.Get(key))
+ return parseSizeInBytes(sizeStr)
+}
+
+// UnmarshalKey takes a single key and unmarshals it into a Struct.
+func UnmarshalKey(key string, rawVal interface{}) error { return v.UnmarshalKey(key, rawVal) }
+func (v *Viper) UnmarshalKey(key string, rawVal interface{}) error {
+ return mapstructure.Decode(v.Get(key), rawVal)
+}
+
+// Unmarshal unmarshals the config into a Struct. Make sure that the tags
+// on the fields of the structure are properly set.
+func Unmarshal(rawVal interface{}) error { return v.Unmarshal(rawVal) }
+func (v *Viper) Unmarshal(rawVal interface{}) error {
+ err := decode(v.AllSettings(), defaultDecoderConfig(rawVal))
+
+ if err != nil {
+ return err
+ }
+
+ v.insensitiviseMaps()
+
+ return nil
+}
+
+// defaultDecoderConfig returns a default mapstructure.DecoderConfig with support
+// for time.Duration values.
+func defaultDecoderConfig(output interface{}) *mapstructure.DecoderConfig {
+ return &mapstructure.DecoderConfig{
+ Metadata: nil,
+ Result: output,
+ WeaklyTypedInput: true,
+ DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
+ }
+}
+
+// A wrapper around mapstructure.Decode that mimics the WeakDecode functionality
+func decode(input interface{}, config *mapstructure.DecoderConfig) error {
+ decoder, err := mapstructure.NewDecoder(config)
+ if err != nil {
+ return err
+ }
+ return decoder.Decode(input)
+}
+
+// UnmarshalExact unmarshals the config into a Struct, erroring if a field is nonexistent
+// in the destination struct.
+func (v *Viper) UnmarshalExact(rawVal interface{}) error {
+ config := defaultDecoderConfig(rawVal)
+ config.ErrorUnused = true
+
+ err := decode(v.AllSettings(), config)
+
+ if err != nil {
+ return err
+ }
+
+ v.insensitiviseMaps()
+
+ return nil
+}
+
+// BindPFlags binds a full flag set to the configuration, using each flag's long
+// name as the config key.
+func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) }
+func (v *Viper) BindPFlags(flags *pflag.FlagSet) error {
+ return v.BindFlagValues(pflagValueSet{flags})
+}
+
+// BindPFlag binds a specific key to a pflag (as used by cobra).
+// Example (where serverCmd is a Cobra instance):
+//
+// serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
+// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
+//
+func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) }
+func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error {
+ return v.BindFlagValue(key, pflagValue{flag})
+}
+
+// BindFlagValues binds a full FlagValue set to the configuration, using each flag's long
+// name as the config key.
+func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) }
+func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) {
+ flags.VisitAll(func(flag FlagValue) {
+ if err = v.BindFlagValue(flag.Name(), flag); err != nil {
+ return
+ }
+ })
+	return err
+}
+
+// BindFlagValue binds a specific key to a FlagValue.
+// Example(where serverCmd is a Cobra instance):
+//
+// serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
+// Viper.BindFlagValue("port", serverCmd.Flags().Lookup("port"))
+//
+func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) }
+func (v *Viper) BindFlagValue(key string, flag FlagValue) error {
+ if flag == nil {
+ return fmt.Errorf("flag for %q is nil", key)
+ }
+ v.pflags[strings.ToLower(key)] = flag
+ return nil
+}
+
+// BindEnv binds a Viper key to an ENV variable.
+// ENV variables are case sensitive.
+// If only a key is provided, it will use the env key matching the key, uppercased.
+// EnvPrefix will be used when set and the env name is not provided.
+func BindEnv(input ...string) error { return v.BindEnv(input...) }
+func (v *Viper) BindEnv(input ...string) error {
+ var key, envkey string
+ if len(input) == 0 {
+ return fmt.Errorf("BindEnv missing key to bind to")
+ }
+
+ key = strings.ToLower(input[0])
+
+ if len(input) == 1 {
+ envkey = v.mergeWithEnvPrefix(key)
+ } else {
+ envkey = input[1]
+ }
+
+ v.env[key] = envkey
+
+ return nil
+}
+
+// Given a key, find the value.
+// Viper will check in the following order:
+// override, flag, env, config file, key/value store, default.
+// Viper will check to see if an alias exists first.
+// Note: this assumes a lower-cased key given.
+func (v *Viper) find(lcaseKey string) interface{} {
+
+ var (
+ val interface{}
+ exists bool
+ path = strings.Split(lcaseKey, v.keyDelim)
+ nested = len(path) > 1
+ )
+
+ // compute the path through the nested maps to the nested value
+ if nested && v.isPathShadowedInDeepMap(path, castMapStringToMapInterface(v.aliases)) != "" {
+ return nil
+ }
+
+ // if the requested key is an alias, then return the proper key
+ lcaseKey = v.realKey(lcaseKey)
+ path = strings.Split(lcaseKey, v.keyDelim)
+ nested = len(path) > 1
+
+ // Set() override first
+ val = v.searchMap(v.override, path)
+ if val != nil {
+ return val
+ }
+ if nested && v.isPathShadowedInDeepMap(path, v.override) != "" {
+ return nil
+ }
+
+ // PFlag override next
+ flag, exists := v.pflags[lcaseKey]
+ if exists && flag.HasChanged() {
+ switch flag.ValueType() {
+ case "int", "int8", "int16", "int32", "int64":
+ return cast.ToInt(flag.ValueString())
+ case "bool":
+ return cast.ToBool(flag.ValueString())
+ case "stringSlice":
+ s := strings.TrimPrefix(flag.ValueString(), "[")
+ return strings.TrimSuffix(s, "]")
+ default:
+ return flag.ValueString()
+ }
+ }
+ if nested && v.isPathShadowedInFlatMap(path, v.pflags) != "" {
+ return nil
+ }
+
+ // Env override next
+ if v.automaticEnvApplied {
+ // even if it hasn't been registered, if automaticEnv is used,
+ // check any Get request
+ if val = v.getEnv(v.mergeWithEnvPrefix(lcaseKey)); val != "" {
+ return val
+ }
+ if nested && v.isPathShadowedInAutoEnv(path) != "" {
+ return nil
+ }
+ }
+ envkey, exists := v.env[lcaseKey]
+ if exists {
+ if val = v.getEnv(envkey); val != "" {
+ return val
+ }
+ }
+ if nested && v.isPathShadowedInFlatMap(path, v.env) != "" {
+ return nil
+ }
+
+ // Config file next
+ val = v.searchMapWithPathPrefixes(v.config, path)
+ if val != nil {
+ return val
+ }
+ if nested && v.isPathShadowedInDeepMap(path, v.config) != "" {
+ return nil
+ }
+
+ // K/V store next
+ val = v.searchMap(v.kvstore, path)
+ if val != nil {
+ return val
+ }
+ if nested && v.isPathShadowedInDeepMap(path, v.kvstore) != "" {
+ return nil
+ }
+
+ // Default next
+ val = v.searchMap(v.defaults, path)
+ if val != nil {
+ return val
+ }
+ if nested && v.isPathShadowedInDeepMap(path, v.defaults) != "" {
+ return nil
+ }
+
+ // last chance: if no other value is returned and a flag does exist for the value,
+ // get the flag's value even if the flag's value has not changed
+ if flag, exists := v.pflags[lcaseKey]; exists {
+ switch flag.ValueType() {
+ case "int", "int8", "int16", "int32", "int64":
+ return cast.ToInt(flag.ValueString())
+ case "bool":
+ return cast.ToBool(flag.ValueString())
+ case "stringSlice":
+ s := strings.TrimPrefix(flag.ValueString(), "[")
+ return strings.TrimSuffix(s, "]")
+ default:
+ return flag.ValueString()
+ }
+ }
+ // last item, no need to check shadowing
+
+ return nil
+}
+
+// IsSet checks to see if the key has been set in any of the data locations.
+// IsSet is case-insensitive for a key.
+func IsSet(key string) bool { return v.IsSet(key) }
+func (v *Viper) IsSet(key string) bool {
+ lcaseKey := strings.ToLower(key)
+ val := v.find(lcaseKey)
+ return val != nil
+}
+
+// AutomaticEnv has Viper check ENV variables for all
+// keys set in config, defaults & flags.
+func AutomaticEnv() { v.AutomaticEnv() }
+func (v *Viper) AutomaticEnv() {
+ v.automaticEnvApplied = true
+}
+
+// SetEnvKeyReplacer sets the strings.Replacer on the viper object
+// Useful for mapping an environmental variable to a key that does
+// not match it.
+func SetEnvKeyReplacer(r *strings.Replacer) { v.SetEnvKeyReplacer(r) }
+func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) {
+ v.envKeyReplacer = r
+}
+
+// RegisterAlias provides another accessor (an alias) for the same key.
+// This enables one to change a name without breaking the application.
+func RegisterAlias(alias string, key string) { v.RegisterAlias(alias, key) }
+func (v *Viper) RegisterAlias(alias string, key string) {
+ v.registerAlias(alias, strings.ToLower(key))
+}
+
+func (v *Viper) registerAlias(alias string, key string) {
+ alias = strings.ToLower(alias)
+ if alias != key && alias != v.realKey(key) {
+ _, exists := v.aliases[alias]
+
+ if !exists {
+ // if we alias something that exists in one of the maps to another
+ // name, we'll never be able to get that value using the original
+ // name, so move the config value to the new realkey.
+ if val, ok := v.config[alias]; ok {
+ delete(v.config, alias)
+ v.config[key] = val
+ }
+ if val, ok := v.kvstore[alias]; ok {
+ delete(v.kvstore, alias)
+ v.kvstore[key] = val
+ }
+ if val, ok := v.defaults[alias]; ok {
+ delete(v.defaults, alias)
+ v.defaults[key] = val
+ }
+ if val, ok := v.override[alias]; ok {
+ delete(v.override, alias)
+ v.override[key] = val
+ }
+ v.aliases[alias] = key
+ }
+ } else {
+ jww.WARN.Println("Creating circular reference alias", alias, key, v.realKey(key))
+ }
+}
+
+func (v *Viper) realKey(key string) string {
+ newkey, exists := v.aliases[key]
+ if exists {
+ jww.DEBUG.Println("Alias", key, "to", newkey)
+ return v.realKey(newkey)
+ }
+ return key
+}
+
+// InConfig checks to see if the given key (or an alias) is in the config file.
+func InConfig(key string) bool { return v.InConfig(key) }
+func (v *Viper) InConfig(key string) bool {
+ // if the requested key is an alias, then return the proper key
+ key = v.realKey(key)
+
+ _, exists := v.config[key]
+ return exists
+}
+
+// SetDefault sets the default value for this key.
+// SetDefault is case-insensitive for a key.
+// Default only used when no value is provided by the user via flag, config or ENV.
+func SetDefault(key string, value interface{}) { v.SetDefault(key, value) }
+func (v *Viper) SetDefault(key string, value interface{}) {
+ // If alias passed in, then set the proper default
+ key = v.realKey(strings.ToLower(key))
+ value = toCaseInsensitiveValue(value)
+
+ path := strings.Split(key, v.keyDelim)
+ lastKey := strings.ToLower(path[len(path)-1])
+ deepestMap := deepSearch(v.defaults, path[0:len(path)-1])
+
+ // set innermost value
+ deepestMap[lastKey] = value
+}
+
+// Set sets the value for the key in the override register.
+// Set is case-insensitive for a key.
+// Will be used instead of values obtained via
+// flags, config file, ENV, default, or key/value store.
+func Set(key string, value interface{}) { v.Set(key, value) }
+func (v *Viper) Set(key string, value interface{}) {
+ // If alias passed in, then set the proper override
+ key = v.realKey(strings.ToLower(key))
+ value = toCaseInsensitiveValue(value)
+
+ path := strings.Split(key, v.keyDelim)
+ lastKey := strings.ToLower(path[len(path)-1])
+ deepestMap := deepSearch(v.override, path[0:len(path)-1])
+
+ // set innermost value
+ deepestMap[lastKey] = value
+}
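+
+// Sketch of the precedence between SetDefault and Set (key and values are
+// placeholders): a default is only used when no other source provides the key,
+// while Set always wins.
+//
+//	v := New()
+//	v.SetDefault("port", 1313)
+//	v.GetInt("port") // 1313, from the default
+//	v.Set("port", 1234)
+//	v.GetInt("port") // 1234, the override wins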
+
+// ReadInConfig will discover and load the configuration file from disk,
+// searching in one of the defined paths.
+func ReadInConfig() error { return v.ReadInConfig() }
+func (v *Viper) ReadInConfig() error {
+ jww.INFO.Println("Attempting to read in config file")
+ filename, err := v.getConfigFile()
+ if err != nil {
+ return err
+ }
+
+ if !stringInSlice(v.getConfigType(), SupportedExts) {
+ return UnsupportedConfigError(v.getConfigType())
+ }
+
+ file, err := afero.ReadFile(v.fs, filename)
+ if err != nil {
+ return err
+ }
+
+ config := make(map[string]interface{})
+
+ err = v.unmarshalReader(bytes.NewReader(file), config)
+ if err != nil {
+ return err
+ }
+
+ v.config = config
+ return nil
+}
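+
+// Typical usage sketch; the config name and search path are placeholders.
+// ReadInConfig resolves the file via SetConfigName/AddConfigPath (or an
+// explicit SetConfigFile) and replaces the config registry with its contents.
+//
+//	v := New()
+//	v.SetConfigName("config")     // file name without extension
+//	v.AddConfigPath("/etc/myapp") // placeholder search path
+//	if err := v.ReadInConfig(); err != nil {
+//		// handle a missing or unreadable config file
+//	}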
+
+// MergeInConfig merges a new configuration with an existing config.
+func MergeInConfig() error { return v.MergeInConfig() }
+func (v *Viper) MergeInConfig() error {
+ jww.INFO.Println("Attempting to merge in config file")
+ filename, err := v.getConfigFile()
+ if err != nil {
+ return err
+ }
+
+ if !stringInSlice(v.getConfigType(), SupportedExts) {
+ return UnsupportedConfigError(v.getConfigType())
+ }
+
+ file, err := afero.ReadFile(v.fs, filename)
+ if err != nil {
+ return err
+ }
+
+ return v.MergeConfig(bytes.NewReader(file))
+}
+
+// ReadConfig will read a configuration file, setting existing keys to nil if the
+// key does not exist in the file.
+func ReadConfig(in io.Reader) error { return v.ReadConfig(in) }
+func (v *Viper) ReadConfig(in io.Reader) error {
+ v.config = make(map[string]interface{})
+ return v.unmarshalReader(in, v.config)
+}
+
+// MergeConfig merges a new configuration with an existing config.
+func MergeConfig(in io.Reader) error { return v.MergeConfig(in) }
+func (v *Viper) MergeConfig(in io.Reader) error {
+ if v.config == nil {
+ v.config = make(map[string]interface{})
+ }
+ cfg := make(map[string]interface{})
+ if err := v.unmarshalReader(in, cfg); err != nil {
+ return err
+ }
+ mergeMaps(cfg, v.config, nil)
+ return nil
+}
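+
+// Sketch of reading a base config and merging a second one on top of it; the
+// YAML content is illustrative only. Keys present in both take the merged-in
+// value, keys present in only one are kept.
+//
+//	v := New()
+//	v.SetConfigType("yaml")
+//	v.ReadConfig(strings.NewReader("a: 1\nb: 2"))
+//	v.MergeConfig(strings.NewReader("b: 3\nc: 4"))
+//	// v.GetInt("a") == 1, v.GetInt("b") == 3, v.GetInt("c") == 4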
+
+func keyExists(k string, m map[string]interface{}) string {
+ lk := strings.ToLower(k)
+ for mk := range m {
+ lmk := strings.ToLower(mk)
+ if lmk == lk {
+ return mk
+ }
+ }
+ return ""
+}
+
+func castToMapStringInterface(
+ src map[interface{}]interface{}) map[string]interface{} {
+ tgt := map[string]interface{}{}
+ for k, v := range src {
+ tgt[fmt.Sprintf("%v", k)] = v
+ }
+ return tgt
+}
+
+func castMapStringToMapInterface(src map[string]string) map[string]interface{} {
+ tgt := map[string]interface{}{}
+ for k, v := range src {
+ tgt[k] = v
+ }
+ return tgt
+}
+
+func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} {
+ tgt := map[string]interface{}{}
+ for k, v := range src {
+ tgt[k] = v
+ }
+ return tgt
+}
+
+// mergeMaps merges two maps. The `itgt` parameter is for handling go-yaml's
+// insistence on parsing nested structures as `map[interface{}]interface{}`
+// instead of using a `string` as the key for nested structures beyond one level
+// deep. Both map types are supported as there is a go-yaml fork that uses
+// `map[string]interface{}` instead.
+func mergeMaps(
+ src, tgt map[string]interface{}, itgt map[interface{}]interface{}) {
+ for sk, sv := range src {
+ tk := keyExists(sk, tgt)
+ if tk == "" {
+ jww.TRACE.Printf("tk=\"\", tgt[%s]=%v", sk, sv)
+ tgt[sk] = sv
+ if itgt != nil {
+ itgt[sk] = sv
+ }
+ continue
+ }
+
+ tv, ok := tgt[tk]
+ if !ok {
+ jww.TRACE.Printf("tgt[%s] != ok, tgt[%s]=%v", tk, sk, sv)
+ tgt[sk] = sv
+ if itgt != nil {
+ itgt[sk] = sv
+ }
+ continue
+ }
+
+ svType := reflect.TypeOf(sv)
+ tvType := reflect.TypeOf(tv)
+ if svType != tvType {
+ jww.ERROR.Printf(
+ "svType != tvType; key=%s, st=%v, tt=%v, sv=%v, tv=%v",
+ sk, svType, tvType, sv, tv)
+ continue
+ }
+
+ jww.TRACE.Printf("processing key=%s, st=%v, tt=%v, sv=%v, tv=%v",
+ sk, svType, tvType, sv, tv)
+
+ switch ttv := tv.(type) {
+ case map[interface{}]interface{}:
+ jww.TRACE.Printf("merging maps (must convert)")
+ tsv := sv.(map[interface{}]interface{})
+ ssv := castToMapStringInterface(tsv)
+ stv := castToMapStringInterface(ttv)
+ mergeMaps(ssv, stv, ttv)
+ case map[string]interface{}:
+ jww.TRACE.Printf("merging maps")
+ mergeMaps(sv.(map[string]interface{}), ttv, nil)
+ default:
+ jww.TRACE.Printf("setting value")
+ tgt[tk] = sv
+ if itgt != nil {
+ itgt[tk] = sv
+ }
+ }
+ }
+}
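+
+// Illustration of the merge semantics above (placeholder data): nested maps are
+// merged recursively, scalars from src overwrite the target, and keys present
+// only in tgt are preserved.
+//
+//	src := map[string]interface{}{"a": map[string]interface{}{"b": 1}, "c": 9}
+//	tgt := map[string]interface{}{"a": map[string]interface{}{"d": 2}, "e": 5}
+//	mergeMaps(src, tgt, nil)
+//	// tgt is now {"a": {"b": 1, "d": 2}, "c": 9, "e": 5}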
+
+// ReadRemoteConfig attempts to get configuration from a remote source
+// and read it in the remote configuration registry.
+func ReadRemoteConfig() error { return v.ReadRemoteConfig() }
+func (v *Viper) ReadRemoteConfig() error {
+ return v.getKeyValueConfig()
+}
+
+func WatchRemoteConfig() error { return v.WatchRemoteConfig() }
+func (v *Viper) WatchRemoteConfig() error {
+ return v.watchKeyValueConfig()
+}
+
+func (v *Viper) WatchRemoteConfigOnChannel() error {
+ return v.watchKeyValueConfigOnChannel()
+}
+
+// Unmarshal a Reader into a map.
+// Should probably be an unexported function.
+func unmarshalReader(in io.Reader, c map[string]interface{}) error {
+ return v.unmarshalReader(in, c)
+}
+
+func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error {
+ return unmarshallConfigReader(in, c, v.getConfigType())
+}
+
+func (v *Viper) insensitiviseMaps() {
+ insensitiviseMap(v.config)
+ insensitiviseMap(v.defaults)
+ insensitiviseMap(v.override)
+ insensitiviseMap(v.kvstore)
+}
+
+// Retrieve the first found remote configuration.
+func (v *Viper) getKeyValueConfig() error {
+ if RemoteConfig == nil {
+ return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'")
+ }
+
+ for _, rp := range v.remoteProviders {
+ val, err := v.getRemoteConfig(rp)
+ if err != nil {
+ continue
+ }
+ v.kvstore = val
+ return nil
+ }
+ return RemoteConfigError("No Files Found")
+}
+
+func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) {
+ reader, err := RemoteConfig.Get(provider)
+ if err != nil {
+ return nil, err
+ }
+ err = v.unmarshalReader(reader, v.kvstore)
+ return v.kvstore, err
+}
+
+// Watch the first found remote configuration on a channel.
+func (v *Viper) watchKeyValueConfigOnChannel() error {
+ for _, rp := range v.remoteProviders {
+ respc, _ := RemoteConfig.WatchChannel(rp)
+ // TODO: Add quit channel
+ go func(rc <-chan *RemoteResponse) {
+ for {
+ b := <-rc
+ reader := bytes.NewReader(b.Value)
+ v.unmarshalReader(reader, v.kvstore)
+ }
+ }(respc)
+ return nil
+ }
+ return RemoteConfigError("No Files Found")
+}
+
+// Retrieve the first found remote configuration.
+func (v *Viper) watchKeyValueConfig() error {
+ for _, rp := range v.remoteProviders {
+ val, err := v.watchRemoteConfig(rp)
+ if err != nil {
+ continue
+ }
+ v.kvstore = val
+ return nil
+ }
+ return RemoteConfigError("No Files Found")
+}
+
+func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) {
+ reader, err := RemoteConfig.Watch(provider)
+ if err != nil {
+ return nil, err
+ }
+ err = v.unmarshalReader(reader, v.kvstore)
+ return v.kvstore, err
+}
+
+// AllKeys returns all keys holding a value, regardless of where they are set.
+// Nested keys are returned with a v.keyDelim (= ".") separator
+func AllKeys() []string { return v.AllKeys() }
+func (v *Viper) AllKeys() []string {
+ m := map[string]bool{}
+ // add all paths, by order of descending priority to ensure correct shadowing
+ m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "")
+ m = v.flattenAndMergeMap(m, v.override, "")
+ m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags))
+ m = v.mergeFlatMap(m, castMapStringToMapInterface(v.env))
+ m = v.flattenAndMergeMap(m, v.config, "")
+ m = v.flattenAndMergeMap(m, v.kvstore, "")
+ m = v.flattenAndMergeMap(m, v.defaults, "")
+
+ // convert set of paths to list
+ a := []string{}
+ for x := range m {
+ a = append(a, x)
+ }
+ return a
+}
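+
+// Sketch of the flattened key form returned by AllKeys (the config content is
+// a placeholder):
+//
+//	v := New()
+//	v.SetConfigType("yaml")
+//	v.ReadConfig(strings.NewReader("clothing:\n  jacket: leather"))
+//	v.AllKeys() // contains "clothing.jacket"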
+
+// flattenAndMergeMap recursively flattens the given map into a map[string]bool
+// of key paths (used as a set, easier to manipulate than a []string):
+// - each path is merged into a single key string, delimited with v.keyDelim (= ".")
+// - if a path is shadowed by an earlier value in the initial shadow map,
+// it is skipped.
+// The resulting set of paths is merged to the given shadow set at the same time.
+func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interface{}, prefix string) map[string]bool {
+ if shadow != nil && prefix != "" && shadow[prefix] {
+ // prefix is shadowed => nothing more to flatten
+ return shadow
+ }
+ if shadow == nil {
+ shadow = make(map[string]bool)
+ }
+
+ var m2 map[string]interface{}
+ if prefix != "" {
+ prefix += v.keyDelim
+ }
+ for k, val := range m {
+ fullKey := prefix + k
+ switch val.(type) {
+ case map[string]interface{}:
+ m2 = val.(map[string]interface{})
+ case map[interface{}]interface{}:
+ m2 = cast.ToStringMap(val)
+ default:
+ // immediate value
+ shadow[strings.ToLower(fullKey)] = true
+ continue
+ }
+ // recursively merge to shadow map
+ shadow = v.flattenAndMergeMap(shadow, m2, fullKey)
+ }
+ return shadow
+}
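+
+// For example (placeholder data), flattening
+//
+//	map[string]interface{}{"a": map[string]interface{}{"b": 1}, "c": 2}
+//
+// yields the shadow set {"a.b": true, "c": true}; any path whose prefix is
+// already present in the shadow set is skipped.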
+
+// mergeFlatMap merges the given maps, excluding values of the second map
+// shadowed by values from the first map.
+func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]interface{}) map[string]bool {
+ // scan keys
+outer:
+ for k := range m {
+ path := strings.Split(k, v.keyDelim)
+ // scan intermediate paths
+ var parentKey string
+ for i := 1; i < len(path); i++ {
+ parentKey = strings.Join(path[0:i], v.keyDelim)
+ if shadow[parentKey] {
+ // path is shadowed, continue
+ continue outer
+ }
+ }
+ // add key
+ shadow[strings.ToLower(k)] = true
+ }
+ return shadow
+}
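+
+// For example (placeholder keys): with shadow = {"foo": true}, a flat key
+// "foo.bar" from the env or pflag map is dropped because its parent path "foo"
+// is already bound higher in the precedence order, while an unrelated key such
+// as "baz" is added to the set.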
+
+// AllSettings merges all settings and returns them as a map[string]interface{}.
+func AllSettings() map[string]interface{} { return v.AllSettings() }
+func (v *Viper) AllSettings() map[string]interface{} {
+ m := map[string]interface{}{}
+ // start from the list of keys, and construct the map one value at a time
+ for _, k := range v.AllKeys() {
+ value := v.Get(k)
+ if value == nil {
+ // should not happen, since AllKeys() returns only keys holding a value,
+ // check just in case anything changes
+ continue
+ }
+ path := strings.Split(k, v.keyDelim)
+ lastKey := strings.ToLower(path[len(path)-1])
+ deepestMap := deepSearch(m, path[0:len(path)-1])
+ // set innermost value
+ deepestMap[lastKey] = value
+ }
+ return m
+}
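+
+// Sketch of the nested shape returned by AllSettings (key and value are
+// placeholders):
+//
+//	v := New()
+//	v.Set("clothing.jacket", "leather")
+//	v.AllSettings() // map[string]interface{}{"clothing": map[string]interface{}{"jacket": "leather"}}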
+
+// SetFs sets the filesystem to use to read configuration.
+func SetFs(fs afero.Fs) { v.SetFs(fs) }
+func (v *Viper) SetFs(fs afero.Fs) {
+ v.fs = fs
+}
+
+// SetConfigName sets name for the config file.
+// Does not include extension.
+func SetConfigName(in string) { v.SetConfigName(in) }
+func (v *Viper) SetConfigName(in string) {
+ if in != "" {
+ v.configName = in
+ v.configFile = ""
+ }
+}
+
+// SetConfigType sets the type of the configuration, e.g. "json". Needed when
+// the type cannot be inferred from the file extension, and for remote or reader sources.
+func SetConfigType(in string) { v.SetConfigType(in) }
+func (v *Viper) SetConfigType(in string) {
+ if in != "" {
+ v.configType = in
+ }
+}
+
+func (v *Viper) getConfigType() string {
+ if v.configType != "" {
+ return v.configType
+ }
+
+ cf, err := v.getConfigFile()
+ if err != nil {
+ return ""
+ }
+
+ ext := filepath.Ext(cf)
+
+ if len(ext) > 1 {
+ return ext[1:]
+ }
+
+ return ""
+}
+
+func (v *Viper) getConfigFile() (string, error) {
+ // if explicitly set, then use it
+ if v.configFile != "" {
+ return v.configFile, nil
+ }
+
+ cf, err := v.findConfigFile()
+ if err != nil {
+ return "", err
+ }
+
+ v.configFile = cf
+ return v.configFile, nil
+}
+
+func (v *Viper) searchInPath(in string) (filename string) {
+ jww.DEBUG.Println("Searching for config in ", in)
+ for _, ext := range SupportedExts {
+ jww.DEBUG.Println("Checking for", filepath.Join(in, v.configName+"."+ext))
+ if b, _ := exists(filepath.Join(in, v.configName+"."+ext)); b {
+ jww.DEBUG.Println("Found: ", filepath.Join(in, v.configName+"."+ext))
+ return filepath.Join(in, v.configName+"."+ext)
+ }
+ }
+
+ return ""
+}
+
+// Search all configPaths for any config file.
+// Returns the first path that exists (and is a config file).
+func (v *Viper) findConfigFile() (string, error) {
+
+ jww.INFO.Println("Searching for config in ", v.configPaths)
+
+ for _, cp := range v.configPaths {
+ file := v.searchInPath(cp)
+ if file != "" {
+ return file, nil
+ }
+ }
+ return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)}
+}
+
+// Debug prints all configuration registries for debugging
+// purposes.
+func Debug() { v.Debug() }
+func (v *Viper) Debug() {
+ fmt.Printf("Aliases:\n%#v\n", v.aliases)
+ fmt.Printf("Override:\n%#v\n", v.override)
+ fmt.Printf("PFlags:\n%#v\n", v.pflags)
+ fmt.Printf("Env:\n%#v\n", v.env)
+ fmt.Printf("Key/Value Store:\n%#v\n", v.kvstore)
+ fmt.Printf("Config:\n%#v\n", v.config)
+ fmt.Printf("Defaults:\n%#v\n", v.defaults)
+}
diff --git a/vendor/github.com/spf13/viper/viper_test.go b/vendor/github.com/spf13/viper/viper_test.go
new file mode 100644
index 000000000..cd7b65cbd
--- /dev/null
+++ b/vendor/github.com/spf13/viper/viper_test.go
@@ -0,0 +1,1154 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package viper
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/spf13/cast"
+
+ "github.com/spf13/pflag"
+ "github.com/stretchr/testify/assert"
+)
+
+var yamlExample = []byte(`Hacker: true
+name: steve
+hobbies:
+- skateboarding
+- snowboarding
+- go
+clothing:
+ jacket: leather
+ trousers: denim
+ pants:
+ size: large
+age: 35
+eyes : brown
+beard: true
+`)
+
+var yamlExampleWithExtras = []byte(`Existing: true
+Bogus: true
+`)
+
+type testUnmarshalExtra struct {
+ Existing bool
+}
+
+var tomlExample = []byte(`
+title = "TOML Example"
+
+[owner]
+organization = "MongoDB"
+Bio = "MongoDB Chief Developer Advocate & Hacker at Large"
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?`)
+
+var jsonExample = []byte(`{
+"id": "0001",
+"type": "donut",
+"name": "Cake",
+"ppu": 0.55,
+"batters": {
+ "batter": [
+ { "type": "Regular" },
+ { "type": "Chocolate" },
+ { "type": "Blueberry" },
+ { "type": "Devil's Food" }
+ ]
+ }
+}`)
+
+var hclExample = []byte(`
+id = "0001"
+type = "donut"
+name = "Cake"
+ppu = 0.55
+foos {
+ foo {
+ key = 1
+ }
+ foo {
+ key = 2
+ }
+ foo {
+ key = 3
+ }
+ foo {
+ key = 4
+ }
+}`)
+
+var propertiesExample = []byte(`
+p_id: 0001
+p_type: donut
+p_name: Cake
+p_ppu: 0.55
+p_batters.batter.type: Regular
+`)
+
+var remoteExample = []byte(`{
+"id":"0002",
+"type":"cronut",
+"newkey":"remote"
+}`)
+
+func initConfigs() {
+ Reset()
+ var r io.Reader
+ SetConfigType("yaml")
+ r = bytes.NewReader(yamlExample)
+ unmarshalReader(r, v.config)
+
+ SetConfigType("json")
+ r = bytes.NewReader(jsonExample)
+ unmarshalReader(r, v.config)
+
+ SetConfigType("hcl")
+ r = bytes.NewReader(hclExample)
+ unmarshalReader(r, v.config)
+
+ SetConfigType("properties")
+ r = bytes.NewReader(propertiesExample)
+ unmarshalReader(r, v.config)
+
+ SetConfigType("toml")
+ r = bytes.NewReader(tomlExample)
+ unmarshalReader(r, v.config)
+
+ SetConfigType("json")
+ remote := bytes.NewReader(remoteExample)
+ unmarshalReader(remote, v.kvstore)
+}
+
+func initConfig(typ, config string) {
+ Reset()
+ SetConfigType(typ)
+ r := strings.NewReader(config)
+
+ if err := unmarshalReader(r, v.config); err != nil {
+ panic(err)
+ }
+}
+
+func initYAML() {
+ initConfig("yaml", string(yamlExample))
+}
+
+func initJSON() {
+ Reset()
+ SetConfigType("json")
+ r := bytes.NewReader(jsonExample)
+
+ unmarshalReader(r, v.config)
+}
+
+func initProperties() {
+ Reset()
+ SetConfigType("properties")
+ r := bytes.NewReader(propertiesExample)
+
+ unmarshalReader(r, v.config)
+}
+
+func initTOML() {
+ Reset()
+ SetConfigType("toml")
+ r := bytes.NewReader(tomlExample)
+
+ unmarshalReader(r, v.config)
+}
+
+func initHcl() {
+ Reset()
+ SetConfigType("hcl")
+ r := bytes.NewReader(hclExample)
+
+ unmarshalReader(r, v.config)
+}
+
+// make directories for testing
+func initDirs(t *testing.T) (string, string, func()) {
+
+ var (
+ testDirs = []string{`a a`, `b`, `c\c`, `D_`}
+ config = `improbable`
+ )
+
+ root, err := ioutil.TempDir("", "")
+
+ cleanup := true
+ defer func() {
+ if cleanup {
+ os.Chdir("..")
+ os.RemoveAll(root)
+ }
+ }()
+
+ assert.Nil(t, err)
+
+ err = os.Chdir(root)
+ assert.Nil(t, err)
+
+ for _, dir := range testDirs {
+ err = os.Mkdir(dir, 0750)
+ assert.Nil(t, err)
+
+ err = ioutil.WriteFile(
+ path.Join(dir, config+".toml"),
+ []byte("key = \"value is "+dir+"\"\n"),
+ 0640)
+ assert.Nil(t, err)
+ }
+
+ cleanup = false
+ return root, config, func() {
+ os.Chdir("..")
+ os.RemoveAll(root)
+ }
+}
+
+// stubs for pflag Values
+type stringValue string
+
+func newStringValue(val string, p *string) *stringValue {
+ *p = val
+ return (*stringValue)(p)
+}
+
+func (s *stringValue) Set(val string) error {
+ *s = stringValue(val)
+ return nil
+}
+
+func (s *stringValue) Type() string {
+ return "string"
+}
+
+func (s *stringValue) String() string {
+ return fmt.Sprintf("%s", *s)
+}
+
+func TestBasics(t *testing.T) {
+ SetConfigFile("/tmp/config.yaml")
+ filename, err := v.getConfigFile()
+ assert.Equal(t, "/tmp/config.yaml", filename)
+ assert.NoError(t, err)
+}
+
+func TestDefault(t *testing.T) {
+ SetDefault("age", 45)
+ assert.Equal(t, 45, Get("age"))
+
+ SetDefault("clothing.jacket", "slacks")
+ assert.Equal(t, "slacks", Get("clothing.jacket"))
+
+ SetConfigType("yaml")
+ err := ReadConfig(bytes.NewBuffer(yamlExample))
+
+ assert.NoError(t, err)
+ assert.Equal(t, "leather", Get("clothing.jacket"))
+}
+
+func TestUnmarshalling(t *testing.T) {
+ SetConfigType("yaml")
+ r := bytes.NewReader(yamlExample)
+
+ unmarshalReader(r, v.config)
+ assert.True(t, InConfig("name"))
+ assert.False(t, InConfig("state"))
+ assert.Equal(t, "steve", Get("name"))
+ assert.Equal(t, []interface{}{"skateboarding", "snowboarding", "go"}, Get("hobbies"))
+ assert.Equal(t, map[string]interface{}{"jacket": "leather", "trousers": "denim", "pants": map[string]interface{}{"size": "large"}}, Get("clothing"))
+ assert.Equal(t, 35, Get("age"))
+}
+
+func TestUnmarshalExact(t *testing.T) {
+ vip := New()
+ target := &testUnmarshalExtra{}
+ vip.SetConfigType("yaml")
+ r := bytes.NewReader(yamlExampleWithExtras)
+ vip.ReadConfig(r)
+ err := vip.UnmarshalExact(target)
+ if err == nil {
+ t.Fatal("UnmarshalExact should error when populating a struct from a conf that contains unused fields")
+ }
+}
+
+func TestOverrides(t *testing.T) {
+ Set("age", 40)
+ assert.Equal(t, 40, Get("age"))
+}
+
+func TestDefaultPost(t *testing.T) {
+ assert.NotEqual(t, "NYC", Get("state"))
+ SetDefault("state", "NYC")
+ assert.Equal(t, "NYC", Get("state"))
+}
+
+func TestAliases(t *testing.T) {
+ RegisterAlias("years", "age")
+ assert.Equal(t, 40, Get("years"))
+ Set("years", 45)
+ assert.Equal(t, 45, Get("age"))
+}
+
+func TestAliasInConfigFile(t *testing.T) {
+ // the config file specifies "beard". If we make this an alias for
+ // "hasbeard", we still want the old config file to work with beard.
+ RegisterAlias("beard", "hasbeard")
+ assert.Equal(t, true, Get("hasbeard"))
+ Set("hasbeard", false)
+ assert.Equal(t, false, Get("beard"))
+}
+
+func TestYML(t *testing.T) {
+ initYAML()
+ assert.Equal(t, "steve", Get("name"))
+}
+
+func TestJSON(t *testing.T) {
+ initJSON()
+ assert.Equal(t, "0001", Get("id"))
+}
+
+func TestProperties(t *testing.T) {
+ initProperties()
+ assert.Equal(t, "0001", Get("p_id"))
+}
+
+func TestTOML(t *testing.T) {
+ initTOML()
+ assert.Equal(t, "TOML Example", Get("title"))
+}
+
+func TestHCL(t *testing.T) {
+ initHcl()
+ assert.Equal(t, "0001", Get("id"))
+ assert.Equal(t, 0.55, Get("ppu"))
+ assert.Equal(t, "donut", Get("type"))
+ assert.Equal(t, "Cake", Get("name"))
+ Set("id", "0002")
+ assert.Equal(t, "0002", Get("id"))
+ assert.NotEqual(t, "cronut", Get("type"))
+}
+
+func TestRemotePrecedence(t *testing.T) {
+ initJSON()
+
+ remote := bytes.NewReader(remoteExample)
+ assert.Equal(t, "0001", Get("id"))
+ unmarshalReader(remote, v.kvstore)
+ assert.Equal(t, "0001", Get("id"))
+ assert.NotEqual(t, "cronut", Get("type"))
+ assert.Equal(t, "remote", Get("newkey"))
+ Set("newkey", "newvalue")
+ assert.NotEqual(t, "remote", Get("newkey"))
+ assert.Equal(t, "newvalue", Get("newkey"))
+ Set("newkey", "remote")
+}
+
+func TestEnv(t *testing.T) {
+ initJSON()
+
+ BindEnv("id")
+ BindEnv("f", "FOOD")
+
+ os.Setenv("ID", "13")
+ os.Setenv("FOOD", "apple")
+ os.Setenv("NAME", "crunk")
+
+ assert.Equal(t, "13", Get("id"))
+ assert.Equal(t, "apple", Get("f"))
+ assert.Equal(t, "Cake", Get("name"))
+
+ AutomaticEnv()
+
+ assert.Equal(t, "crunk", Get("name"))
+
+}
+
+func TestEnvPrefix(t *testing.T) {
+ initJSON()
+
+ SetEnvPrefix("foo") // will be uppercased automatically
+ BindEnv("id")
+ BindEnv("f", "FOOD") // not using prefix
+
+ os.Setenv("FOO_ID", "13")
+ os.Setenv("FOOD", "apple")
+ os.Setenv("FOO_NAME", "crunk")
+
+ assert.Equal(t, "13", Get("id"))
+ assert.Equal(t, "apple", Get("f"))
+ assert.Equal(t, "Cake", Get("name"))
+
+ AutomaticEnv()
+
+ assert.Equal(t, "crunk", Get("name"))
+}
+
+func TestAutoEnv(t *testing.T) {
+ Reset()
+
+ AutomaticEnv()
+ os.Setenv("FOO_BAR", "13")
+ assert.Equal(t, "13", Get("foo_bar"))
+}
+
+func TestAutoEnvWithPrefix(t *testing.T) {
+ Reset()
+
+ AutomaticEnv()
+ SetEnvPrefix("Baz")
+ os.Setenv("BAZ_BAR", "13")
+ assert.Equal(t, "13", Get("bar"))
+}
+
+func TestSetEnvReplacer(t *testing.T) {
+ Reset()
+
+ AutomaticEnv()
+ os.Setenv("REFRESH_INTERVAL", "30s")
+
+ replacer := strings.NewReplacer("-", "_")
+ SetEnvKeyReplacer(replacer)
+
+ assert.Equal(t, "30s", Get("refresh-interval"))
+}
+
+func TestAllKeys(t *testing.T) {
+ initConfigs()
+
+ ks := sort.StringSlice{"title", "newkey", "owner.organization", "owner.dob", "owner.bio", "name", "beard", "ppu", "batters.batter", "hobbies", "clothing.jacket", "clothing.trousers", "clothing.pants.size", "age", "hacker", "id", "type", "eyes", "p_id", "p_ppu", "p_batters.batter.type", "p_type", "p_name", "foos"}
+ dob, _ := time.Parse(time.RFC3339, "1979-05-27T07:32:00Z")
+ all := map[string]interface{}{"owner": map[string]interface{}{"organization": "MongoDB", "bio": "MongoDB Chief Developer Advocate & Hacker at Large", "dob": dob}, "title": "TOML Example", "ppu": 0.55, "eyes": "brown", "clothing": map[string]interface{}{"trousers": "denim", "jacket": "leather", "pants": map[string]interface{}{"size": "large"}}, "id": "0001", "batters": map[string]interface{}{"batter": []interface{}{map[string]interface{}{"type": "Regular"}, map[string]interface{}{"type": "Chocolate"}, map[string]interface{}{"type": "Blueberry"}, map[string]interface{}{"type": "Devil's Food"}}}, "hacker": true, "beard": true, "hobbies": []interface{}{"skateboarding", "snowboarding", "go"}, "age": 35, "type": "donut", "newkey": "remote", "name": "Cake", "p_id": "0001", "p_ppu": "0.55", "p_name": "Cake", "p_batters": map[string]interface{}{"batter": map[string]interface{}{"type": "Regular"}}, "p_type": "donut", "foos": []map[string]interface{}{map[string]interface{}{"foo": []map[string]interface{}{map[string]interface{}{"key": 1}, map[string]interface{}{"key": 2}, map[string]interface{}{"key": 3}, map[string]interface{}{"key": 4}}}}}
+
+ var allkeys sort.StringSlice
+ allkeys = AllKeys()
+ allkeys.Sort()
+ ks.Sort()
+
+ assert.Equal(t, ks, allkeys)
+ assert.Equal(t, all, AllSettings())
+}
+
+func TestAllKeysWithEnv(t *testing.T) {
+ v := New()
+
+ // bind and define environment variables (including a nested one)
+ v.BindEnv("id")
+ v.BindEnv("foo.bar")
+ v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
+ os.Setenv("ID", "13")
+ os.Setenv("FOO_BAR", "baz")
+
+ expectedKeys := sort.StringSlice{"id", "foo.bar"}
+ expectedKeys.Sort()
+ keys := sort.StringSlice(v.AllKeys())
+ keys.Sort()
+ assert.Equal(t, expectedKeys, keys)
+}
+
+func TestAliasesOfAliases(t *testing.T) {
+ Set("Title", "Checking Case")
+ RegisterAlias("Foo", "Bar")
+ RegisterAlias("Bar", "Title")
+ assert.Equal(t, "Checking Case", Get("FOO"))
+}
+
+func TestRecursiveAliases(t *testing.T) {
+ RegisterAlias("Baz", "Roo")
+ RegisterAlias("Roo", "baz")
+}
+
+func TestUnmarshal(t *testing.T) {
+ SetDefault("port", 1313)
+ Set("name", "Steve")
+ Set("duration", "1s1ms")
+
+ type config struct {
+ Port int
+ Name string
+ Duration time.Duration
+ }
+
+ var C config
+
+ err := Unmarshal(&C)
+ if err != nil {
+ t.Fatalf("unable to decode into struct, %v", err)
+ }
+
+ assert.Equal(t, &config{Name: "Steve", Port: 1313, Duration: time.Second + time.Millisecond}, &C)
+
+ Set("port", 1234)
+ err = Unmarshal(&C)
+ if err != nil {
+ t.Fatalf("unable to decode into struct, %v", err)
+ }
+ assert.Equal(t, &config{Name: "Steve", Port: 1234, Duration: time.Second + time.Millisecond}, &C)
+}
+
+func TestBindPFlags(t *testing.T) {
+ v := New() // create independent Viper object
+ flagSet := pflag.NewFlagSet("test", pflag.ContinueOnError)
+
+ var testValues = map[string]*string{
+ "host": nil,
+ "port": nil,
+ "endpoint": nil,
+ }
+
+ var mutatedTestValues = map[string]string{
+ "host": "localhost",
+ "port": "6060",
+ "endpoint": "/public",
+ }
+
+ for name := range testValues {
+ testValues[name] = flagSet.String(name, "", "test")
+ }
+
+ err := v.BindPFlags(flagSet)
+ if err != nil {
+ t.Fatalf("error binding flag set, %v", err)
+ }
+
+ flagSet.VisitAll(func(flag *pflag.Flag) {
+ flag.Value.Set(mutatedTestValues[flag.Name])
+ flag.Changed = true
+ })
+
+ for name, expected := range mutatedTestValues {
+ assert.Equal(t, expected, v.Get(name))
+ }
+
+}
+
+func TestBindPFlag(t *testing.T) {
+ var testString = "testing"
+ var testValue = newStringValue(testString, &testString)
+
+ flag := &pflag.Flag{
+ Name: "testflag",
+ Value: testValue,
+ Changed: false,
+ }
+
+ BindPFlag("testvalue", flag)
+
+ assert.Equal(t, testString, Get("testvalue"))
+
+ flag.Value.Set("testing_mutate")
+ flag.Changed = true //hack for pflag usage
+
+ assert.Equal(t, "testing_mutate", Get("testvalue"))
+
+}
+
+func TestBoundCaseSensitivity(t *testing.T) {
+ assert.Equal(t, "brown", Get("eyes"))
+
+ BindEnv("eYEs", "TURTLE_EYES")
+ os.Setenv("TURTLE_EYES", "blue")
+
+ assert.Equal(t, "blue", Get("eyes"))
+
+ var testString = "green"
+ var testValue = newStringValue(testString, &testString)
+
+ flag := &pflag.Flag{
+ Name: "eyeballs",
+ Value: testValue,
+ Changed: true,
+ }
+
+ BindPFlag("eYEs", flag)
+ assert.Equal(t, "green", Get("eyes"))
+
+}
+
+func TestSizeInBytes(t *testing.T) {
+ input := map[string]uint{
+ "": 0,
+ "b": 0,
+ "12 bytes": 0,
+ "200000000000gb": 0,
+ "12 b": 12,
+ "43 MB": 43 * (1 << 20),
+ "10mb": 10 * (1 << 20),
+ "1gb": 1 << 30,
+ }
+
+ for str, expected := range input {
+ assert.Equal(t, expected, parseSizeInBytes(str), str)
+ }
+}
+
+func TestFindsNestedKeys(t *testing.T) {
+ initConfigs()
+ dob, _ := time.Parse(time.RFC3339, "1979-05-27T07:32:00Z")
+
+ Set("super", map[string]interface{}{
+ "deep": map[string]interface{}{
+ "nested": "value",
+ },
+ })
+
+ expected := map[string]interface{}{
+ "super": map[string]interface{}{
+ "deep": map[string]interface{}{
+ "nested": "value",
+ },
+ },
+ "super.deep": map[string]interface{}{
+ "nested": "value",
+ },
+ "super.deep.nested": "value",
+ "owner.organization": "MongoDB",
+ "batters.batter": []interface{}{
+ map[string]interface{}{
+ "type": "Regular",
+ },
+ map[string]interface{}{
+ "type": "Chocolate",
+ },
+ map[string]interface{}{
+ "type": "Blueberry",
+ },
+ map[string]interface{}{
+ "type": "Devil's Food",
+ },
+ },
+ "hobbies": []interface{}{
+ "skateboarding", "snowboarding", "go",
+ },
+ "title": "TOML Example",
+ "newkey": "remote",
+ "batters": map[string]interface{}{
+ "batter": []interface{}{
+ map[string]interface{}{
+ "type": "Regular",
+ },
+ map[string]interface{}{
+ "type": "Chocolate",
+ }, map[string]interface{}{
+ "type": "Blueberry",
+ }, map[string]interface{}{
+ "type": "Devil's Food",
+ },
+ },
+ },
+ "eyes": "brown",
+ "age": 35,
+ "owner": map[string]interface{}{
+ "organization": "MongoDB",
+ "bio": "MongoDB Chief Developer Advocate & Hacker at Large",
+ "dob": dob,
+ },
+ "owner.bio": "MongoDB Chief Developer Advocate & Hacker at Large",
+ "type": "donut",
+ "id": "0001",
+ "name": "Cake",
+ "hacker": true,
+ "ppu": 0.55,
+ "clothing": map[string]interface{}{
+ "jacket": "leather",
+ "trousers": "denim",
+ "pants": map[string]interface{}{
+ "size": "large",
+ },
+ },
+ "clothing.jacket": "leather",
+ "clothing.pants.size": "large",
+ "clothing.trousers": "denim",
+ "owner.dob": dob,
+ "beard": true,
+ "foos": []map[string]interface{}{
+ map[string]interface{}{
+ "foo": []map[string]interface{}{
+ map[string]interface{}{
+ "key": 1,
+ },
+ map[string]interface{}{
+ "key": 2,
+ },
+ map[string]interface{}{
+ "key": 3,
+ },
+ map[string]interface{}{
+ "key": 4,
+ },
+ },
+ },
+ },
+ }
+
+ for key, expectedValue := range expected {
+
+ assert.Equal(t, expectedValue, v.Get(key))
+ }
+
+}
+
+func TestReadBufConfig(t *testing.T) {
+ v := New()
+ v.SetConfigType("yaml")
+ v.ReadConfig(bytes.NewBuffer(yamlExample))
+ t.Log(v.AllKeys())
+
+ assert.True(t, v.InConfig("name"))
+ assert.False(t, v.InConfig("state"))
+ assert.Equal(t, "steve", v.Get("name"))
+ assert.Equal(t, []interface{}{"skateboarding", "snowboarding", "go"}, v.Get("hobbies"))
+ assert.Equal(t, map[string]interface{}{"jacket": "leather", "trousers": "denim", "pants": map[string]interface{}{"size": "large"}}, v.Get("clothing"))
+ assert.Equal(t, 35, v.Get("age"))
+}
+
+func TestIsSet(t *testing.T) {
+ v := New()
+ v.SetConfigType("yaml")
+ v.ReadConfig(bytes.NewBuffer(yamlExample))
+ assert.True(t, v.IsSet("clothing.jacket"))
+ assert.False(t, v.IsSet("clothing.jackets"))
+ assert.False(t, v.IsSet("helloworld"))
+ v.Set("helloworld", "fubar")
+ assert.True(t, v.IsSet("helloworld"))
+}
+
+func TestDirsSearch(t *testing.T) {
+
+ root, config, cleanup := initDirs(t)
+ defer cleanup()
+
+ v := New()
+ v.SetConfigName(config)
+ v.SetDefault(`key`, `default`)
+
+ entries, err := ioutil.ReadDir(root)
+ for _, e := range entries {
+ if e.IsDir() {
+ v.AddConfigPath(e.Name())
+ }
+ }
+
+ err = v.ReadInConfig()
+ assert.Nil(t, err)
+
+ assert.Equal(t, `value is `+path.Base(v.configPaths[0]), v.GetString(`key`))
+}
+
+func TestWrongDirsSearchNotFound(t *testing.T) {
+
+ _, config, cleanup := initDirs(t)
+ defer cleanup()
+
+ v := New()
+ v.SetConfigName(config)
+ v.SetDefault(`key`, `default`)
+
+ v.AddConfigPath(`whattayoutalkingbout`)
+ v.AddConfigPath(`thispathaintthere`)
+
+ err := v.ReadInConfig()
+ assert.Equal(t, reflect.TypeOf(ConfigFileNotFoundError{"", ""}), reflect.TypeOf(err))
+
+ // Even though config did not load and the error might have
+ // been ignored by the client, the default still loads
+ assert.Equal(t, `default`, v.GetString(`key`))
+}
+
+func TestWrongDirsSearchNotFoundForMerge(t *testing.T) {
+
+ _, config, cleanup := initDirs(t)
+ defer cleanup()
+
+ v := New()
+ v.SetConfigName(config)
+ v.SetDefault(`key`, `default`)
+
+ v.AddConfigPath(`whattayoutalkingbout`)
+ v.AddConfigPath(`thispathaintthere`)
+
+ err := v.MergeInConfig()
+ assert.Equal(t, reflect.TypeOf(ConfigFileNotFoundError{"", ""}), reflect.TypeOf(err))
+
+ // Even though config did not load and the error might have
+ // been ignored by the client, the default still loads
+ assert.Equal(t, `default`, v.GetString(`key`))
+}
+
+func TestSub(t *testing.T) {
+ v := New()
+ v.SetConfigType("yaml")
+ v.ReadConfig(bytes.NewBuffer(yamlExample))
+
+ subv := v.Sub("clothing")
+ assert.Equal(t, v.Get("clothing.pants.size"), subv.Get("pants.size"))
+
+ subv = v.Sub("clothing.pants")
+ assert.Equal(t, v.Get("clothing.pants.size"), subv.Get("size"))
+
+ subv = v.Sub("clothing.pants.size")
+ assert.Equal(t, (*Viper)(nil), subv)
+
+ subv = v.Sub("missing.key")
+ assert.Equal(t, (*Viper)(nil), subv)
+}
+
+var yamlMergeExampleTgt = []byte(`
+hello:
+ pop: 37890
+ lagrenum: 765432101234567
+ world:
+ - us
+ - uk
+ - fr
+ - de
+`)
+
+var yamlMergeExampleSrc = []byte(`
+hello:
+ pop: 45000
+ lagrenum: 7654321001234567
+ universe:
+ - mw
+ - ad
+fu: bar
+`)
+
+func TestMergeConfig(t *testing.T) {
+ v := New()
+ v.SetConfigType("yml")
+ if err := v.ReadConfig(bytes.NewBuffer(yamlMergeExampleTgt)); err != nil {
+ t.Fatal(err)
+ }
+
+ if pop := v.GetInt("hello.pop"); pop != 37890 {
+ t.Fatalf("pop != 37890, = %d", pop)
+ }
+
+ if pop := v.GetInt("hello.lagrenum"); pop != 765432101234567 {
+ t.Fatalf("lagrenum != 765432101234567, = %d", pop)
+ }
+
+ if pop := v.GetInt64("hello.lagrenum"); pop != int64(765432101234567) {
+ t.Fatalf("int64 lagrenum != 765432101234567, = %d", pop)
+ }
+
+ if world := v.GetStringSlice("hello.world"); len(world) != 4 {
+ t.Fatalf("len(world) != 4, = %d", len(world))
+ }
+
+ if fu := v.GetString("fu"); fu != "" {
+ t.Fatalf("fu != \"\", = %s", fu)
+ }
+
+ if err := v.MergeConfig(bytes.NewBuffer(yamlMergeExampleSrc)); err != nil {
+ t.Fatal(err)
+ }
+
+ if pop := v.GetInt("hello.pop"); pop != 45000 {
+ t.Fatalf("pop != 45000, = %d", pop)
+ }
+
+ if pop := v.GetInt("hello.lagrenum"); pop != 7654321001234567 {
+ t.Fatalf("lagrenum != 7654321001234567, = %d", pop)
+ }
+
+ if pop := v.GetInt64("hello.lagrenum"); pop != int64(7654321001234567) {
+ t.Fatalf("int64 lagrenum != 7654321001234567, = %d", pop)
+ }
+
+ if world := v.GetStringSlice("hello.world"); len(world) != 4 {
+ t.Fatalf("len(world) != 4, = %d", len(world))
+ }
+
+ if universe := v.GetStringSlice("hello.universe"); len(universe) != 2 {
+ t.Fatalf("len(universe) != 2, = %d", len(universe))
+ }
+
+ if fu := v.GetString("fu"); fu != "bar" {
+ t.Fatalf("fu != \"bar\", = %s", fu)
+ }
+}
+
+func TestMergeConfigNoMerge(t *testing.T) {
+ v := New()
+ v.SetConfigType("yml")
+ if err := v.ReadConfig(bytes.NewBuffer(yamlMergeExampleTgt)); err != nil {
+ t.Fatal(err)
+ }
+
+ if pop := v.GetInt("hello.pop"); pop != 37890 {
+ t.Fatalf("pop != 37890, = %d", pop)
+ }
+
+ if world := v.GetStringSlice("hello.world"); len(world) != 4 {
+ t.Fatalf("len(world) != 4, = %d", len(world))
+ }
+
+ if fu := v.GetString("fu"); fu != "" {
+ t.Fatalf("fu != \"\", = %s", fu)
+ }
+
+ if err := v.ReadConfig(bytes.NewBuffer(yamlMergeExampleSrc)); err != nil {
+ t.Fatal(err)
+ }
+
+ if pop := v.GetInt("hello.pop"); pop != 45000 {
+ t.Fatalf("pop != 45000, = %d", pop)
+ }
+
+ if world := v.GetStringSlice("hello.world"); len(world) != 0 {
+ t.Fatalf("len(world) != 0, = %d", len(world))
+ }
+
+ if universe := v.GetStringSlice("hello.universe"); len(universe) != 2 {
+ t.Fatalf("len(universe) != 2, = %d", len(universe))
+ }
+
+ if fu := v.GetString("fu"); fu != "bar" {
+ t.Fatalf("fu != \"bar\", = %s", fu)
+ }
+}
+
+func TestUnmarshalingWithAliases(t *testing.T) {
+ v := New()
+ v.SetDefault("ID", 1)
+ v.Set("name", "Steve")
+ v.Set("lastname", "Owen")
+
+ v.RegisterAlias("UserID", "ID")
+ v.RegisterAlias("Firstname", "name")
+ v.RegisterAlias("Surname", "lastname")
+
+ type config struct {
+ ID int
+ FirstName string
+ Surname string
+ }
+
+ var C config
+ err := v.Unmarshal(&C)
+ if err != nil {
+ t.Fatalf("unable to decode into struct, %v", err)
+ }
+
+ assert.Equal(t, &config{ID: 1, FirstName: "Steve", Surname: "Owen"}, &C)
+}
+
+func TestSetConfigNameClearsFileCache(t *testing.T) {
+ SetConfigFile("/tmp/config.yaml")
+ SetConfigName("default")
+ f, err := v.getConfigFile()
+ if err == nil {
+ t.Fatalf("config file cache should have been cleared")
+ }
+ assert.Empty(t, f)
+}
+
+func TestShadowedNestedValue(t *testing.T) {
+
+ config := `name: steve
+clothing:
+ jacket: leather
+ trousers: denim
+ pants:
+ size: large
+`
+ initConfig("yaml", config)
+
+ assert.Equal(t, "steve", GetString("name"))
+
+ polyester := "polyester"
+ SetDefault("clothing.shirt", polyester)
+ SetDefault("clothing.jacket.price", 100)
+
+ assert.Equal(t, "leather", GetString("clothing.jacket"))
+ assert.Nil(t, Get("clothing.jacket.price"))
+ assert.Equal(t, polyester, GetString("clothing.shirt"))
+
+ clothingSettings := AllSettings()["clothing"].(map[string]interface{})
+ assert.Equal(t, "leather", clothingSettings["jacket"])
+ assert.Equal(t, polyester, clothingSettings["shirt"])
+}
+
+func TestDotParameter(t *testing.T) {
+ initJSON()
+ // should take precedence over batters defined in jsonExample
+ r := bytes.NewReader([]byte(`{ "batters.batter": [ { "type": "Small" } ] }`))
+ unmarshalReader(r, v.config)
+
+ actual := Get("batters.batter")
+ expected := []interface{}{map[string]interface{}{"type": "Small"}}
+ assert.Equal(t, expected, actual)
+}
+
+func TestCaseInsensitive(t *testing.T) {
+ for _, config := range []struct {
+ typ string
+ content string
+ }{
+ {"yaml", `
+aBcD: 1
+eF:
+ gH: 2
+ iJk: 3
+ Lm:
+ nO: 4
+ P:
+ Q: 5
+ R: 6
+`},
+ {"json", `{
+ "aBcD": 1,
+ "eF": {
+ "iJk": 3,
+ "Lm": {
+ "P": {
+ "Q": 5,
+ "R": 6
+ },
+ "nO": 4
+ },
+ "gH": 2
+ }
+}`},
+ {"toml", `aBcD = 1
+[eF]
+gH = 2
+iJk = 3
+[eF.Lm]
+nO = 4
+[eF.Lm.P]
+Q = 5
+R = 6
+`},
+ } {
+ doTestCaseInsensitive(t, config.typ, config.content)
+ }
+}
+
+func TestCaseInsensitiveSet(t *testing.T) {
+ Reset()
+ m1 := map[string]interface{}{
+ "Foo": 32,
+ "Bar": map[interface{}]interface {
+ }{
+ "ABc": "A",
+ "cDE": "B"},
+ }
+
+ m2 := map[string]interface{}{
+ "Foo": 52,
+ "Bar": map[interface{}]interface {
+ }{
+ "bCd": "A",
+ "eFG": "B"},
+ }
+
+ Set("Given1", m1)
+ Set("Number1", 42)
+
+ SetDefault("Given2", m2)
+ SetDefault("Number2", 52)
+
+ // Verify SetDefault
+ if v := Get("number2"); v != 52 {
+ t.Fatalf("Expected 52 got %q", v)
+ }
+
+ if v := Get("given2.foo"); v != 52 {
+ t.Fatalf("Expected 52 got %q", v)
+ }
+
+ if v := Get("given2.bar.bcd"); v != "A" {
+ t.Fatalf("Expected A got %q", v)
+ }
+
+ if _, ok := m2["Foo"]; !ok {
+ t.Fatal("Input map changed")
+ }
+
+ // Verify Set
+ if v := Get("number1"); v != 42 {
+ t.Fatalf("Expected 42 got %q", v)
+ }
+
+ if v := Get("given1.foo"); v != 32 {
+ t.Fatalf("Expected 32 got %q", v)
+ }
+
+ if v := Get("given1.bar.abc"); v != "A" {
+ t.Fatalf("Expected A got %q", v)
+ }
+
+ if _, ok := m1["Foo"]; !ok {
+ t.Fatal("Input map changed")
+ }
+}
+
+func doTestCaseInsensitive(t *testing.T, typ, config string) {
+ initConfig(typ, config)
+ Set("RfD", true)
+ assert.Equal(t, true, Get("rfd"))
+ assert.Equal(t, true, Get("rFD"))
+ assert.Equal(t, 1, cast.ToInt(Get("abcd")))
+ assert.Equal(t, 1, cast.ToInt(Get("Abcd")))
+ assert.Equal(t, 2, cast.ToInt(Get("ef.gh")))
+ assert.Equal(t, 3, cast.ToInt(Get("ef.ijk")))
+ assert.Equal(t, 4, cast.ToInt(Get("ef.lm.no")))
+ assert.Equal(t, 5, cast.ToInt(Get("ef.lm.p.q")))
+
+}
+
+func BenchmarkGetBool(b *testing.B) {
+ key := "BenchmarkGetBool"
+ v = New()
+ v.Set(key, true)
+
+ for i := 0; i < b.N; i++ {
+ if !v.GetBool(key) {
+ b.Fatal("GetBool returned false")
+ }
+ }
+}
+
+func BenchmarkGet(b *testing.B) {
+ key := "BenchmarkGet"
+ v = New()
+ v.Set(key, true)
+
+ for i := 0; i < b.N; i++ {
+ if !v.Get(key).(bool) {
+ b.Fatal("Get returned false")
+ }
+ }
+}
+
+// This is the "perfect result" for the above.
+func BenchmarkGetBoolFromMap(b *testing.B) {
+ m := make(map[string]bool)
+ key := "BenchmarkGetBool"
+ m[key] = true
+
+ for i := 0; i < b.N; i++ {
+ if !m[key] {
+ b.Fatal("Map value was false")
+ }
+ }
+}