Diffstat (limited to 'vendor/github.com/mattermost/rsc/arq')
-rw-r--r--  vendor/github.com/mattermost/rsc/arq/arq.go         | 663
-rw-r--r--  vendor/github.com/mattermost/rsc/arq/arqfs/main.go  | 247
-rw-r--r--  vendor/github.com/mattermost/rsc/arq/crypto.go      |  93
-rw-r--r--  vendor/github.com/mattermost/rsc/arq/data.go        | 240
-rw-r--r--  vendor/github.com/mattermost/rsc/arq/hist/hist.go   | 160
-rw-r--r--  vendor/github.com/mattermost/rsc/arq/unpack.go      | 227
6 files changed, 0 insertions, 1630 deletions
diff --git a/vendor/github.com/mattermost/rsc/arq/arq.go b/vendor/github.com/mattermost/rsc/arq/arq.go
deleted file mode 100644
index 85a5138e9..000000000
--- a/vendor/github.com/mattermost/rsc/arq/arq.go
+++ /dev/null
@@ -1,663 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package arq implements read-only access to Arq backups stored on S3.
-// Arq is a Mac backup tool (http://www.haystacksoftware.com/arq/)
-// but the package can read the backups regardless of operating system.
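-//
-// A typical read sequence (a sketch; error handling elided, and auth and
-// password are caller-supplied placeholders):
-//
-//	c, _ := arq.Dial(auth)            // auth: an aws.Auth with S3 credentials
-//	comps, _ := c.Computers()
-//	comps[0].Unlock(password)         // backup encryption password
-//	folders, _ := comps[0].Folders()
-//	folders[0].Load()                 // fetch the folder's pack indexes
-//	trees, _ := folders[0].Trees()
-//	root, _ := trees[0].Root()        // then Lookup, ReadDir, Open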
-package arq
-
-import (
- "bytes"
- "compress/gzip"
- "encoding/binary"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "os"
- "path/filepath"
- "runtime"
- "strings"
- "time"
-
- "github.com/mattermost/rsc/plist"
- "launchpad.net/goamz/aws"
- "launchpad.net/goamz/s3"
-)
-
-// A Conn represents a connection to an S3 server holding Arq backups.
-type Conn struct {
- b *s3.Bucket
- cache string
- altCache string
-}
-
-// cachedir returns the canonical directory in which to cache data.
-func cachedir() string {
- if runtime.GOOS == "darwin" {
- return filepath.Join(os.Getenv("HOME"), "Library/Caches/arq-cache")
- }
- return filepath.Join(os.Getenv("HOME"), ".cache/arq-cache")
-}
-
-// Dial establishes a connection to an S3 server holding Arq backups.
-func Dial(auth aws.Auth) (*Conn, error) {
- buck := fmt.Sprintf("%s-com-haystacksoftware-arq", strings.ToLower(auth.AccessKey))
- b := s3.New(auth, aws.USEast).Bucket(buck)
- c := &Conn{
- b: b,
- cache: filepath.Join(cachedir(), buck),
- }
- if runtime.GOOS == "darwin" {
- c.altCache = filepath.Join(os.Getenv("HOME"), "Library/Arq/Cache.noindex/"+buck)
- }
-
- // Check that the bucket works by listing computers (relatively cheap).
- if _, err := c.list("", "/", 10); err != nil {
- return nil, err
- }
-
- // The local S3 lookaside cache directory is created lazily by cget.
-
- return c, nil
-}
-
-func (c *Conn) list(prefix, delim string, max int) (*s3.ListResp, error) {
- resp, err := c.b.List(prefix, delim, "", max)
- if err != nil {
- return nil, err
- }
- ret := resp
- for max == 0 && resp.IsTruncated {
- last := resp.Contents[len(resp.Contents)-1].Key
- resp, err = c.b.List(prefix, delim, last, max)
- if err != nil {
- return ret, err
- }
- ret.Contents = append(ret.Contents, resp.Contents...)
- ret.CommonPrefixes = append(ret.CommonPrefixes, resp.CommonPrefixes...)
- }
- return ret, nil
-}
-
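-// altCachePath maps a packset object name onto Arq's own cache layout,
-// which shards pack files into two-hex-digit subdirectories: e.g. a name
-// ending in "-trees/0123abcd....index" becomes ".../-trees/01/23abcd....index"
-// under altCache. Names outside packsets have no alternate cache path.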
-func (c *Conn) altCachePath(name string) string {
- if c.altCache == "" || !strings.Contains(name, "/packsets/") {
- return ""
- }
- i := strings.Index(name, "-trees/")
- if i < 0 {
- i = strings.Index(name, "-blobs/")
- if i < 0 {
- return ""
- }
- }
- i += len("-trees/") + 2
- if i >= len(name) {
- return ""
- }
- return filepath.Join(c.altCache, name[:i]+"/"+name[i:])
-}
-
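-// cget fetches the named object, preferring the local cache (and Arq's own
-// cache, when present) and writing anything downloaded from S3 back to the
-// local cache for next time.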
-func (c *Conn) cget(name string) (data []byte, err error) {
- cache := filepath.Join(c.cache, name)
- f, err := os.Open(cache)
- if err == nil {
- defer f.Close()
- return ioutil.ReadAll(f)
- }
- if altCache := c.altCachePath(name); altCache != "" {
- f, err := os.Open(altCache)
- if err == nil {
- defer f.Close()
- return ioutil.ReadAll(f)
- }
- }
-
- data, err = c.bget(name)
- if err != nil {
- return nil, err
- }
-
- dir, _ := filepath.Split(cache)
- os.MkdirAll(dir, 0700)
- ioutil.WriteFile(cache, data, 0600)
- return data, nil
-}
-
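-// bget fetches the named object directly from the bucket, retrying failed
-// requests up to five attempts in all.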
-func (c *Conn) bget(name string) (data []byte, err error) {
- for i := 0; ; {
- data, err = c.b.Get(name)
- if err == nil {
- break
- }
- if i++; i >= 5 {
- return nil, err
- }
- log.Print(err)
- }
- return data, nil
-}
-
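-// DeleteCache removes the local S3 lookaside cache.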
-func (c *Conn) DeleteCache() {
- os.RemoveAll(c.cache)
-}
-
-// Computers returns a list of the computers with backups available on the S3 server.
-func (c *Conn) Computers() ([]*Computer, error) {
- // Each backup is a top-level directory with a computerinfo file in it.
- list, err := c.list("", "/", 0)
- if err != nil {
- return nil, err
- }
- var out []*Computer
- for _, p := range list.CommonPrefixes {
- data, err := c.bget(p + "computerinfo")
- if err != nil {
- continue
- }
- var info computerInfo
- if err := plist.Unmarshal(data, &info); err != nil {
- return nil, err
- }
-
- comp := &Computer{
- Name: info.ComputerName,
- User: info.UserName,
- UUID: p[:len(p)-1],
- conn: c,
- index: map[score]ientry{},
- }
-
- salt, err := c.cget(p + "salt")
- if err != nil {
- return nil, err
- }
- comp.crypto.salt = salt
-
- out = append(out, comp)
- }
- return out, nil
-}
-
-// A Computer represents a computer with backups (Folders).
-type Computer struct {
- Name string // name of computer
- User string // name of user
- UUID string
- conn *Conn
- crypto cryptoState
- index map[score]ientry
-}
-
-// Folders returns a list of the folders that have been backed up on the computer.
-func (c *Computer) Folders() ([]*Folder, error) {
- // Each folder is a file under computer/buckets/.
- list, err := c.conn.list(c.UUID+"/buckets/", "", 0)
- if err != nil {
- return nil, err
- }
- var out []*Folder
- for _, obj := range list.Contents {
- data, err := c.conn.bget(obj.Key)
- if err != nil {
- return nil, err
- }
- var info folderInfo
- if err := plist.Unmarshal(data, &info); err != nil {
- return nil, err
- }
- out = append(out, &Folder{
- Path: info.LocalPath,
- uuid: info.BucketUUID,
- comp: c,
- conn: c.conn,
- })
- }
- return out, nil
-}
-
-// Unlock records the password to use when decrypting
-// backups from this computer. It must be called before calling Trees
-// in any folder obtained for this computer.
-func (c *Computer) Unlock(pw string) {
- c.crypto.unlock(pw)
-}
-
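-// scget fetches and decrypts the object with the given score; the computer
-// must be unlocked first. If the score appears in the loaded pack indexes,
-// the object is extracted from its pack file; otherwise it is fetched as a
-// standalone object under objects/.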
-func (c *Computer) scget(sc score) ([]byte, error) {
- if c.crypto.c == nil {
- return nil, fmt.Errorf("computer not yet unlocked")
- }
-
- var data []byte
- var err error
- ie, ok := c.index[sc]
- if ok {
- data, err = c.conn.cget(ie.File)
- if err != nil {
- return nil, err
- }
-
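- // A pack entry is, in order (big-endian): a flag byte and 8-byte length
- // for the mime type string, the same for the name string, then an
- // 8-byte data length followed by the encrypted data itself.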
- //fmt.Printf("offset size %d %d\n", ie.Offset, ie.Size)
- if len(data) < int(ie.Offset+ie.Size) {
- return nil, fmt.Errorf("short pack block")
- }
-
- data = data[ie.Offset:]
- if ie.Size < 1+8+1+8+8 {
- return nil, fmt.Errorf("short pack block")
- }
-
- bo := binary.BigEndian
-
- if data[0] != 1 {
- return nil, fmt.Errorf("missing mime type")
- }
- n := bo.Uint64(data[1:])
- if 1+8+n > uint64(len(data)) {
- return nil, fmt.Errorf("malformed mime type")
- }
- mimeType := data[1+8 : 1+8+n]
- data = data[1+8+n:]
-
- n = bo.Uint64(data[1:])
- if 1+8+n > uint64(len(data)) {
- return nil, fmt.Errorf("malformed name")
- }
- name := data[1+8 : 1+8+n]
- data = data[1+8+n:]
-
- _, _ = mimeType, name
- // fmt.Printf("%s %s\n", mimeType, name)
-
- n = bo.Uint64(data[0:])
- if int64(n) != ie.Size {
- return nil, fmt.Errorf("unexpected data length %d %d", n, ie.Size)
- }
- if 8+n > uint64(len(data)) {
- return nil, fmt.Errorf("short data %d %d", 8+n, len(data))
- }
-
- data = data[8 : 8+n]
- } else {
- data, err = c.conn.cget(c.UUID + "/objects/" + sc.String())
- if err != nil {
- log.Fatal(err)
- }
- }
-
- data = c.crypto.decrypt(data)
- return data, nil
-}
-
-// A Folder represents a backed-up tree on a computer.
-type Folder struct {
- Path string // root of tree of last backup
- uuid string
- comp *Computer
- conn *Conn
-}
-
-// Load downloads and indexes the folder's tree and blob pack sets,
-// preparing the folder for Trees and file reads.
-func (f *Folder) Load() error {
- if err := f.comp.loadPack(f.uuid, "-trees"); err != nil {
- return err
- }
- if err := f.comp.loadPack(f.uuid, "-blobs"); err != nil {
- return err
- }
- return nil
-}
-
-func (c *Computer) loadPack(fold, suf string) error {
- list, err := c.conn.list(c.UUID+"/packsets/"+fold+suf+"/", "", 0)
- if err != nil {
- return err
- }
-
- for _, obj := range list.Contents {
- if !strings.HasSuffix(obj.Key, ".index") {
- continue
- }
- data, err := c.conn.cget(obj.Key)
- if err != nil {
- return err
- }
- // fmt.Printf("pack %s\n", obj.Key)
- c.saveIndex(obj.Key[:len(obj.Key)-len(".index")]+".pack", data)
- }
- return nil
-}
-
-func (c *Computer) saveIndex(file string, data []byte) error {
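- // The index layout parsed here: a header of two 4-byte words plus a
- // 256-entry fan-out table (git-style), fixed-size entries of offset,
- // size, score, and a 4-byte trailing field, and a final SHA-1 over
- // everything that precedes it.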
- const (
- headerSize = 4 + 4 + 4*256
- entrySize = 8 + 8 + 20 + 4
- trailerSize = 20
- )
- bo := binary.BigEndian
- if len(data) < headerSize+trailerSize {
- return fmt.Errorf("short index")
- }
- i := len(data) - trailerSize
- sum1 := sha(data[:i])
- sum2 := binaryScore(data[i:])
- if !sum1.Equal(sum2) {
- return fmt.Errorf("invalid sha index")
- }
-
- obj := data[headerSize : len(data)-trailerSize]
- n := len(obj) / entrySize
- if n*entrySize != len(obj) {
- return fmt.Errorf("misaligned index %d %d", n*entrySize, len(obj))
- }
- nn := bo.Uint32(data[headerSize-4:])
- if int(nn) != n {
- return fmt.Errorf("inconsistent index %d %d\n", nn, n)
- }
-
- for i := 0; i < n; i++ {
- e := obj[i*entrySize:]
- var ie ientry
- ie.File = file
- ie.Offset = int64(bo.Uint64(e[0:]))
- ie.Size = int64(bo.Uint64(e[8:]))
- ie.Score = binaryScore(e[16:])
- c.index[ie.Score] = ie
- }
- return nil
-}
-
-// Trees returns a list of the individual backup snapshots for the folder.
-// Note that different trees from the same Folder might have different Paths
-// if the folder was "relocated" using the Arq interface.
-func (f *Folder) Trees() ([]*Tree, error) {
- data, err := f.conn.bget(f.comp.UUID + "/bucketdata/" + f.uuid + "/refs/heads/master")
- if err != nil {
- return nil, err
- }
- sc := hexScore(string(data))
-
- var out []*Tree
- for {
- data, err = f.comp.scget(sc)
- if err != nil {
- return nil, err
- }
-
- var com commit
- if err := unpack(data, &com); err != nil {
- return nil, err
- }
-
- var info folderInfo
- if err := plist.Unmarshal(com.BucketXML, &info); err != nil {
- return nil, err
- }
-
- t := &Tree{
- Time: com.CreateTime,
- Path: info.LocalPath,
- Score: com.Tree.Score,
-
- commit: com,
- comp: f.comp,
- folder: f,
- info: info,
- }
- out = append(out, t)
-
- if len(com.ParentCommits) == 0 {
- break
- }
-
- sc = com.ParentCommits[0].Score
- }
-
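- // The parent walk visited commits newest-first; reverse so the oldest
- // snapshot comes first.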
- for i, n := 0, len(out)-1; i < n-i; i++ {
- out[i], out[n-i] = out[n-i], out[i]
- }
- return out, nil
-}
-
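-// Trees2 is an alternative to Trees: instead of walking commit parents from
-// refs/heads/master, it enumerates the reflog entries under refs/logs/master.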
-func (f *Folder) Trees2() ([]*Tree, error) {
- list, err := f.conn.list(f.comp.UUID+"/bucketdata/"+f.uuid+"/refs/logs/master/", "", 0)
- if err != nil {
- return nil, err
- }
-
- var out []*Tree
- for _, obj := range list.Contents {
- data, err := f.conn.cget(obj.Key)
- if err != nil {
- return nil, err
- }
- var l reflog
- if err := plist.Unmarshal(data, &l); err != nil {
- return nil, err
- }
-
- sc := hexScore(l.NewHeadSHA1)
-
- data, err = f.comp.scget(sc)
- if err != nil {
- return nil, err
- }
-
- var com commit
- if err := unpack(data, &com); err != nil {
- return nil, err
- }
-
- var info folderInfo
- if err := plist.Unmarshal(com.BucketXML, &info); err != nil {
- return nil, err
- }
-
- t := &Tree{
- Time: com.CreateTime,
- Path: info.LocalPath,
- Score: com.Tree.Score,
-
- commit: com,
- comp: f.comp,
- folder: f,
- info: info,
- }
- out = append(out, t)
- }
- return out, nil
-}
-
-// A Tree represents a single backed-up file tree snapshot.
-type Tree struct {
- Time time.Time // time back-up completed
- Path string // root of backed-up tree
- Score [20]byte
-
- comp *Computer
- folder *Folder
- commit commit
- info folderInfo
-
- raw tree
- haveRaw bool
-}
-
-// Root returns the File for the tree's root directory.
-func (t *Tree) Root() (*File, error) {
- if !t.haveRaw {
- data, err := t.comp.scget(t.Score)
- if err != nil {
- return nil, err
- }
- if err := unpack(data, &t.raw); err != nil {
- return nil, err
- }
- t.haveRaw = true
- }
-
- dir := &File{
- t: t,
- dir: &t.raw,
- n: &nameNode{"/", node{IsTree: true}},
- }
- return dir, nil
-}
-
-// A File represents a file or directory in a tree.
-type File struct {
- t *Tree
- n *nameNode
- dir *tree
- byName map[string]*nameNode
-}
-
-func (f *File) loadDir() error {
- if f.dir == nil {
- data, err := f.t.comp.scget(f.n.Node.Blob[0].Score)
- if err != nil {
- return err
- }
- var dir tree
- if err := unpack(data, &dir); err != nil {
- return err
- }
- f.dir = &dir
- }
- return nil
-}
-
-func (f *File) Lookup(name string) (*File, error) {
- if !f.n.Node.IsTree {
- return nil, fmt.Errorf("lookup in non-directory")
- }
- if f.byName == nil {
- if err := f.loadDir(); err != nil {
- return nil, err
- }
- f.byName = map[string]*nameNode{}
- for _, n := range f.dir.Nodes {
- f.byName[n.Name] = n
- }
- }
- n := f.byName[name]
- if n == nil {
- return nil, fmt.Errorf("no entry %q", name)
- }
- return &File{t: f.t, n: n}, nil
-}
-
-func (f *File) Stat() *Dirent {
- if f.n.Node.IsTree {
- if err := f.loadDir(); err == nil {
- return &Dirent{
- Name: f.n.Name,
- ModTime: f.dir.Mtime.Time(),
- Mode: fileMode(f.dir.Mode),
- Size: 0,
- }
- }
- }
- return &Dirent{
- Name: f.n.Name,
- ModTime: f.n.Node.Mtime.Time(),
- Mode: fileMode(f.n.Node.Mode),
- Size: int64(f.n.Node.UncompressedSize),
- }
-}
-
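-// A Dirent describes a directory entry, as returned by Stat and ReadDir.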
-type Dirent struct {
- Name string
- ModTime time.Time
- Mode os.FileMode
- Size int64
-}
-
-func (f *File) ReadDir() ([]Dirent, error) {
- if !f.n.Node.IsTree {
- return nil, fmt.Errorf("ReadDir in non-directory")
- }
- if err := f.loadDir(); err != nil {
- return nil, err
- }
- var out []Dirent
- for _, n := range f.dir.Nodes {
- out = append(out, Dirent{
- Name: n.Name,
- ModTime: n.Node.Mtime.Time(),
- Mode: fileMode(n.Node.Mode),
- })
- }
- return out, nil
-}
-
-func (f *File) Open() (io.ReadCloser, error) {
- return &fileReader{t: f.t, blob: f.n.Node.Blob, n: &f.n.Node}, nil
-}
-
-type fileReader struct {
- t *Tree
- n *node
- blob []sscore
- cur io.Reader
- close []io.Closer
-}
-
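-// Read streams the file by concatenating its blobs in order, transparently
-// gunzipping each blob when the node's CompressData flag is set.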
-func (f *fileReader) Read(b []byte) (int, error) {
- for {
- if f.cur != nil {
- n, err := f.cur.Read(b)
- if n > 0 || err != nil && err != io.EOF {
- return n, err
- }
- for _, cl := range f.close {
- cl.Close()
- }
- f.close = f.close[:0]
- f.cur = nil
- }
-
- if len(f.blob) == 0 {
- break
- }
-
- // TODO: Get a direct reader, not a []byte.
- data, err := f.t.comp.scget(f.blob[0].Score)
- if err != nil {
- return 0, err
- }
- rc := ioutil.NopCloser(bytes.NewBuffer(data))
-
- if f.n.CompressData {
- gz, err := gzip.NewReader(rc)
- if err != nil {
- rc.Close()
- return 0, err
- }
- f.close = append(f.close, gz)
- f.cur = gz
- } else {
- f.cur = rc
- }
- f.close = append(f.close, rc)
- f.blob = f.blob[1:]
- }
-
- return 0, io.EOF
-}
-
-func (f *fileReader) Close() error {
- for _, cl := range f.close {
- cl.Close()
- }
- f.close = f.close[:0]
- f.cur = nil
- return nil
-}
diff --git a/vendor/github.com/mattermost/rsc/arq/arqfs/main.go b/vendor/github.com/mattermost/rsc/arq/arqfs/main.go
deleted file mode 100644
index 9e9001133..000000000
--- a/vendor/github.com/mattermost/rsc/arq/arqfs/main.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Arqfs implements a file system interface to a collection of Arq backups.
-
- usage: arqfs [-m mtpt]
-
-Arqfs mounts the Arq backups on the file system directory mtpt
-(default /mnt/arq). The directory must exist and be writable by
-the current user.
-
-Arq
-
-Arq is an Amazon S3-based backup system for OS X, sold by
-Haystack Software (http://www.haystacksoftware.com/arq/).
-This software reads backups written by Arq.
-It is not affiliated with or connected to Haystack Software.
-
-Passwords
-
-Arqfs reads necessary passwords from the OS X keychain.
-It expects at least two entries:
-
-The keychain entry for s3.amazonaws.com should list the Amazon S3 access ID
-as user name and the S3 secret key as password.
-
-Each backup being accessed must have its own keychain entry for
-host arq.swtch.com, listing the backup UUID as user name and the encryption
-password as the password.
-
-Arqfs will not prompt for passwords or create these entries itself: they must
-be created using the Keychain Access application.
-
-FUSE
-
-Arqfs creates a virtual file system using the FUSE file system layer.
-On OS X, it requires OSXFUSE (http://osxfuse.github.com/).
-
-Cache
-
-Reading the Arq backups efficiently requires caching directory tree information
-on local disk instead of reading the same data from S3 repeatedly. Arqfs caches
-data downloaded from S3 in $HOME/Library/Caches/arq-cache/.
-If an Arq installation is present on the same machine, arqfs will look in
-its cache ($HOME/Library/Arq/Cache.noindex) first, but arqfs will not
-write to Arq's cache directory.
-
-Bugs
-
-Arqfs only runs on OS X for now, because neither the FUSE package nor the
-keychain access package has been ported to other systems yet.
-
-Both Arqfs and the FUSE package on which it is based have seen only light
-use. There are likely to be bugs. Mail rsc@swtch.com with reports.
-
-*/
-package main
-
-import (
- "flag"
- "fmt"
- "io/ioutil"
- "log"
- "os"
- "os/exec"
- "syscall"
-
- "github.com/mattermost/rsc/arq"
- "github.com/mattermost/rsc/fuse"
- "github.com/mattermost/rsc/keychain"
- "launchpad.net/goamz/aws"
-)
-
-var mtpt = flag.String("m", "/mnt/arq", "")
-
-func main() {
- log.SetFlags(0)
-
- if len(os.Args) == 3 && os.Args[1] == "MOUNTSLAVE" {
- *mtpt = os.Args[2]
- mountslave()
- return
- }
-
- flag.Usage = func() {
- fmt.Fprintf(os.Stderr, "usage: arqfs [-m /mnt/arq]\n")
- os.Exit(2)
- }
- flag.Parse()
- if len(flag.Args()) != 0 {
- flag.Usage()
- }
-
- // Run in child so that we can exit once child is running.
- r, w, err := os.Pipe()
- if err != nil {
- log.Fatal(err)
- }
-
- cmd := exec.Command(os.Args[0], "MOUNTSLAVE", *mtpt)
- cmd.Stdout = w
- cmd.Stderr = os.Stderr
- if err := cmd.Start(); err != nil {
- log.Fatalf("mount process: %v", err)
- }
- w.Close()
-
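- // Wait for the mount slave to report a successful mount ("OK" on the pipe).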
- buf := make([]byte, 10)
- n, _ := r.Read(buf)
- if n != 2 || string(buf[0:2]) != "OK" {
- os.Exit(1)
- }
-
- fmt.Fprintf(os.Stderr, "mounted on %s\n", *mtpt)
-}
-
-func mountslave() {
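- // Save the real stdout for the final "OK" handshake, then route all
- // other output to stderr.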
- stdout, _ := syscall.Dup(1)
- syscall.Dup2(2, 1)
-
- access, secret, err := keychain.UserPasswd("s3.amazonaws.com", "")
- if err != nil {
- log.Fatal(err)
- }
- auth := aws.Auth{AccessKey: access, SecretKey: secret}
-
- conn, err := arq.Dial(auth)
- if err != nil {
- log.Fatal(err)
- }
-
- comps, err := conn.Computers()
- if err != nil {
- log.Fatal(err)
- }
-
- fs := &fuse.Tree{}
- for _, c := range comps {
- fmt.Fprintf(os.Stderr, "scanning %s...\n", c.Name)
-
- // TODO: Better password protocol.
- _, pw, err := keychain.UserPasswd("arq.swtch.com", c.UUID)
- if err != nil {
- log.Fatal(err)
- }
- c.Unlock(pw)
-
- folders, err := c.Folders()
- if err != nil {
- log.Fatal(err)
- }
-
- lastDate := ""
- n := 0
- for _, f := range folders {
- if err := f.Load(); err != nil {
- log.Fatal(err)
- }
- trees, err := f.Trees()
- if err != nil {
- log.Fatal(err)
- }
- for _, t := range trees {
- y, m, d := t.Time.Date()
- date := fmt.Sprintf("%04d/%02d%02d", y, m, d)
- suffix := ""
- if date == lastDate {
- n++
- suffix = fmt.Sprintf(".%d", n)
- } else {
- n = 0
- }
- lastDate = date
- f, err := t.Root()
- if err != nil {
- log.Print(err)
- }
- // TODO: Pass times to fs.Add.
- // fmt.Fprintf(os.Stderr, "%v %s %x\n", t.Time, c.Name+"/"+date+suffix+"/"+t.Path, t.Score)
- fs.Add(c.Name+"/"+date+suffix+"/"+t.Path, &fuseNode{f})
- }
- }
- }
-
- fmt.Fprintf(os.Stderr, "mounting...\n")
-
- c, err := fuse.Mount(*mtpt)
- if err != nil {
- log.Fatal(err)
- }
- defer exec.Command("umount", *mtpt).Run()
-
- syscall.Write(stdout, []byte("OK"))
- syscall.Close(stdout)
- c.Serve(fs)
-}
-
-type fuseNode struct {
- arq *arq.File
-}
-
-func (f *fuseNode) Attr() fuse.Attr {
- de := f.arq.Stat()
- return fuse.Attr{
- Mode: de.Mode,
- Mtime: de.ModTime,
- Size: uint64(de.Size),
- }
-}
-
-func (f *fuseNode) Lookup(name string, intr fuse.Intr) (fuse.Node, fuse.Error) {
- ff, err := f.arq.Lookup(name)
- if err != nil {
- return nil, fuse.ENOENT
- }
- return &fuseNode{ff}, nil
-}
-
-func (f *fuseNode) ReadDir(intr fuse.Intr) ([]fuse.Dirent, fuse.Error) {
- adir, err := f.arq.ReadDir()
- if err != nil {
- return nil, fuse.EIO
- }
- var dir []fuse.Dirent
- for _, ade := range adir {
- dir = append(dir, fuse.Dirent{
- Name: ade.Name,
- })
- }
- return dir, nil
-}
-
-// TODO: Implement Read+Release, not ReadAll, to avoid giant buffer.
-func (f *fuseNode) ReadAll(intr fuse.Intr) ([]byte, fuse.Error) {
- rc, err := f.arq.Open()
- if err != nil {
- return nil, fuse.EIO
- }
- defer rc.Close()
- data, err := ioutil.ReadAll(rc)
- if err != nil {
- return data, fuse.EIO
- }
- return data, nil
-}
diff --git a/vendor/github.com/mattermost/rsc/arq/crypto.go b/vendor/github.com/mattermost/rsc/arq/crypto.go
deleted file mode 100644
index e567ec36d..000000000
--- a/vendor/github.com/mattermost/rsc/arq/crypto.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arq
-
-import (
- "crypto/aes"
- "crypto/cipher"
- "crypto/sha1"
- "hash"
- "log"
-
- "bitbucket.org/taruti/pbkdf2.go" // TODO: Pull in copy
-)
-
-type cryptoState struct {
- c cipher.Block
- iv []byte
- salt []byte
-}
-
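-// unlock derives the AES key and IV from the backup password: PBKDF2-SHA1
-// (1000 iterations) stretches the password and salt to 48 bytes, which
-// bytesToKey then folds into a 32-byte AES-256 key and a 16-byte IV.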
-func (c *cryptoState) unlock(pw string) {
- const (
- iter = 1000
- keyLen = 48
- aesKeyLen = 32
- aesIVLen = 16
- )
- key1 := pbkdf2.Pbkdf2([]byte(pw), c.salt, iter, sha1.New, keyLen)
- var key2 []byte
- key2, c.iv = bytesToKey(sha1.New, c.salt, key1, iter, aesKeyLen, aesIVLen)
- c.c, _ = aes.NewCipher(key2)
-}
-
-func (c *cryptoState) decrypt(data []byte) []byte {
- dec := cipher.NewCBCDecrypter(c.c, c.iv)
- if len(data)%aes.BlockSize != 0 {
- log.Fatal("bad block")
- }
- dec.CryptBlocks(data, data)
- // fmt.Printf("% x\n", data)
- // fmt.Printf("%s\n", data)
-
- // Remove PKCS#7-style padding: the last byte gives the pad length,
- // and every pad byte must equal it.
- {
- n := len(data)
- p := int(data[n-1])
- if p == 0 || p > aes.BlockSize {
- log.Fatal("impossible padding")
- }
- for i := 0; i < p; i++ {
- if data[n-1-i] != byte(p) {
- log.Fatal("bad padding")
- }
- }
- data = data[:n-p]
- }
- return data
-}
-
-func sha(data []byte) score {
- h := sha1.New()
- h.Write(data)
- var sc score
- copy(sc[:], h.Sum(nil))
- return sc
-}
-
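-// bytesToKey derives key material the way OpenSSL's EVP_BytesToKey does,
-// with an added iteration count: each block is hash(prev || data || salt)
-// re-hashed iter-1 times, and blocks are concatenated until keySize+ivSize
-// bytes are available.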
-func bytesToKey(hf func() hash.Hash, salt, data []byte, iter int, keySize, ivSize int) (key, iv []byte) {
- h := hf()
- var d, dcat []byte
- sum := make([]byte, 0, h.Size())
- for len(dcat) < keySize+ivSize {
- // D_i = HASH^count(D_(i-1) || data || salt)
- h.Reset()
- h.Write(d)
- h.Write(data)
- h.Write(salt)
- sum = h.Sum(sum[:0])
-
- for j := 1; j < iter; j++ {
- h.Reset()
- h.Write(sum)
- sum = h.Sum(sum[:0])
- }
-
- d = append(d[:0], sum...)
- dcat = append(dcat, d...)
- }
-
- return dcat[:keySize], dcat[keySize : keySize+ivSize]
-}
diff --git a/vendor/github.com/mattermost/rsc/arq/data.go b/vendor/github.com/mattermost/rsc/arq/data.go
deleted file mode 100644
index 74eaba450..000000000
--- a/vendor/github.com/mattermost/rsc/arq/data.go
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// On-cloud data structures
-
-package arq
-
-import (
- "fmt"
- "os"
- "time"
-)
-
-// plist data structures
-
-type computerInfo struct {
- UserName string `plist:"userName"`
- ComputerName string `plist:"computerName"`
-}
-
-type folderInfo struct {
- BucketUUID string
- BucketName string
- ComputerUUID string
- LocalPath string
- LocalMountPoint string
- // don't care about IgnoredRelativePaths or Excludes
-}
-
-type reflog struct {
- OldHeadSHA1 string `plist:"oldHeadSHA1"`
- NewHeadSHA1 string `plist:"newHeadSHA1"`
-}
-
-// binary data structures
-
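-// A score is the SHA-1 of an object's content and serves as the object's
-// name, git-style.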
-type score [20]byte
-
-type sscore struct {
- Score score `arq:"HexScore"`
- StretchKey bool // v4+
-}
-
-type tag string
-
-type commit struct {
- Tag tag `arq:"CommitV005"`
- Author string
- Comment string
- ParentCommits []sscore
- Tree sscore
- Location string
- MergeCommonAncestor sscore
- CreateTime time.Time
- Failed []failed // v3+
- BucketXML []byte // v5+
-}
-
-type tree struct {
- Tag tag `arq:"TreeV015"`
- CompressXattr bool
- CompressACL bool
- Xattr sscore
- XattrSize uint64
- ACL sscore
- Uid int32
- Gid int32
- Mode int32
- Mtime unixTime
- Flags int64
- FinderFlags int32
- XFinderFlags int32
- StDev int32
- StIno int32
- StNlink uint32
- StRdev int32
- Ctime unixTime
- StBlocks int64
- StBlksize uint32
- AggrSize uint64
- Crtime unixTime
- Nodes []*nameNode `arq:"count32"`
-}
-
-type nameNode struct {
- Name string
- Node node
-}
-
-type node struct {
- IsTree bool
- CompressData bool
- CompressXattr bool
- CompressACL bool
- Blob []sscore `arq:"count32"`
- UncompressedSize uint64
- Thumbnail sscore
- Preview sscore
- Xattr sscore
- XattrSize uint64
- ACL sscore
- Uid int32
- Gid int32
- Mode int32
- Mtime unixTime
- Flags int64
- FinderFlags int32
- XFinderFlags int32
- FinderFileType string
- FinderFileCreator string
- IsExtHidden bool
- StDev int32
- StIno int32
- StNlink uint32
- StRdev int32
- Ctime unixTime
- CreateTime unixTime
- StBlocks int64
- StBlksize uint32
-}
-
-func fileMode(m int32) os.FileMode {
- const (
- // Darwin file mode.
- S_IFBLK = 0x6000
- S_IFCHR = 0x2000
- S_IFDIR = 0x4000
- S_IFIFO = 0x1000
- S_IFLNK = 0xa000
- S_IFMT = 0xf000
- S_IFREG = 0x8000
- S_IFSOCK = 0xc000
- S_IFWHT = 0xe000
- S_ISGID = 0x400
- S_ISTXT = 0x200
- S_ISUID = 0x800
- S_ISVTX = 0x200
- )
- mode := os.FileMode(m & 0777)
- switch m & S_IFMT {
- case S_IFBLK, S_IFWHT:
- mode |= os.ModeDevice
- case S_IFCHR:
- mode |= os.ModeDevice | os.ModeCharDevice
- case S_IFDIR:
- mode |= os.ModeDir
- case S_IFIFO:
- mode |= os.ModeNamedPipe
- case S_IFLNK:
- mode |= os.ModeSymlink
- case S_IFREG:
- // nothing to do
- case S_IFSOCK:
- mode |= os.ModeSocket
- }
- if m&S_ISGID != 0 {
- mode |= os.ModeSetgid
- }
- if m&S_ISUID != 0 {
- mode |= os.ModeSetuid
- }
- if m&S_ISVTX != 0 {
- mode |= os.ModeSticky
- }
- return mode
-}
-
-type unixTime struct {
- Sec int64
- Nsec int64
-}
-
-func (t *unixTime) Time() time.Time {
- return time.Unix(t.Sec, t.Nsec)
-}
-
-type failed struct {
- Path string
- Error string
-}
-
-type ientry struct {
- File string
- Offset int64
- Size int64
- Score score
-}
-
-func (s score) Equal(t score) bool {
- for i := range s {
- if s[i] != t[i] {
- return false
- }
- }
- return true
-}
-
-func (s score) String() string {
- return fmt.Sprintf("%x", s[:])
-}
-
-func binaryScore(b []byte) score {
- if len(b) < 20 {
- panic("BinaryScore: not enough data")
- }
- var sc score
- copy(sc[:], b)
- return sc
-}
-
-func hexScore(b string) score {
- if len(b) < 40 {
- panic("HexScore: not enough data")
- }
- var sc score
- for i := 0; i < 40; i++ {
- ch := b[i]
- if '0' <= ch && ch <= '9' {
- ch -= '0'
- } else if 'a' <= ch && ch <= 'f' {
- ch -= 'a' - 10
- } else {
- panic("HexScore: invalid lower hex digit")
- }
- if i%2 == 0 {
- ch <<= 4
- }
- sc[i/2] |= ch
- }
- return sc
-}
-
-func (ss sscore) String() string {
- str := ss.Score.String()
- if ss.StretchKey {
- str += "Y"
- }
- return str
-}
diff --git a/vendor/github.com/mattermost/rsc/arq/hist/hist.go b/vendor/github.com/mattermost/rsc/arq/hist/hist.go
deleted file mode 100644
index 02fb5fbf0..000000000
--- a/vendor/github.com/mattermost/rsc/arq/hist/hist.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Hist shows the history of a given file, using Arq backups.
-
- usage: hist [-d] [-h host] [-m mtpt] [-s yyyy/mmdd] file ...
-
-The -d flag causes it to show diffs between successive versions.
-
-By default, hist assumes backups are mounted at mtpt/host, where
-mtpt defaults to /mnt/arq and host is the first element of the local host name.
-Hist starts the file list with the present copy of the file.
-
-The -h, -m, and -s flags override these defaults.
-
-*/
-package main
-
-import (
- "flag"
- "fmt"
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
- "regexp"
- "strings"
-)
-
-var usageString = `usage: hist [-d] [-h host] [-m mtpt] [-s yyyy/mmdd] file ...
-
-Hist lists the known versions of the given file.
-The -d flag causes it to show diffs between successive versions.
-
-By default, hist assumes backups are mounted at mtpt/host, where
-mtpt defaults to /mnt/arq and host is the first element of the local host name.
-Hist starts the file list with the present copy of the file.
-
-The -h, -m, and -s flags override these defaults.
-`
-
-var (
- diff = flag.Bool("d", false, "diff")
- host = flag.String("h", defaultHost(), "host name")
- mtpt = flag.String("m", "/mnt/arq", "mount point")
- vers = flag.String("s", "", "version")
-)
-
-func defaultHost() string {
- name, _ := os.Hostname()
- if name == "" {
- name = "gnot"
- }
- if i := strings.Index(name, "."); i >= 0 {
- name = name[:i]
- }
- return name
-}
-
-func main() {
- flag.Usage = func() {
- fmt.Fprint(os.Stderr, usageString)
- os.Exit(2)
- }
-
- flag.Parse()
- args := flag.Args()
- if len(args) == 0 {
- flag.Usage()
- }
-
- dates := loadDates()
- for _, file := range args {
- list(dates, file)
- }
-}
-
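-// Backup snapshot directories are named yyyy/mmdd, with an optional .N
-// suffix when a day has more than one snapshot (see arqfs).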
-var (
- yyyy = regexp.MustCompile(`^\d{4}$`)
- mmdd = regexp.MustCompile(`^\d{4}(\.\d+)?$`)
-)
-
-func loadDates() []string {
- var all []string
- ydir, err := ioutil.ReadDir(filepath.Join(*mtpt, *host))
- if err != nil {
- fmt.Fprintf(os.Stderr, "%v\n", err)
- os.Exit(3)
- }
- for _, y := range ydir {
- if !y.IsDir() || !yyyy.MatchString(y.Name()) {
- continue
- }
- ddir, err := ioutil.ReadDir(filepath.Join(*mtpt, *host, y.Name()))
- if err != nil {
- continue
- }
- for _, d := range ddir {
- if !d.IsDir() || !mmdd.MatchString(d.Name()) {
- continue
- }
- date := y.Name() + "/" + d.Name()
- if *vers > date {
- continue
- }
- all = append(all, filepath.Join(*mtpt, *host, date))
- }
- }
- return all
-}
-
-const timeFormat = "Jan 02 15:04:05 MST 2006"
-
-func list(dates []string, file string) {
- var (
- last os.FileInfo
- lastPath string
- )
-
- fi, err := os.Stat(file)
- if err != nil {
- fmt.Fprintf(os.Stderr, "hist: warning: %s: %v\n", file, err)
- } else {
- fmt.Printf("%s %s %d\n", fi.ModTime().Format(timeFormat), file, fi.Size())
- last = fi
- lastPath = file
- }
-
- file, err = filepath.Abs(file)
- if err != nil {
- fmt.Fprintf(os.Stderr, "hist: abs: %v\n", err)
- return
- }
-
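- // Walk the snapshots newest-first, printing (and optionally diffing)
- // only versions whose mtime or size differs from the previous one shown.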
- for i := len(dates)-1; i >= 0; i-- {
- p := filepath.Join(dates[i], file)
- fi, err := os.Stat(p)
- if err != nil {
- continue
- }
- if last != nil && fi.ModTime().Equal(last.ModTime()) && fi.Size() == last.Size() {
- continue
- }
- if *diff {
- cmd := exec.Command("diff", lastPath, p)
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- if err := cmd.Start(); err != nil {
- fmt.Fprintf(os.Stderr, "%s\n", err)
- }
- cmd.Wait()
- }
- fmt.Printf("%s %s %d\n", fi.ModTime().Format(timeFormat), p, fi.Size())
- last = fi
- lastPath = p
- }
-}
-
diff --git a/vendor/github.com/mattermost/rsc/arq/unpack.go b/vendor/github.com/mattermost/rsc/arq/unpack.go
deleted file mode 100644
index ec4296a7c..000000000
--- a/vendor/github.com/mattermost/rsc/arq/unpack.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Parsing of Arq's binary data structures.
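-//
-// The wire format is big-endian throughout: strings are a presence flag
-// byte plus an 8-byte length and raw bytes, integers are fixed-width,
-// booleans are a single 0/1 byte, slices carry a 64-bit count (32-bit with
-// the `arq:"count32"` tag), and times are a presence flag byte plus
-// milliseconds since the Unix epoch.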
-
-package arq
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "reflect"
- "time"
-)
-
-var errMalformed = fmt.Errorf("malformed data")
-var tagType = reflect.TypeOf(tag(""))
-var timeType = reflect.TypeOf(time.Time{})
-var scoreType = reflect.TypeOf(score{})
-
-func unpack(data []byte, v interface{}) error {
- data, err := unpackValue(data, reflect.ValueOf(v).Elem(), "")
- if err != nil {
- return err
- }
- if len(data) != 0 {
- if len(data) > 100 {
- return fmt.Errorf("more data than expected: %x...", data[:100])
- }
- return fmt.Errorf("more data than expected: %x", data)
- }
- return nil
-}
-
-func unpackValue(data []byte, v reflect.Value, tag string) ([]byte, error) {
- //println("unpackvalue", v.Type().String(), len(data))
- switch v.Kind() {
- case reflect.String:
- if v.Type() == tagType {
- if tag == "" {
- panic("arqfs: missing reflect tag on Tag field")
- }
- if len(data) < len(tag) || !bytes.Equal(data[:len(tag)], []byte(tag)) {
- return nil, errMalformed
- }
- data = data[len(tag):]
- return data, nil
- }
- if len(data) < 1 {
- return nil, errMalformed
- }
- if data[0] == 0 {
- data = data[1:]
- v.SetString("")
- return data, nil
- }
- if data[0] != 1 || len(data) < 1+8 {
- return nil, errMalformed
- }
- n := binary.BigEndian.Uint64(data[1:])
- data = data[1+8:]
- if n >= uint64(len(data)) {
- return nil, errMalformed
- }
- str := data[:n]
- data = data[n:]
- v.SetString(string(str))
- return data, nil
-
- case reflect.Uint32:
- if len(data) < 4 {
- return nil, errMalformed
- }
- v.SetUint(uint64(binary.BigEndian.Uint32(data)))
- data = data[4:]
- return data, nil
-
- case reflect.Int32:
- if len(data) < 4 {
- return nil, errMalformed
- }
- v.SetInt(int64(binary.BigEndian.Uint32(data)))
- data = data[4:]
- return data, nil
-
- case reflect.Uint8:
- if len(data) < 1 {
- return nil, errMalformed
- }
- v.SetUint(uint64(data[0]))
- data = data[1:]
- return data, nil
-
- case reflect.Uint64:
- if len(data) < 8 {
- return nil, errMalformed
- }
- v.SetUint(binary.BigEndian.Uint64(data))
- data = data[8:]
- return data, nil
-
- case reflect.Int64:
- if len(data) < 8 {
- return nil, errMalformed
- }
- v.SetInt(int64(binary.BigEndian.Uint64(data)))
- data = data[8:]
- return data, nil
-
- case reflect.Ptr:
- v.Set(reflect.New(v.Type().Elem()))
- return unpackValue(data, v.Elem(), tag)
-
- case reflect.Slice:
- var n int
- if tag == "count32" {
- n32 := binary.BigEndian.Uint32(data)
- n = int(n32)
- if uint32(n) != n32 {
- return nil, errMalformed
- }
- data = data[4:]
- } else {
- if len(data) < 8 {
- return nil, errMalformed
- }
- n64 := binary.BigEndian.Uint64(data)
- n = int(n64)
- if uint64(n) != n64 {
- return nil, errMalformed
- }
- data = data[8:]
- }
- v.Set(v.Slice(0, 0))
- if v.Type().Elem().Kind() == reflect.Uint8 {
- // Fast case for []byte
- if len(data) < n {
- return nil, errMalformed
- }
- v.Set(reflect.AppendSlice(v, reflect.ValueOf(data[:n])))
- return data[n:], nil
- }
- for i := 0; i < n; i++ {
- elem := reflect.New(v.Type().Elem()).Elem()
- var err error
- data, err = unpackValue(data, elem, "")
- if err != nil {
- return nil, err
- }
- v.Set(reflect.Append(v, elem))
- }
- return data, nil
-
- case reflect.Array:
- if v.Type() == scoreType && tag == "HexScore" {
- var s string
- data, err := unpackValue(data, reflect.ValueOf(&s).Elem(), "")
- if err != nil {
- return nil, err
- }
- if len(s) != 0 {
- v.Set(reflect.ValueOf(hexScore(s)))
- }
- return data, nil
- }
- n := v.Len()
- if v.Type().Elem().Kind() == reflect.Uint8 {
- // Fast case for [n]byte
- if len(data) < n {
- return nil, errMalformed
- }
- reflect.Copy(v, reflect.ValueOf(data))
- data = data[n:]
- return data, nil
- }
- for i := 0; i < n; i++ {
- var err error
- data, err = unpackValue(data, v.Index(i), "")
- if err != nil {
- return nil, err
- }
- }
- return data, nil
-
- case reflect.Bool:
- if len(data) < 1 || data[0] > 1 {
- return nil, errMalformed
- }
- v.SetBool(data[0] == 1)
- data = data[1:]
- return data, nil
-
- case reflect.Struct:
- if v.Type() == timeType {
- if len(data) < 1 || data[0] > 1 {
- return nil, errMalformed
- }
- if data[0] == 0 {
- v.Set(reflect.ValueOf(time.Time{}))
- return data[1:], nil
- }
- data = data[1:]
- if len(data) < 8 {
- return nil, errMalformed
- }
- ms := binary.BigEndian.Uint64(data)
- v.Set(reflect.ValueOf(time.Unix(int64(ms/1e3), int64(ms%1e3)*1e6)))
- return data[8:], nil
- }
- for i := 0; i < v.NumField(); i++ {
- f := v.Type().Field(i)
- fv := v.Field(i)
- var err error
- data, err = unpackValue(data, fv, f.Tag.Get("arq"))
- if err != nil {
- return nil, err
- }
- }
- return data, nil
- }
-
- panic("arqfs: unexpected type in unpackValue: " + v.Type().String())
-}