Diffstat (limited to 'vendor/github.com/mattermost/rsc/appfs')
-rw-r--r--  vendor/github.com/mattermost/rsc/appfs/appfile/main.go     156
-rw-r--r--  vendor/github.com/mattermost/rsc/appfs/appmount/main.go    287
-rw-r--r--  vendor/github.com/mattermost/rsc/appfs/client/client.go    150
-rw-r--r--  vendor/github.com/mattermost/rsc/appfs/fs/fs.go            273
-rw-r--r--  vendor/github.com/mattermost/rsc/appfs/fs/local.go          82
-rw-r--r--  vendor/github.com/mattermost/rsc/appfs/proto/data.go        55
-rw-r--r--  vendor/github.com/mattermost/rsc/appfs/server/app.go       982
7 files changed, 1985 insertions, 0 deletions
diff --git a/vendor/github.com/mattermost/rsc/appfs/appfile/main.go b/vendor/github.com/mattermost/rsc/appfs/appfile/main.go
new file mode 100644
index 000000000..6d77df9aa
--- /dev/null
+++ b/vendor/github.com/mattermost/rsc/appfs/appfile/main.go
@@ -0,0 +1,156 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// appfile is a command-line interface to an appfs file system.
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+
+ "github.com/mattermost/rsc/appfs/client"
+ "github.com/mattermost/rsc/keychain"
+)
+
+var c client.Client
+
+func init() {
+ flag.StringVar(&c.Host, "h", "localhost:8080", "app serving host")
+ flag.StringVar(&c.User, "u", "", "user name")
+ flag.StringVar(&c.Password, "p", "", "password")
+}
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: appfile [-h host] cmd args...\n")
+ fmt.Fprintf(os.Stderr, "\n")
+ fmt.Fprintf(os.Stderr, "Commands are:\n")
+ for _, c := range cmd {
+ fmt.Fprintf(os.Stderr, "\t%s\n", c.name)
+ }
+ os.Exit(2)
+}
+
+func main() {
+ flag.Usage = usage
+ flag.Parse()
+ args := flag.Args()
+ if len(args) == 0 {
+ usage()
+ }
+
+ if c.Password == "" {
+ var err error
+ c.User, c.Password, err = keychain.UserPasswd(c.Host, "")
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unable to obtain user and password: %s\n", err)
+ os.Exit(2)
+ }
+ }
+
+ name, args := args[0], args[1:]
+ for _, c := range cmd {
+ if name == c.name {
+ switch c.arg {
+ case 0, 1:
+ if len(args) != c.arg {
+ if c.arg == 0 {
+ fmt.Fprintf(os.Stderr, "%s takes no arguments\n", name)
+ os.Exit(2)
+ }
+ fmt.Fprintf(os.Stderr, "%s requires 1 argument\n", name)
+ os.Exit(2)
+ }
+ case 2:
+ if len(args) == 0 {
+ fmt.Fprintf(os.Stderr, "%s requires at least 1 argument\n", name)
+ os.Exit(2)
+ }
+ }
+ c.fn(args)
+ return
+ }
+ }
+ fmt.Fprintf(os.Stderr, "unknown command %s\n", name)
+ os.Exit(2)
+}
+
+var cmd = []struct {
+ name string
+ fn func([]string)
+ arg int
+}{
+ {"mkdir", mkdir, 2},
+ {"write", write, 1},
+ {"read", read, 2},
+ {"mkfs", mkfs, 0},
+ {"stat", stat, 2},
+}
+
+func mkdir(args []string) {
+ for _, name := range args {
+ if err := c.Create(name, true); err != nil {
+ log.Printf("mkdir %s: %v", name, err)
+ }
+ }
+}
+
+func write(args []string) {
+ name := args[0]
+ data, err := ioutil.ReadAll(os.Stdin)
+ if err != nil {
+ log.Printf("reading stdin: %v", err)
+ return
+ }
+ c.Create(name, false)
+ if err := c.Write(name, data); err != nil {
+ log.Printf("write %s: %v", name, err)
+ }
+}
+
+func read(args []string) {
+ for _, name := range args {
+ fi, err := c.Stat(name)
+ if err != nil {
+ log.Printf("stat %s: %v", name, err)
+ continue
+ }
+ if fi.IsDir {
+ dirs, err := c.ReadDir(name)
+ if err != nil {
+ log.Printf("read %s: %v", name, err)
+ continue
+ }
+ for _, fi := range dirs {
+ fmt.Printf("%+v\n", *fi)
+ }
+ } else {
+ data, err := c.Read(name)
+ if err != nil {
+ log.Printf("read %s: %v", name, err)
+ continue
+ }
+ os.Stdout.Write(data)
+ }
+ }
+}
+
+func mkfs([]string) {
+ if err := c.Mkfs(); err != nil {
+ log.Printf("mkfs: %v", err)
+ }
+}
+
+func stat(args []string) {
+ for _, name := range args {
+ fi, err := c.Stat(name)
+ if err != nil {
+ log.Printf("stat %s: %v", name, err)
+ continue
+ }
+ fmt.Printf("%+v\n", *fi)
+ }
+}
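
For reference, a few example invocations of the appfile command above; the host, credentials (matching the sample password file installed by the server package), and paths are hypothetical. write reads the new file content from standard input, and read prints either file content or a directory listing.

    appfile -h localhost:8080 -u dummy -p dummy mkfs
    appfile -h localhost:8080 -u dummy -p dummy mkdir /doc
    appfile -h localhost:8080 -u dummy -p dummy write /doc/hello.txt < hello.txt
    appfile -h localhost:8080 -u dummy -p dummy read /doc/hello.txt
    appfile -h localhost:8080 -u dummy -p dummy stat /doc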
diff --git a/vendor/github.com/mattermost/rsc/appfs/appmount/main.go b/vendor/github.com/mattermost/rsc/appfs/appmount/main.go
new file mode 100644
index 000000000..2c9f867d3
--- /dev/null
+++ b/vendor/github.com/mattermost/rsc/appfs/appmount/main.go
@@ -0,0 +1,287 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// appmount mounts an appfs file system.
+package main
+
+import (
+ "bytes"
+ "encoding/gob"
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "os/exec"
+ "path"
+ "strings"
+ "syscall"
+ "time"
+ "sync"
+ "runtime"
+
+ "github.com/mattermost/rsc/appfs/client"
+ "github.com/mattermost/rsc/appfs/proto"
+ "github.com/mattermost/rsc/fuse"
+ "github.com/mattermost/rsc/keychain"
+)
+
+var usageMessage = `usage: appmount [-h host] [-u user] [-p password] /mnt
+
+Appmount mounts the appfs file system on the named mount point.
+
+The default host is localhost:8080.
+`
+
+// Shared between master and slave.
+var z struct {
+ Client client.Client
+ Debug *bool
+ Mtpt string
+}
+
+var fc *fuse.Conn
+var cl = &z.Client
+
+func init() {
+ flag.StringVar(&cl.Host, "h", "localhost:8080", "app serving host")
+ flag.StringVar(&cl.User, "u", "", "user name")
+ flag.StringVar(&cl.Password, "p", "", "password")
+ z.Debug = flag.Bool("debug", false, "")
+}
+
+func usage() {
+ fmt.Fprint(os.Stderr, usageMessage)
+ os.Exit(2)
+}
+
+func main() {
+ log.SetFlags(0)
+
+ if len(os.Args) == 2 && os.Args[1] == "MOUNTSLAVE" {
+ mountslave()
+ return
+ }
+
+ flag.Usage = usage
+ flag.Parse()
+ args := flag.Args()
+ if len(args) == 0 {
+ usage()
+ }
+ z.Mtpt = args[0]
+
+ if cl.Password == "" {
+ var err error
+ cl.User, cl.Password, err = keychain.UserPasswd(cl.Host, "")
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unable to obtain user and password: %s\n", err)
+ os.Exit(2)
+ }
+ }
+
+ if _, err := cl.Stat("/"); err != nil {
+ log.Fatal(err)
+ }
+
+ // Run in child so that we can exit once child is running.
+ r, w, err := os.Pipe()
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ var buf bytes.Buffer
+ enc := gob.NewEncoder(&buf)
+ enc.Encode(&z)
+
+ cmd := exec.Command(os.Args[0], "MOUNTSLAVE")
+ cmd.Stdin = &buf
+ cmd.Stdout = w
+ cmd.Stderr = os.Stderr
+ if err := cmd.Start(); err != nil {
+ log.Fatalf("mount process: %v", err)
+ }
+ w.Close()
+
+ ok := make([]byte, 10)
+ n, _ := r.Read(ok)
+ if n != 2 || string(ok[0:2]) != "OK" {
+ os.Exit(1)
+ }
+
+ fmt.Fprintf(os.Stderr, "mounted on %s\n", z.Mtpt)
+}
+
+func mountslave() {
+ stdout, _ := syscall.Dup(1)
+ syscall.Dup2(2, 1)
+
+ r := gob.NewDecoder(os.Stdin)
+ if err := r.Decode(&z); err != nil {
+ log.Fatalf("gob decode: %v", err)
+ }
+
+ fc, err := fuse.Mount(z.Mtpt)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer exec.Command("umount", z.Mtpt).Run()
+
+ if *z.Debug {
+ fuse.Debugf = log.Printf
+ }
+
+ syscall.Write(stdout, []byte("OK"))
+ syscall.Close(stdout)
+ fc.Serve(FS{})
+}
+
+type FS struct{}
+
+func (FS) Root() (fuse.Node, fuse.Error) {
+ return file("/")
+}
+
+type File struct {
+ Name string
+ FileInfo *proto.FileInfo
+ Data []byte
+}
+
+type statEntry struct {
+ fi *proto.FileInfo
+ err error
+ t time.Time
+}
+
+var statCache struct {
+ mu sync.Mutex
+ m map[string]statEntry
+}
+
+func stat(name string) (*proto.FileInfo, error) {
+ if runtime.GOOS == "darwin" && strings.Contains(name, "/._") {
+ // Mac resource forks
+ return nil, fmt.Errorf("file not found")
+ }
+ statCache.mu.Lock()
+ e, ok := statCache.m[name]
+ statCache.mu.Unlock()
+ if ok && time.Since(e.t) < 2*time.Minute {
+ return e.fi, e.err
+ }
+ fi, err := cl.Stat(name)
+ saveStat(name, fi, err)
+ return fi, err
+}
+
+func saveStat(name string, fi *proto.FileInfo, err error) {
+ if *z.Debug {
+ if fi != nil {
+ fmt.Fprintf(os.Stderr, "savestat %s %+v\n", name, *fi)
+ } else {
+ fmt.Fprintf(os.Stderr, "savestat %s %v\n", name, err)
+ }
+ }
+ statCache.mu.Lock()
+ if statCache.m == nil {
+ statCache.m = make(map[string]statEntry)
+ }
+ statCache.m[name] = statEntry{fi, err, time.Now()}
+ statCache.mu.Unlock()
+}
+
+func delStat(name string) {
+ statCache.mu.Lock()
+ if statCache.m != nil {
+ delete(statCache.m, name)
+ }
+ statCache.mu.Unlock()
+}
+
+func file(name string) (fuse.Node, fuse.Error) {
+ fi, err := stat(name)
+ if err != nil {
+ if strings.Contains(err.Error(), "no such entity") {
+ return nil, fuse.ENOENT
+ }
+ if *z.Debug {
+ log.Printf("stat %s: %v", name, err)
+ }
+ return nil, fuse.EIO
+ }
+ return &File{name, fi, nil}, nil
+}
+
+func (f *File) Attr() (attr fuse.Attr) {
+ fi := f.FileInfo
+ attr.Mode = 0666
+ if fi.IsDir {
+ attr.Mode |= 0111 | os.ModeDir
+ }
+ attr.Mtime = fi.ModTime
+ attr.Size = uint64(fi.Size)
+ return
+}
+
+func (f *File) Lookup(name string, intr fuse.Intr) (fuse.Node, fuse.Error) {
+ return file(path.Join(f.Name, name))
+}
+
+func (f *File) ReadAll(intr fuse.Intr) ([]byte, fuse.Error) {
+ data, err := cl.Read(f.Name)
+ if err != nil {
+ log.Printf("read %s: %v", f.Name, err)
+ return nil, fuse.EIO
+ }
+ return data, nil
+}
+
+func (f *File) ReadDir(intr fuse.Intr) ([]fuse.Dirent, fuse.Error) {
+ fis, err := cl.ReadDir(f.Name)
+ if err != nil {
+ log.Printf("read %s: %v", f.Name, err)
+ return nil, fuse.EIO
+ }
+ var dirs []fuse.Dirent
+ for _, fi := range fis {
+ saveStat(path.Join(f.Name, fi.Name), fi, nil)
+ dirs = append(dirs, fuse.Dirent{Name: fi.Name})
+ }
+ return dirs, nil
+}
+
+func (f *File) WriteAll(data []byte, intr fuse.Intr) fuse.Error {
+ defer delStat(f.Name)
+ if err := cl.Write(f.Name[1:], data); err != nil {
+ log.Printf("write %s: %v", f.Name, err)
+ return fuse.EIO
+ }
+ return nil
+}
+
+func (f *File) Mkdir(req *fuse.MkdirRequest, intr fuse.Intr) (fuse.Node, fuse.Error) {
+ defer delStat(f.Name)
+ p := path.Join(f.Name, req.Name)
+ if err := cl.Create(p[1:], true); err != nil {
+ log.Printf("mkdir %s: %v", p, err)
+ return nil, fuse.EIO
+ }
+ delStat(p)
+ return file(p)
+}
+
+func (f *File) Create(req *fuse.CreateRequest, resp *fuse.CreateResponse, intr fuse.Intr) (fuse.Node, fuse.Handle, fuse.Error) {
+ defer delStat(f.Name)
+ p := path.Join(f.Name, req.Name)
+ if err := cl.Create(p[1:], false); err != nil {
+ log.Printf("create %s: %v", p, err)
+ return nil, nil, fuse.EIO
+ }
+ delStat(p)
+ n, err := file(p)
+ if err != nil {
+ return nil, nil, err
+ }
+ return n, n, nil
+}
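
An example invocation, with a hypothetical mount point:

    appmount -h localhost:8080 /mnt/appfs

The command re-executes itself with the MOUNTSLAVE argument, hands the child its configuration as a gob on standard input, and exits once the child reports OK over a pipe, leaving the child process to serve the FUSE connection.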
diff --git a/vendor/github.com/mattermost/rsc/appfs/client/client.go b/vendor/github.com/mattermost/rsc/appfs/client/client.go
new file mode 100644
index 000000000..a1deb291d
--- /dev/null
+++ b/vendor/github.com/mattermost/rsc/appfs/client/client.go
@@ -0,0 +1,150 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package client implements a basic appfs client.
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/mattermost/rsc/appfs/proto"
+)
+
+type Client struct {
+ Host string
+ User string
+ Password string
+}
+
+func (c *Client) url(op, path string) string {
+ scheme := "https"
+ if strings.HasPrefix(c.Host, "localhost:") {
+ scheme = "http"
+ }
+ if strings.HasSuffix(op, "/") && strings.HasPrefix(path, "/") {
+ path = path[1:]
+ }
+ return scheme + "://" + c.User + ":" + c.Password + "@" + c.Host + op + path
+}
+
+func (c *Client) do(u string) error {
+ _, err := c.get(u)
+ return err
+}
+
+func (c *Client) get(u string) ([]byte, error) {
+ tries := 0
+ for {
+ r, err := http.Get(u)
+ if err != nil {
+ return nil, err
+ }
+ defer r.Body.Close()
+ data, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ return nil, err
+ }
+ if r.StatusCode != 200 {
+ if r.StatusCode == 500 {
+ if tries++; tries < 3 {
+ fmt.Printf("%s %s; sleeping\n", r.Status, data)
+ time.Sleep(5*time.Second)
+ continue
+ }
+ }
+ return nil, fmt.Errorf("%s %s", r.Status, data)
+ }
+ return data, nil
+ }
+ panic("unreachable")
+}
+
+func (c *Client) post(u string, data []byte) ([]byte, error) {
+ tries := 0
+ for {
+ r, err := http.Post(u, proto.PostContentType, bytes.NewBuffer(data))
+ if err != nil {
+ return nil, err
+ }
+ defer r.Body.Close()
+ rdata, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ return nil, err
+ }
+ if r.StatusCode != 200 {
+ if r.StatusCode == 500 {
+ if tries++; tries < 3 {
+ fmt.Printf("%s %s; sleeping\n", r.Status, rdata)
+ time.Sleep(5*time.Second)
+ continue
+ }
+ }
+ return nil, fmt.Errorf("%s %s", r.Status, rdata)
+ }
+ return rdata, nil
+ }
+ panic("unreachable")
+}
+
+func (c *Client) Create(path string, isdir bool) error {
+ u := c.url(proto.CreateURL, path)
+ if isdir {
+ u += "?dir=1"
+ }
+ return c.do(u)
+}
+
+func (c *Client) Read(path string) ([]byte, error) {
+ return c.get(c.url(proto.ReadURL, path))
+}
+
+func (c *Client) Write(path string, data []byte) error {
+ u := c.url(proto.WriteURL, path)
+ _, err := c.post(u, data)
+ return err
+}
+
+func (c *Client) Mkfs() error {
+ return c.do(c.url(proto.MkfsURL, ""))
+}
+
+func (c *Client) Stat(path string) (*proto.FileInfo, error) {
+ data, err := c.get(c.url(proto.StatURL, path))
+ if err != nil {
+ return nil, err
+ }
+ var fi proto.FileInfo
+ if err := json.Unmarshal(data, &fi); err != nil {
+ return nil, err
+ }
+ return &fi, nil
+}
+
+func (c *Client) ReadDir(path string) ([]*proto.FileInfo, error) {
+ data, err := c.Read(path)
+ if err != nil {
+ return nil, err
+ }
+ dec := json.NewDecoder(bytes.NewBuffer(data))
+ var out []*proto.FileInfo
+ for {
+ var fi proto.FileInfo
+ err := dec.Decode(&fi)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return out, err
+ }
+ out = append(out, &fi)
+ }
+ return out, nil
+}
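
A minimal sketch of using the Client type above from Go; the host, credentials, and paths are hypothetical, and error handling is kept to logging:

package main

import (
    "fmt"
    "log"

    "github.com/mattermost/rsc/appfs/client"
)

func main() {
    c := &client.Client{Host: "localhost:8080", User: "dummy", Password: "dummy"}

    // Create a directory and a file, then write and read the file back.
    if err := c.Create("/notes", true); err != nil {
        log.Printf("mkdir /notes: %v", err) // may already exist
    }
    if err := c.Create("/notes/todo.txt", false); err != nil {
        log.Printf("create: %v", err) // may already exist
    }
    if err := c.Write("/notes/todo.txt", []byte("buy milk\n")); err != nil {
        log.Fatalf("write: %v", err)
    }
    data, err := c.Read("/notes/todo.txt")
    if err != nil {
        log.Fatalf("read: %v", err)
    }
    fmt.Printf("%s", data)

    // List the directory; ReadDir decodes the stream of JSON FileInfo records.
    dir, err := c.ReadDir("/notes")
    if err != nil {
        log.Fatalf("readdir: %v", err)
    }
    for _, fi := range dir {
        fmt.Println(fi.Name, fi.Size)
    }
}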
diff --git a/vendor/github.com/mattermost/rsc/appfs/fs/fs.go b/vendor/github.com/mattermost/rsc/appfs/fs/fs.go
new file mode 100644
index 000000000..ac6657393
--- /dev/null
+++ b/vendor/github.com/mattermost/rsc/appfs/fs/fs.go
@@ -0,0 +1,273 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fs is an indirection layer, allowing code to use a
+// file system without knowing whether it is the host file system
+// (running without App Engine) or the datastore-based app
+// file system (running on App Engine).
+//
+// When compiled locally, fs refers to files in the local file system,
+// and the cache saves nothing.
+//
+// When compiled for App Engine, fs uses the appfs file system
+// and the memcache-based cache.
+package fs
+
+import (
+ "bytes"
+ "encoding/gob"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "os"
+ "time"
+
+ "github.com/mattermost/rsc/appfs/proto"
+)
+
+type AppEngine interface {
+ NewContext(req *http.Request) interface{}
+ CacheRead(ctxt interface{}, name, path string) (key interface{}, data []byte, found bool)
+ CacheWrite(ctxt, key interface{}, data []byte)
+ Read(ctxt interface{}, path string) ([]byte, *proto.FileInfo, error)
+ Write(ctxt interface{}, path string, data []byte) error
+ Remove(ctxt interface{}, path string) error
+ Mkdir(ctxt interface{}, path string) error
+ ReadDir(ctxt interface{}, path string) ([]proto.FileInfo, error)
+ Criticalf(ctxt interface{}, format string, args ...interface{})
+ User(ctxt interface{}) string
+}
+
+var ae AppEngine
+
+func Register(impl AppEngine) {
+ ae = impl
+}
+
+// Root is the root of the local file system. It has no effect on App Engine.
+var Root = "."
+
+// A Context is an opaque context that is needed to perform file system
+// operations. Each context is associated with a single HTTP request.
+type Context struct {
+ context
+ ae interface{}
+}
+
+// NewContext returns a context associated with the given HTTP request.
+func NewContext(req *http.Request) *Context {
+ if ae != nil {
+ ctxt := ae.NewContext(req)
+ return &Context{ae: ctxt}
+ }
+ return newContext(req)
+}
+
+// A CacheKey is an opaque cache key that can be used to store new entries
+// in the cache. To ensure that the cache remains consistent with the underlying
+// file system, the correct procedure is:
+//
+// 1. Use CacheRead (or CacheLoad) to attempt to load the entry. If it succeeds, use it.
+// If not, continue, saving the CacheKey.
+//
+// 2. Read from the file system and construct the entry that would have
+// been in the cache. In order to be consistent, all the file system reads
+// should only refer to parts of the file system in the tree rooted at the path
+// passed to CacheRead.
+//
+// 3. Save the entry using CacheWrite (or CacheStore), using the key that was
+// created by the CacheRead (or CacheLoad) executed before reading from the
+// file system.
+//
+type CacheKey struct {
+ cacheKey
+ ae interface{}
+}
+
+// CacheRead reads from cache the entry with the given name and path.
+// The path specifies the scope of information stored in the cache entry.
+// An entry is invalidated by a write to any location in the file tree rooted at path.
+// The name is an uninterpreted identifier to distinguish the cache entry
+// from other entries using the same path.
+//
+// If it finds a cache entry, CacheRead returns the data and found=true.
+// If it does not find a cache entry, CacheRead returns data=nil and found=false.
+// Either way, CacheRead returns an appropriate cache key for storing to the
+// cache entry using CacheWrite.
+func (c *Context) CacheRead(name, path string) (ckey CacheKey, data []byte, found bool) {
+ if ae != nil {
+ key, data, found := ae.CacheRead(c.ae, name, path)
+ return CacheKey{ae: key}, data, found
+ }
+ return c.cacheRead(ckey, path)
+}
+
+// CacheLoad uses CacheRead to load gob-encoded data and decodes it into value.
+func (c *Context) CacheLoad(name, path string, value interface{}) (ckey CacheKey, found bool) {
+ ckey, data, found := c.CacheRead(name, path)
+ if found {
+ if err := gob.NewDecoder(bytes.NewBuffer(data)).Decode(value); err != nil {
+ c.Criticalf("gob Decode: %v", err)
+ found = false
+ }
+ }
+ return
+}
+
+// CacheWrite writes an entry to the cache with the given key, path, and data.
+// The cache entry will be invalidated the next time the file tree rooted at path is
+// modified in any way.
+func (c *Context) CacheWrite(ckey CacheKey, data []byte) {
+ if ae != nil {
+ ae.CacheWrite(c.ae, ckey.ae, data)
+ return
+ }
+ c.cacheWrite(ckey, data)
+}
+
+// CacheStore uses CacheWrite to save the gob-encoded form of value.
+func (c *Context) CacheStore(ckey CacheKey, value interface{}) {
+ var buf bytes.Buffer
+ if err := gob.NewEncoder(&buf).Encode(value); err != nil {
+ c.Criticalf("gob Encode: %v", err)
+ return
+ }
+ c.CacheWrite(ckey, buf.Bytes())
+}
+
+// Read returns the data associated with the file named by path.
+// It is a copy and can be modified without affecting the file.
+func (c *Context) Read(path string) ([]byte, *proto.FileInfo, error) {
+ if ae != nil {
+ return ae.Read(c.ae, path)
+ }
+ return c.read(path)
+}
+
+// Write replaces the data associated with the file named by path.
+func (c *Context) Write(path string, data []byte) error {
+ if ae != nil {
+ return ae.Write(c.ae, path, data)
+ }
+ return c.write(path, data)
+}
+
+// Remove removes the file named by path.
+func (c *Context) Remove(path string) error {
+ if ae != nil {
+ return ae.Remove(c.ae, path)
+ }
+ return c.remove(path)
+}
+
+// Mkdir creates a directory with the given path.
+// If the path already exists and is a directory, Mkdir returns no error.
+func (c *Context) Mkdir(path string) error {
+ if ae != nil {
+ return ae.Mkdir(c.ae, path)
+ }
+ return c.mkdir(path)
+}
+
+// ReadDir returns the contents of the directory named by the path.
+func (c *Context) ReadDir(path string) ([]proto.FileInfo, error) {
+ if ae != nil {
+ return ae.ReadDir(c.ae, path)
+ }
+ return c.readdir(path)
+}
+
+// ServeFile serves the named file as the response to the HTTP request.
+func (c *Context) ServeFile(w http.ResponseWriter, req *http.Request, name string) {
+ root := &httpFS{c, name}
+ http.FileServer(root).ServeHTTP(w, req)
+}
+
+// Criticalf logs the message at critical priority.
+func (c *Context) Criticalf(format string, args ...interface{}) {
+ if ae != nil {
+ ae.Criticalf(c.ae, format, args...)
+ }
+ log.Printf(format, args...)
+}
+
+// User returns the name of the user running the request.
+func (c *Context) User() string {
+ if ae != nil {
+ return ae.User(c.ae)
+ }
+ return os.Getenv("USER")
+}
+
+type httpFS struct {
+ c *Context
+ name string
+}
+
+type httpFile struct {
+ data []byte
+ fi *proto.FileInfo
+ off int
+}
+
+func (h *httpFS) Open(_ string) (http.File, error) {
+ data, fi, err := h.c.Read(h.name)
+ if err != nil {
+ return nil, err
+ }
+ return &httpFile{data, fi, 0}, nil
+}
+
+func (f *httpFile) Close() error {
+ return nil
+}
+
+type fileInfo struct {
+ p *proto.FileInfo
+}
+
+func (f *fileInfo) IsDir() bool { return f.p.IsDir }
+func (f *fileInfo) Name() string { return f.p.Name }
+func (f *fileInfo) ModTime() time.Time { return f.p.ModTime }
+func (f *fileInfo) Size() int64 { return f.p.Size }
+func (f *fileInfo) Sys() interface{} { return f.p }
+func (f *fileInfo) Mode() os.FileMode {
+ if f.p.IsDir {
+ return os.ModeDir | 0777
+ }
+ return 0666
+}
+
+func (f *httpFile) Stat() (os.FileInfo, error) {
+ return &fileInfo{f.fi}, nil
+}
+
+func (f *httpFile) Readdir(count int) ([]os.FileInfo, error) {
+ return nil, fmt.Errorf("no directory")
+}
+
+func (f *httpFile) Read(data []byte) (int, error) {
+ if f.off >= len(f.data) {
+ return 0, io.EOF
+ }
+ n := copy(data, f.data[f.off:])
+ f.off += n
+ return n, nil
+}
+
+func (f *httpFile) Seek(offset int64, whence int) (int64, error) {
+ off := int(offset)
+ if int64(off) != offset {
+ return 0, fmt.Errorf("invalid offset")
+ }
+ switch whence {
+ case 1:
+ off += f.off
+ case 2:
+ off += len(f.data)
+ }
+ f.off = off
+ return int64(off), nil
+}
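
A minimal sketch of the cache protocol described in the CacheKey comment above (load, rebuild from the file system on a miss, then store under the key obtained before reading); the handler, the entry name "rendered", and the path "/doc/index" are hypothetical:

package main

import (
    "log"
    "net/http"

    "github.com/mattermost/rsc/appfs/fs"
)

// page is the client-defined value cached alongside the file tree.
type page struct {
    Body string
}

func serve(w http.ResponseWriter, req *http.Request) {
    c := fs.NewContext(req)

    // 1. Try the cache, keeping the key for a possible later store.
    var p page
    key, found := c.CacheLoad("rendered", "/doc/index", &p)
    if !found {
        // 2. Rebuild the entry, reading only under the path given to CacheLoad.
        data, _, err := c.Read("/doc/index")
        if err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        p = page{Body: string(data)}

        // 3. Store the rebuilt entry under the key from step 1.
        c.CacheStore(key, &p)
    }
    w.Write([]byte(p.Body))
}

func main() {
    http.HandleFunc("/", serve)
    log.Fatal(http.ListenAndServe("localhost:8081", nil))
}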
diff --git a/vendor/github.com/mattermost/rsc/appfs/fs/local.go b/vendor/github.com/mattermost/rsc/appfs/fs/local.go
new file mode 100644
index 000000000..c78b35b64
--- /dev/null
+++ b/vendor/github.com/mattermost/rsc/appfs/fs/local.go
@@ -0,0 +1,82 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fs
+
+import (
+ "io/ioutil"
+ "net/http"
+ "os"
+ "path/filepath"
+
+ "github.com/mattermost/rsc/appfs/proto"
+)
+
+type context struct{}
+
+type cacheKey struct{}
+
+func newContext(req *http.Request) *Context {
+ return &Context{}
+}
+
+func (*context) cacheRead(ckey CacheKey, path string) (CacheKey, []byte, bool) {
+ return ckey, nil, false
+}
+
+func (*context) cacheWrite(ckey CacheKey, data []byte) {
+}
+
+func (*context) read(path string) ([]byte, *proto.FileInfo, error) {
+ p := filepath.Join(Root, path)
+ dir, err := os.Stat(p)
+ if err != nil {
+ return nil, nil, err
+ }
+ fi := &proto.FileInfo{
+ Name: dir.Name(),
+ ModTime: dir.ModTime(),
+ Size: dir.Size(),
+ IsDir: dir.IsDir(),
+ }
+ data, err := ioutil.ReadFile(p)
+ return data, fi, err
+}
+
+func (*context) write(path string, data []byte) error {
+ p := filepath.Join(Root, path)
+ return ioutil.WriteFile(p, data, 0666)
+}
+
+func (*context) remove(path string) error {
+ p := filepath.Join(Root, path)
+ return os.Remove(p)
+}
+
+func (*context) mkdir(path string) error {
+ p := filepath.Join(Root, path)
+ fi, err := os.Stat(p)
+ if err == nil && fi.IsDir() {
+ return nil
+ }
+ return os.Mkdir(p, 0777)
+}
+
+func (*context) readdir(path string) ([]proto.FileInfo, error) {
+ p := filepath.Join(Root, path)
+ dirs, err := ioutil.ReadDir(p)
+ if err != nil {
+ return nil, err
+ }
+ var out []proto.FileInfo
+ for _, dir := range dirs {
+ out = append(out, proto.FileInfo{
+ Name: dir.Name(),
+ ModTime: dir.ModTime(),
+ Size: dir.Size(),
+ IsDir: dir.IsDir(),
+ })
+ }
+ return out, nil
+}
diff --git a/vendor/github.com/mattermost/rsc/appfs/proto/data.go b/vendor/github.com/mattermost/rsc/appfs/proto/data.go
new file mode 100644
index 000000000..ac15411a8
--- /dev/null
+++ b/vendor/github.com/mattermost/rsc/appfs/proto/data.go
@@ -0,0 +1,55 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package proto defines the protocol between appfs client and server.
+package proto
+
+import "time"
+
+// An Auth appears, JSON-encoded, as the X-Appfs-Auth header line,
+// to authenticate a request made to the file server.
+// The authentication scheme could be made more sophisticated, but since
+// we are already forcing the use of TLS, a plain password is fine for now.
+type Auth struct {
+ Password string
+}
+
+// GET /.appfs/stat/path returns the metadata for a file or directory,
+// a JSON-encoded FileInfo.
+const StatURL = "/.appfs/stat/"
+
+// GET /.appfs/read/path returns the content of the file or directory.
+// The body of the response is the raw file or directory content.
+// The content of a directory is a sequence of JSON-encoded FileInfo.
+const ReadURL = "/.appfs/read/"
+
+// POST to /.appfs/write/path writes new data to a file.
+// The X-Appfs-SHA1 header is the SHA1 hash of the data.
+// The body of the request is the raw file content.
+const WriteURL = "/.appfs/write/"
+
+// POST to /.appfs/mkfs initializes the file system if it does not
+// yet exist in the datastore.
+const MkfsURL = "/.appfs/mkfs"
+
+// POST to /.appfs/create/path creates a new file or directory.
+// The named path must not already exist; its parent must exist.
+// The query parameter dir=1 indicates that a directory should be created.
+const CreateURL = "/.appfs/create/"
+
+// POST to /.appfs/remove/path removes the file or directory.
+// A directory must be empty to be removed.
+const RemoveURL = "/.appfs/remove/"
+
+// A FileInfo is a directory entry.
+type FileInfo struct {
+ Name string // final path element
+ ModTime time.Time
+ Size int64
+ IsDir bool
+}
+
+// PostContentType is the Content-Type for POSTed data.
+// There is no encoding or framing: it is just raw data bytes.
+const PostContentType = "x-appfs/raw"
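
A small sketch of the wire protocol defined above: issuing a stat request and decoding the JSON-encoded FileInfo reply. The host, credentials, and path are hypothetical; embedding the credentials in the URL mirrors what the appfs/client package does.

package main

import (
    "encoding/json"
    "fmt"
    "log"
    "net/http"

    "github.com/mattermost/rsc/appfs/proto"
)

func main() {
    // GET /.appfs/stat/<path> returns a JSON-encoded proto.FileInfo.
    resp, err := http.Get("http://dummy:dummy@localhost:8080" + proto.StatURL + "doc/index")
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()

    var fi proto.FileInfo
    if err := json.NewDecoder(resp.Body).Decode(&fi); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s: %d bytes, dir=%v, modified %v\n", fi.Name, fi.Size, fi.IsDir, fi.ModTime)
}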
diff --git a/vendor/github.com/mattermost/rsc/appfs/server/app.go b/vendor/github.com/mattermost/rsc/appfs/server/app.go
new file mode 100644
index 000000000..9486eac41
--- /dev/null
+++ b/vendor/github.com/mattermost/rsc/appfs/server/app.go
@@ -0,0 +1,982 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package server implements an appfs server backed by the
+// App Engine datastore.
+package server
+
+import (
+ "bytes"
+ "crypto/sha1"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "path"
+ "strconv"
+ "strings"
+ "time"
+
+ "appengine"
+ "appengine/datastore"
+ "appengine/memcache"
+ "appengine/user"
+
+ "github.com/mattermost/rsc/appfs/fs"
+ "github.com/mattermost/rsc/appfs/proto"
+)
+
+const pwFile = "/.password"
+var chatty = false
+
+func init() {
+ handle(proto.ReadURL, (*request).read)
+ handle(proto.WriteURL, (*request).write)
+ handle(proto.StatURL, (*request).stat)
+ handle(proto.MkfsURL, (*request).mkfs)
+ handle(proto.CreateURL, (*request).create)
+ handle(proto.RemoveURL, (*request).remove)
+}
+
+type request struct {
+ w http.ResponseWriter
+ req *http.Request
+ c appengine.Context
+ name string
+ mname string
+ key *datastore.Key
+}
+
+func auth(r *request) bool {
+ hdr := r.req.Header.Get("Authorization")
+ if !strings.HasPrefix(hdr, "Basic ") {
+ return false
+ }
+ data, err := base64.StdEncoding.DecodeString(hdr[6:])
+ if err != nil {
+ return false
+ }
+ i := bytes.IndexByte(data, ':')
+ if i < 0 {
+ return false
+ }
+ user, passwd := string(data[:i]), string(data[i+1:])
+
+ _, data, err = read(r.c, pwFile)
+ if err != nil {
+ r.c.Errorf("reading %s: %v", pwFile, err)
+ if _, err := mkfs(r.c); err != nil {
+ r.c.Errorf("creating fs: %v", err)
+ }
+ _, data, err = read(r.c, pwFile)
+ if err != nil {
+ r.c.Errorf("reading %s again: %v", pwFile, err)
+ return false
+ }
+ }
+
+ lines := strings.Split(string(data), "\n")
+ for _, line := range lines {
+ if strings.HasPrefix(line, "#") {
+ continue
+ }
+ f := strings.Fields(line)
+ if len(f) < 3 {
+ continue
+ }
+ if f[0] == user {
+ return hash(f[1]+passwd) == f[2]
+ }
+ }
+ return false
+}
+
+func hash(s string) string {
+ h := sha1.New()
+ h.Write([]byte(s))
+ return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+func handle(prefix string, f func(*request)) {
+ http.HandleFunc(prefix, func(w http.ResponseWriter, req *http.Request) {
+ c := appengine.NewContext(req)
+ r := &request{
+ w: w,
+ req: req,
+ c: c,
+ }
+
+ if strings.HasSuffix(prefix, "/") {
+ r.name, r.mname, r.key = mangle(c, req.URL.Path[len(prefix)-1:])
+ } else {
+ req.URL.Path = "/"
+ }
+ defer func() {
+ if err := recover(); err != nil {
+ w.WriteHeader(http.StatusConflict)
+ fmt.Fprintf(w, "%s\n", err)
+ }
+ }()
+
+ if !auth(r) {
+ w.Header().Set("WWW-Authenticate", "Basic realm=\"appfs\"")
+ http.Error(w, "Need auth", http.StatusUnauthorized)
+ return
+ }
+
+ f(r)
+ })
+}
+
+func mangle(c appengine.Context, name string) (string, string, *datastore.Key) {
+ name = path.Clean("/" + name)
+ n := strings.Count(name, "/")
+ if name == "/" {
+ n = 0
+ }
+ mname := fmt.Sprintf("%d%s", n, name)
+ root := datastore.NewKey(c, "RootKey", "v2:", 0, nil)
+ key := datastore.NewKey(c, "FileInfo", mname, 0, root)
+ return name, mname, key
+}
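
A small, self-contained illustration of the mangling scheme above (a depth prefix followed by the cleaned path), which is what lets readdir below select a directory's children with a simple key range; the paths are hypothetical:

package main

import (
    "fmt"
    "path"
    "strings"
)

func mangledPath(name string) string {
    name = path.Clean("/" + name)
    n := strings.Count(name, "/")
    if name == "/" {
        n = 0
    }
    return fmt.Sprintf("%d%s", n, name)
}

func main() {
    fmt.Println(mangledPath("/"))          // "0/"
    fmt.Println(mangledPath("/doc"))       // "1/doc"
    fmt.Println(mangledPath("/doc/index")) // "2/doc/index"
    // The children of /doc are exactly the entities with
    // "2/doc/" <= Path < "2/doc0" ('0' is the byte after '/').
}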
+
+type FileInfo struct {
+ Path string // mangled path
+ Name string
+ Qid int64 // assigned unique id number
+ Seq int64 // modification sequence number in file tree
+ ModTime time.Time
+ Size int64
+ IsDir bool
+}
+
+type FileData struct {
+ Data []byte
+}
+
+func stat(c appengine.Context, name string) (*FileInfo, error) {
+ var fi FileInfo
+ name, _, key := mangle(c, name)
+ c.Infof("DATASTORE Stat %q", name)
+ err := datastore.Get(c, key, &fi)
+ if err != nil {
+ return nil, err
+ }
+ return &fi, nil
+}
+
+func (r *request) saveStat(fi *FileInfo) {
+ jfi, err := json.Marshal(&fi)
+ if err != nil {
+ panic(err)
+ }
+ r.w.Header().Set("X-Appfs-Stat", string(jfi))
+}
+
+func (r *request) tx(f func(c appengine.Context) error) {
+ err := datastore.RunInTransaction(r.c, f, &datastore.TransactionOptions{XG: true})
+ if err != nil {
+ panic(err)
+ }
+}
+
+func (r *request) stat() {
+ var fi *FileInfo
+ r.tx(func(c appengine.Context) error {
+ fi1, err := stat(c, r.name)
+ if err != nil {
+ return err
+ }
+ fi = fi1
+ return nil
+ })
+
+ jfi, err := json.Marshal(&fi)
+ if err != nil {
+ panic(err)
+ }
+ r.w.Write(jfi)
+}
+
+func read(c appengine.Context, name string) (fi *FileInfo, data []byte, err error) {
+ name, _, _ = mangle(c, name)
+ fi1, err := stat(c, name)
+ if err != nil {
+ return nil, nil, err
+ }
+ if fi1.IsDir {
+ dt, err := readdir(c, name)
+ if err != nil {
+ return nil, nil, err
+ }
+ fi = fi1
+ data = dt
+ return fi, data, nil
+ }
+
+ root := datastore.NewKey(c, "RootKey", "v2:", 0, nil)
+ dkey := datastore.NewKey(c, "FileData", "", fi1.Qid, root)
+ var fd FileData
+ c.Infof("DATASTORE Read %q", name)
+ if err := datastore.Get(c, dkey, &fd); err != nil {
+ return nil, nil, err
+ }
+ fi = fi1
+ data = fd.Data
+ return fi, data, nil
+}
+
+func (r *request) read() {
+ var (
+ fi *FileInfo
+ data []byte
+ )
+ r.tx(func(c appengine.Context) error {
+ var err error
+ fi, data, err = read(r.c, r.name)
+ return err
+ })
+ r.saveStat(fi)
+ r.w.Write(data)
+}
+
+func readdir(c appengine.Context, name string) ([]byte, error) {
+ name, _, _ = mangle(c, name)
+ var buf bytes.Buffer
+
+ n := strings.Count(name, "/")
+ if name == "/" {
+ name = ""
+ n = 0
+ }
+ root := datastore.NewKey(c, "RootKey", "v2:", 0, nil)
+ first := fmt.Sprintf("%d%s/", n+1, name)
+ limit := fmt.Sprintf("%d%s0", n+1, name)
+ c.Infof("DATASTORE ReadDir %q", name)
+ q := datastore.NewQuery("FileInfo").
+ Filter("Path >=", first).
+ Filter("Path <", limit).
+ Ancestor(root)
+ enc := json.NewEncoder(&buf)
+ it := q.Run(c)
+ var fi FileInfo
+ var pfi proto.FileInfo
+ for {
+ fi = FileInfo{}
+ _, err := it.Next(&fi)
+ if err != nil {
+ if err == datastore.Done {
+ break
+ }
+ return nil, err
+ }
+ pfi = proto.FileInfo{
+ Name: fi.Name,
+ ModTime: fi.ModTime,
+ Size: fi.Size,
+ IsDir: fi.IsDir,
+ }
+ if err := enc.Encode(&pfi); err != nil {
+ return nil, err
+ }
+ }
+
+ return buf.Bytes(), nil
+}
+
+func readdirRaw(c appengine.Context, name string) ([]proto.FileInfo, error) {
+ name, _, _ = mangle(c, name)
+ n := strings.Count(name, "/")
+ if name == "/" {
+ name = ""
+ n = 0
+ }
+ root := datastore.NewKey(c, "RootKey", "v2:", 0, nil)
+ first := fmt.Sprintf("%d%s/", n+1, name)
+ limit := fmt.Sprintf("%d%s0", n+1, name)
+ c.Infof("DATASTORE ReadDir %q", name)
+ q := datastore.NewQuery("FileInfo").
+ Filter("Path >=", first).
+ Filter("Path <", limit).
+ Ancestor(root)
+ it := q.Run(c)
+ var fi FileInfo
+ var pfi proto.FileInfo
+ var out []proto.FileInfo
+ for {
+ fi = FileInfo{}
+ _, err := it.Next(&fi)
+ if err != nil {
+ if err == datastore.Done {
+ break
+ }
+ return nil, err
+ }
+ pfi = proto.FileInfo{
+ Name: fi.Name,
+ ModTime: fi.ModTime,
+ Size: fi.Size,
+ IsDir: fi.IsDir,
+ }
+ out = append(out, pfi)
+ }
+println("READDIR", name, len(out))
+ return out, nil
+}
+
+
+var initPasswd = `# Password file
+# This file controls access to the server.
+# The format is lines of space-separated fields:
+# user salt pwhash
+# The pwhash is the SHA1 of the salt string concatenated with the password.
+
+# user=dummy password=dummy (replace with your own entries)
+dummy 12345 faa863c7d3d41893f80165c704b714d5e31bdd3b
+`
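
A short sketch of generating a password-file line in the format above, using the same salt-then-password SHA1 as the auth function; the user, salt, and password are hypothetical:

package main

import (
    "crypto/sha1"
    "fmt"
)

func main() {
    user, salt, password := "alice", "98765", "s3cret"
    sum := sha1.Sum([]byte(salt + password))
    // Each line of /.password is: user salt sha1(salt+password).
    fmt.Printf("%s %s %x\n", user, salt, sum)
}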
+
+func (r *request) mkfs() {
+ var fi *FileInfo
+ r.tx(func(c appengine.Context) error {
+ var err error
+ fi, err = mkfs(c)
+ return err
+ })
+ r.saveStat(fi)
+}
+
+func mkfs(c appengine.Context) (fi *FileInfo, err error) {
+ fi1, err := stat(c, "/")
+ if err == nil {
+ return fi1, nil
+ }
+
+ // Root needs to be created.
+ // Probably root key does too.
+ root := datastore.NewKey(c, "RootKey", "v2:", 0, nil)
+ _, err = datastore.Put(c, root, &struct{}{})
+ if err != nil {
+ return nil, fmt.Errorf("mkfs put root: %s", err)
+ }
+
+ // Entry for /.
+ _, mpath, key := mangle(c, "/")
+ fi3 := FileInfo{
+ Path: mpath,
+ Name: "/",
+ Seq: 2, // 2, not 1, because we're going to write password file with #2
+ Qid: 1,
+ ModTime: time.Now(),
+ Size: 0,
+ IsDir: true,
+ }
+ _, err = datastore.Put(c, key, &fi3)
+ if err != nil {
+ return nil, fmt.Errorf("mkfs put /: %s", err)
+ }
+
+ /*
+ * Would like to use this code but App Engine apparently
+ * does not let Get observe the effect of a Put in the same
+ * transaction. What planet does that make sense on?
+ * Instead, we have to execute just the datastore writes that this
+ * sequence would.
+ *
+ _, err = create(c, pwFile, false)
+ if err != nil {
+ return nil, fmt.Errorf("mkfs create .password: %s", err)
+ }
+ _, err = write(c, pwFile, []byte(initPasswd))
+ if err != nil {
+ return nil, fmt.Errorf("mkfs write .password: %s", err)
+ }
+ *
+ */
+
+ {
+ name, mname, key := mangle(c, pwFile)
+
+ // Create data object.
+ dataKey := int64(2)
+ root := datastore.NewKey(c, "RootKey", "v2:", 0, nil)
+ dkey := datastore.NewKey(c, "FileData", "", dataKey, root)
+ _, err := datastore.Put(c, dkey, &FileData{[]byte(initPasswd)})
+ if err != nil {
+ return nil, err
+ }
+
+ // Create new directory entry.
+ _, elem := path.Split(name)
+ fi1 = &FileInfo{
+ Path: mname,
+ Name: elem,
+ Qid: 2,
+ Seq: 2,
+ ModTime: time.Now(),
+ Size: int64(len(initPasswd)),
+ IsDir: false,
+ }
+ if _, err := datastore.Put(c, key, fi1); err != nil {
+ return nil, err
+ }
+ }
+
+ return &fi3, nil
+}
+
+func (r *request) write() {
+ data, err := ioutil.ReadAll(r.req.Body)
+ if err != nil {
+ panic(err)
+ }
+
+ var fi *FileInfo
+ var seq int64
+ r.tx(func(c appengine.Context) error {
+ var err error
+ fi, seq, err = write(r.c, r.name, data)
+ return err
+ })
+ updateCacheTime(r.c, seq)
+ r.saveStat(fi)
+}
+
+func write(c appengine.Context, name string, data []byte) (*FileInfo, int64, error) {
+ name, _, key := mangle(c, name)
+
+ // Check that file exists and is not a directory.
+ fi1, err := stat(c, name)
+ if err != nil {
+ return nil, 0, err
+ }
+ if fi1.IsDir {
+ return nil, 0, fmt.Errorf("cannot write to directory")
+ }
+
+ // Fetch and increment root sequence number.
+ rfi, err := stat(c, "/")
+ if err != nil {
+ return nil, 0, err
+ }
+ rfi.Seq++
+
+ // Write data.
+ root := datastore.NewKey(c, "RootKey", "v2:", 0, nil)
+ dkey := datastore.NewKey(c, "FileData", "", fi1.Qid, root)
+ fd := &FileData{data}
+ if _, err := datastore.Put(c, dkey, fd); err != nil {
+ return nil, 0, err
+ }
+
+ // Update directory entry.
+ fi1.Seq = rfi.Seq
+ fi1.Size = int64(len(data))
+ fi1.ModTime = time.Now()
+ if _, err := datastore.Put(c, key, fi1); err != nil {
+ return nil, 0, err
+ }
+
+ // Update sequence numbers all the way to the root.
+ if err := updateSeq(c, name, rfi.Seq, 1); err != nil {
+ return nil, 0, err
+ }
+
+ return fi1, rfi.Seq, nil
+}
+
+func updateSeq(c appengine.Context, name string, seq int64, skip int) error {
+ p := path.Clean(name)
+ for i := 0; ; i++ {
+ if i >= skip {
+ _, _, key := mangle(c, p)
+ var fi FileInfo
+ if err := datastore.Get(c, key, &fi); err != nil {
+ return err
+ }
+ fi.Seq = seq
+ if _, err := datastore.Put(c, key, &fi); err != nil {
+ return err
+ }
+ }
+ if p == "/" {
+ break
+ }
+ p, _ = path.Split(p)
+ p = path.Clean(p)
+ }
+ return nil
+}
+
+func (r *request) remove() {
+ panic("remove not implemented")
+}
+
+func (r *request) create() {
+ var fi *FileInfo
+ var seq int64
+ isDir := r.req.FormValue("dir") == "1"
+ r.tx(func(c appengine.Context) error {
+ var err error
+ fi, seq, err = create(r.c, r.name, isDir, nil)
+ return err
+ })
+ updateCacheTime(r.c, seq)
+ r.saveStat(fi)
+}
+
+func create(c appengine.Context, name string, isDir bool, data []byte) (*FileInfo, int64, error) {
+ name, mname, key := mangle(c, name)
+
+ // File must not exist.
+ fi1, err := stat(c, name)
+ if err == nil {
+ return nil, 0, fmt.Errorf("file already exists")
+ }
+ if err != datastore.ErrNoSuchEntity {
+ return nil, 0, err
+ }
+
+ // Parent must exist and be a directory.
+ p, _ := path.Split(name)
+ fi2, err := stat(c, p)
+ if err != nil {
+ if err == datastore.ErrNoSuchEntity {
+ return nil, 0, fmt.Errorf("parent directory %q does not exist", p)
+ }
+ return nil, 0, err
+ }
+ if !fi2.IsDir {
+ return nil, 0, fmt.Errorf("parent %q is not a directory", p)
+ }
+
+ // Fetch and increment root sequence number.
+ rfi, err := stat(c, "/")
+ if err != nil {
+ return nil, 0, err
+ }
+ rfi.Seq++
+
+ var dataKey int64
+ // Create data object.
+ if !isDir {
+ dataKey = rfi.Seq
+ root := datastore.NewKey(c, "RootKey", "v2:", 0, nil)
+ dkey := datastore.NewKey(c, "FileData", "", dataKey, root)
+ _, err := datastore.Put(c, dkey, &FileData{data})
+ if err != nil {
+ return nil, 0, err
+ }
+ }
+
+ // Create new directory entry.
+ _, elem := path.Split(name)
+ fi1 = &FileInfo{
+ Path: mname,
+ Name: elem,
+ Qid: rfi.Seq,
+ Seq: rfi.Seq,
+ ModTime: time.Now(),
+ Size: int64(len(data)),
+ IsDir: isDir,
+ }
+ if _, err := datastore.Put(c, key, fi1); err != nil {
+ return nil, 0, err
+ }
+
+ // Update sequence numbers all the way to root,
+ // but skip entry we just wrote.
+ if err := updateSeq(c, name, rfi.Seq, 1); err != nil {
+ return nil, 0, err
+ }
+
+ return fi1, rfi.Seq, nil
+}
+
+// Implementation of fs.AppEngine.
+
+func init() {
+ fs.Register(ae{})
+}
+
+type ae struct{}
+
+func tx(c interface{}, f func(c appengine.Context) error) error {
+ return datastore.RunInTransaction(c.(appengine.Context), f, &datastore.TransactionOptions{XG: true})
+}
+
+func (ae) NewContext(req *http.Request) interface{} {
+ return appengine.NewContext(req)
+}
+
+func (ae) User(ctxt interface{}) string {
+ c := ctxt.(appengine.Context)
+ u := user.Current(c)
+ if u == nil {
+ return "?"
+ }
+ return u.String()
+}
+
+type cacheKey struct {
+ t int64
+ name string
+}
+
+func (ae) CacheRead(ctxt interface{}, name, path string) (key interface{}, data []byte, found bool) {
+ c := ctxt.(appengine.Context)
+ t, data, _, err := cacheRead(c, "cache", name, path)
+ return &cacheKey{t, name}, data, err == nil
+}
+
+func (ae) CacheWrite(ctxt, key interface{}, data []byte) {
+ c := ctxt.(appengine.Context)
+ k := key.(*cacheKey)
+ cacheWrite(c, k.t, "cache", k.name, data)
+}
+
+func (ae ae) Read(ctxt interface{}, name string) (data []byte, pfi *proto.FileInfo, err error) {
+ c := ctxt.(appengine.Context)
+ name = path.Clean("/"+name)
+ if chatty {
+ c.Infof("AE Read %s", name)
+ }
+ _, data, pfi, err = cacheRead(c, "data", name, name)
+ if err != nil {
+ err = fmt.Errorf("Read %q: %v", name, err)
+ }
+ return
+}
+
+func (ae) Write(ctxt interface{}, path string, data []byte) error {
+ var seq int64
+ err := tx(ctxt, func(c appengine.Context) error {
+ _, err := stat(c, path)
+ if err != nil {
+ _, seq, err = create(c, path, false, data)
+ } else {
+ _, seq, err = write(c, path, data)
+ }
+ return err
+ })
+ if seq != 0 {
+ updateCacheTime(ctxt.(appengine.Context), seq)
+ }
+ if err != nil {
+ err = fmt.Errorf("Write %q: %v", path, err)
+ }
+ return err
+}
+
+func (ae) Remove(ctxt interface{}, path string) error {
+ return fmt.Errorf("remove not implemented")
+}
+
+func (ae) Mkdir(ctxt interface{}, path string) error {
+ var seq int64
+ err := tx(ctxt, func(c appengine.Context) error {
+ var err error
+ _, seq, err = create(c, path, true, nil)
+ return err
+ })
+ if seq != 0 {
+ updateCacheTime(ctxt.(appengine.Context), seq)
+ }
+ if err != nil {
+ err = fmt.Errorf("Mkdir %q: %v", path, err)
+ }
+ return err
+}
+
+func (ae) Criticalf(ctxt interface{}, format string, args ...interface{}) {
+ ctxt.(appengine.Context).Criticalf(format, args...)
+}
+
+type readDirCacheEntry struct {
+ Dir []proto.FileInfo
+ Error string
+}
+
+func (ae) ReadDir(ctxt interface{}, name string) (dir []proto.FileInfo, err error) {
+ c := ctxt.(appengine.Context)
+ name = path.Clean("/"+name)
+ t, data, _, err := cacheRead(c, "dir", name, name)
+ if err == nil {
+ var e readDirCacheEntry
+ if err := json.Unmarshal(data, &e); err != nil {
+ c.Criticalf("unmarshal cached dir %q: %v", name, err)
+ } else {
+ if chatty {
+ c.Infof("cached ReadDir %q", name)
+ }
+ if e.Error != "" {
+ return nil, errors.New(e.Error)
+ }
+ return e.Dir, nil
+ }
+ }
+ err = tx(ctxt, func(c appengine.Context) error {
+ var err error
+ dir, err = readdirRaw(c, name)
+ return err
+ })
+ var e readDirCacheEntry
+ e.Dir = dir
+ if err != nil {
+ err = fmt.Errorf("ReadDir %q: %v", name, err)
+ e.Error = err.Error()
+ }
+ if data, err := json.Marshal(&e); err != nil {
+ c.Criticalf("json marshal cached dir: %v", err)
+ } else {
+ c.Criticalf("caching dir %q@%d %d bytes", name, t, len(data))
+ cacheWrite(c, t, "dir", name, data)
+ }
+ return
+}
+
+// Caching of file system data.
+//
+// The cache stores entries under keys of the form time,space,name,
+// where time is the time at which the entry is valid for, space is a name
+// space identifier, and name is an arbitrary name.
+//
+// A key of the form t,mtime,path maps to an integer value giving the
+// modification time of the named path at root time t.
+// The special key 0,mtime,/ is an integer giving the current time at the root.
+//
+// A key of the form t,data,path maps to the content of path at time t.
+//
+// Thus, a read from path should first obtain the root time,
+// then obtain the modification time for the path at that root time
+// then obtain the data for that path.
+// t1 = get(0,mtime,/)
+// t2 = get(t1,mtime,path)
+// data = get(t2,data,path)
+//
+// The API allows clients to cache their own data too, with expiry tied to
+// the modification time of a particular path (file or directory). To look
+// up one of those, we use:
+// t1 = get(0,mtime,/)
+// t2 = get(t1,mtime,path)
+// data = get(t2,clientdata,name)
+//
+// To store data in the cache, the t1, t2 should be determined before reading
+// from datastore. Then the data should be saved under t2. This ensures
+// that if a datastore update happens after the read but before the cache write,
+// we'll be writing to an entry that will no longer be used (t2).
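
A minimal, self-contained illustration of the lookup chain described above, using a plain map in place of memcache; the sequence numbers and path are hypothetical:

package main

import "fmt"

func main() {
    // Hypothetical cache contents after the root has reached sequence 17
    // and /doc/index was last modified at sequence 15.
    cache := map[string]string{
        "0,mtime,/":           "17",
        "17,mtime,/doc/index": "15",
        "15,data,/doc/index":  "hello, appfs",
    }

    t1 := cache["0,mtime,/"]                                  // current root time
    t2 := cache[fmt.Sprintf("%s,mtime,%s", t1, "/doc/index")] // path mtime at that root time
    data := cache[fmt.Sprintf("%s,data,%s", t2, "/doc/index")]
    fmt.Println(data) // a later write under /doc/index bumps t2, so this entry is no longer consulted
}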
+
+const rootMemcacheKey = "0,mtime,/"
+
+func updateCacheTime(c appengine.Context, seq int64) {
+ const key = rootMemcacheKey
+ bseq := []byte(strconv.FormatInt(seq, 10))
+ for tries := 0; tries < 10; tries++ {
+ item, err := memcache.Get(c, key)
+ if err != nil {
+ c.Infof("memcache.Get %q: %v", key, err)
+ err = memcache.Add(c, &memcache.Item{Key: key, Value: bseq})
+ if err == nil {
+ c.Infof("memcache.Add %q %q ok", key, bseq)
+ return
+ }
+ c.Infof("memcache.Add %q %q: %v", key, bseq, err)
+ }
+ v, err := strconv.ParseInt(string(item.Value), 10, 64)
+ if err != nil {
+ c.Criticalf("memcache.Get %q = %q (%v)", key, item.Value, err)
+ return
+ }
+ if v >= seq {
+ return
+ }
+ item.Value = bseq
+ err = memcache.CompareAndSwap(c, item)
+ if err == nil {
+ c.Infof("memcache.CAS %q %d->%d ok", key, v, seq)
+ return
+ }
+ c.Infof("memcache.CAS %q %d->%d: %v", key, v, seq, err)
+ }
+ c.Criticalf("repeatedly failed to update root key")
+}
+
+func cacheTime(c appengine.Context) (t int64, err error) {
+ const key = rootMemcacheKey
+ item, err := memcache.Get(c, key)
+ if err == nil {
+ v, err := strconv.ParseInt(string(item.Value), 10, 64)
+ if err == nil {
+ if chatty {
+ c.Infof("cacheTime %q = %v", key, v)
+ }
+ return v, nil
+ }
+ c.Criticalf("memcache.Get %q = %q (%v) - deleting", key, item.Value, err)
+ memcache.Delete(c, key)
+ }
+ fi, err := stat(c, "/")
+ if err != nil {
+ c.Criticalf("stat /: %v", err)
+ return 0, err
+ }
+ updateCacheTime(c, fi.Seq)
+ return fi.Seq, nil
+}
+
+func cachePathTime(c appengine.Context, path string) (t int64, err error) {
+ t, err = cacheTime(c)
+ if err != nil {
+ return 0, err
+ }
+
+ key := fmt.Sprintf("%d,mtime,%s", t, path)
+ item, err := memcache.Get(c, key)
+ if err == nil {
+ v, err := strconv.ParseInt(string(item.Value), 10, 64)
+ if err == nil {
+ if chatty {
+ c.Infof("cachePathTime %q = %v", key, v)
+ }
+ return v, nil
+ }
+ c.Criticalf("memcache.Get %q = %q (%v) - deleting", key, item.Value, err)
+ memcache.Delete(c, key)
+ }
+
+ var seq int64
+ if fi, err := stat(c, path); err == nil {
+ seq = fi.Seq
+ }
+
+ c.Infof("cachePathTime save %q = %v", key, seq)
+ item = &memcache.Item{Key: key, Value: []byte(strconv.FormatInt(seq, 10))}
+ if err := memcache.Set(c, item); err != nil {
+ c.Criticalf("memcache.Set %q %q: %v", key, item.Value, err)
+ }
+ return seq, nil
+}
+
+type statCacheEntry struct {
+ FileInfo *proto.FileInfo
+ Error string
+}
+
+func cacheRead(c appengine.Context, kind, name, path string) (mtime int64, data []byte, pfi *proto.FileInfo, err error) {
+ for tries := 0; tries < 10; tries++ {
+ t, err := cachePathTime(c, path)
+ if err != nil {
+ return 0, nil, nil, err
+ }
+
+ key := fmt.Sprintf("%d,%s,%s", t, kind, name)
+ item, err := memcache.Get(c, key)
+ var data []byte
+ if item != nil {
+ data = item.Value
+ }
+ if err != nil {
+ c.Infof("memcache miss %q %v", key, err)
+ } else if chatty {
+ c.Infof("memcache hit %q (%d bytes)", key, len(data))
+ }
+ if kind != "data" {
+ // Not a file; whatever memcache says is all we have.
+ return t, data, nil, err
+ }
+
+ // Load stat from cache (includes negative entry).
+ statkey := fmt.Sprintf("%d,stat,%s", t, name)
+ var st statCacheEntry
+ _, err = memcache.JSON.Get(c, statkey, &st)
+ if err == nil {
+ if st.Error != "" {
+ if chatty {
+ c.Infof("memcache hit stat error %q %q", statkey, st.Error)
+ }
+ err = errors.New(st.Error)
+ } else {
+ if chatty {
+ c.Infof("memcache hit stat %q", statkey)
+ }
+ }
+ if err != nil || data != nil {
+ return t, data, st.FileInfo, err
+ }
+ }
+
+ // Need stat, or maybe stat+data.
+ var fi *FileInfo
+ if data != nil {
+ c.Infof("stat %q", name)
+ fi, err = stat(c, name)
+ if err == nil && fi.Seq != t {
+ c.Criticalf("loaded %s but found stat %d", key, fi.Seq)
+ continue
+ }
+ } else {
+ c.Infof("read %q", name)
+ fi, data, err = read(c, name)
+ if err == nil && fi.Seq != t {
+ c.Infof("loaded %s but found read %d", key, fi.Seq)
+ t = fi.Seq
+ key = fmt.Sprintf("%d,data,%s", t, name)
+ statkey = fmt.Sprintf("%d,stat,%s", t, name)
+ }
+
+ // Save data to memcache.
+ if err == nil {
+ if true || chatty {
+ c.Infof("save data in memcache %q", key)
+ }
+ item := &memcache.Item{Key: key, Value: data}
+ if err := memcache.Set(c, item); err != nil {
+ c.Criticalf("failed to cache %s: %v", key, err)
+ }
+ }
+ }
+
+ // Cache stat, including error.
+ st = statCacheEntry{}
+ if fi != nil {
+ st.FileInfo = &proto.FileInfo{
+ Name: fi.Name,
+ ModTime: fi.ModTime,
+ Size: fi.Size,
+ IsDir: fi.IsDir,
+ }
+ }
+ if err != nil {
+ st.Error = err.Error()
+ // If this is a deadline exceeded, do not cache.
+ if strings.Contains(st.Error, "Canceled") || strings.Contains(st.Error, "Deadline") {
+ return t, data, st.FileInfo, err
+ }
+ }
+ if chatty {
+ c.Infof("save stat in memcache %q", statkey)
+ }
+ if err := memcache.JSON.Set(c, &memcache.Item{Key: statkey, Object: &st}); err != nil {
+ c.Criticalf("failed to cache %s: %v", statkey, err)
+ }
+
+ // Done!
+ return t, data, st.FileInfo, err
+ }
+
+ c.Criticalf("failed repeatedly in cacheRead")
+ return 0, nil, nil, errors.New("cacheRead loop failed")
+}
+
+func cacheWrite(c appengine.Context, t int64, kind, name string, data []byte) error {
+ mkey := fmt.Sprintf("%d,%s,%s", t, kind, name)
+ if true || chatty {
+ c.Infof("cacheWrite %s %d bytes", mkey, len(data))
+ }
+ err := memcache.Set(c, &memcache.Item{Key: mkey, Value: data})
+ if err != nil {
+ c.Criticalf("cacheWrite memcache.Set %q: %v", mkey, err)
+ }
+ return err
+}