summaryrefslogtreecommitdiffstats
path: root/vendor/github.com/hashicorp/hcl/json
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/github.com/hashicorp/hcl/json')
-rw-r--r--vendor/github.com/hashicorp/hcl/json/parser/flatten.go117
-rw-r--r--vendor/github.com/hashicorp/hcl/json/parser/parser.go313
-rw-r--r--vendor/github.com/hashicorp/hcl/json/parser/parser_test.go384
-rw-r--r--vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/array.json4
-rw-r--r--vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/bad_input_128.json1
-rw-r--r--vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/bad_input_tf_8110.json7
-rw-r--r--vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/basic.json3
-rw-r--r--vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/good_input_tf_8110.json7
-rw-r--r--vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/object.json5
-rw-r--r--vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/types.json10
-rw-r--r--vendor/github.com/hashicorp/hcl/json/scanner/scanner.go451
-rw-r--r--vendor/github.com/hashicorp/hcl/json/scanner/scanner_test.go362
-rw-r--r--vendor/github.com/hashicorp/hcl/json/test-fixtures/array.json4
-rw-r--r--vendor/github.com/hashicorp/hcl/json/test-fixtures/basic.json3
-rw-r--r--vendor/github.com/hashicorp/hcl/json/test-fixtures/object.json5
-rw-r--r--vendor/github.com/hashicorp/hcl/json/test-fixtures/types.json10
-rw-r--r--vendor/github.com/hashicorp/hcl/json/token/position.go46
-rw-r--r--vendor/github.com/hashicorp/hcl/json/token/token.go118
-rw-r--r--vendor/github.com/hashicorp/hcl/json/token/token_test.go34
19 files changed, 1884 insertions, 0 deletions
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/flatten.go b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go
new file mode 100644
index 000000000..f652d6fe7
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go
@@ -0,0 +1,117 @@
+package parser
+
+import "github.com/hashicorp/hcl/hcl/ast"
+
+// flattenObjects takes an AST node, walks it, and flattens nested objects.
+func flattenObjects(node ast.Node) {
+ ast.Walk(node, func(n ast.Node) (ast.Node, bool) {
+ // We only care about lists, because this is what we modify
+ list, ok := n.(*ast.ObjectList)
+ if !ok {
+ return n, true
+ }
+
+ // Rebuild the item list
+ items := make([]*ast.ObjectItem, 0, len(list.Items))
+ frontier := make([]*ast.ObjectItem, len(list.Items))
+ copy(frontier, list.Items)
+ for len(frontier) > 0 {
+ // Pop the current item
+ n := len(frontier)
+ item := frontier[n-1]
+ frontier = frontier[:n-1]
+
+ switch v := item.Val.(type) {
+ case *ast.ObjectType:
+ items, frontier = flattenObjectType(v, item, items, frontier)
+ case *ast.ListType:
+ items, frontier = flattenListType(v, item, items, frontier)
+ default:
+ items = append(items, item)
+ }
+ }
+
+ // Reverse the list since the frontier model runs things backwards
+ for i := len(items)/2 - 1; i >= 0; i-- {
+ opp := len(items) - 1 - i
+ items[i], items[opp] = items[opp], items[i]
+ }
+
+ // Done! Set the original items
+ list.Items = items
+ return n, true
+ })
+}
+
+func flattenListType(
+ ot *ast.ListType,
+ item *ast.ObjectItem,
+ items []*ast.ObjectItem,
+ frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+ // If the list is empty, keep the original list
+ if len(ot.List) == 0 {
+ items = append(items, item)
+ return items, frontier
+ }
+
+ // All the elements of this object must also be objects!
+ for _, subitem := range ot.List {
+ if _, ok := subitem.(*ast.ObjectType); !ok {
+ items = append(items, item)
+ return items, frontier
+ }
+ }
+
+ // Great! We have a match go through all the items and flatten
+ for _, elem := range ot.List {
+ // Add it to the frontier so that we can recurse
+ frontier = append(frontier, &ast.ObjectItem{
+ Keys: item.Keys,
+ Assign: item.Assign,
+ Val: elem,
+ LeadComment: item.LeadComment,
+ LineComment: item.LineComment,
+ })
+ }
+
+ return items, frontier
+}
+
+func flattenObjectType(
+ ot *ast.ObjectType,
+ item *ast.ObjectItem,
+ items []*ast.ObjectItem,
+ frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+ // If the list has no items we do not have to flatten anything
+ if ot.List.Items == nil {
+ items = append(items, item)
+ return items, frontier
+ }
+
+ // All the elements of this object must also be objects!
+ for _, subitem := range ot.List.Items {
+ if _, ok := subitem.Val.(*ast.ObjectType); !ok {
+ items = append(items, item)
+ return items, frontier
+ }
+ }
+
+ // Great! We have a match go through all the items and flatten
+ for _, subitem := range ot.List.Items {
+ // Copy the new key
+ keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys))
+ copy(keys, item.Keys)
+ copy(keys[len(item.Keys):], subitem.Keys)
+
+ // Add it to the frontier so that we can recurse
+ frontier = append(frontier, &ast.ObjectItem{
+ Keys: keys,
+ Assign: item.Assign,
+ Val: subitem.Val,
+ LeadComment: item.LeadComment,
+ LineComment: item.LineComment,
+ })
+ }
+
+ return items, frontier
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
new file mode 100644
index 000000000..6f4608530
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
@@ -0,0 +1,313 @@
+package parser
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ hcltoken "github.com/hashicorp/hcl/hcl/token"
+ "github.com/hashicorp/hcl/json/scanner"
+ "github.com/hashicorp/hcl/json/token"
+)
+
+type Parser struct {
+ sc *scanner.Scanner
+
+ // Last read token
+ tok token.Token
+ commaPrev token.Token
+
+ enableTrace bool
+ indent int
+ n int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+ return &Parser{
+ sc: scanner.New(src),
+ }
+}
+
+// Parse parses the given source and returns its abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+ p := newParser(src)
+ return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the given source and returns its abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+ f := &ast.File{}
+ var err, scerr error
+ p.sc.Error = func(pos token.Pos, msg string) {
+ scerr = fmt.Errorf("%s: %s", pos, msg)
+ }
+
+ // The root must be an object in JSON
+ object, err := p.object()
+ if scerr != nil {
+ return nil, scerr
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // We make our final node an object list so it is more HCL compatible
+ f.Node = object.List
+
+ // Flatten it, which finds patterns and turns them into more HCL-like
+ // AST trees.
+ flattenObjects(f.Node)
+
+ return f, nil
+}
+
+func (p *Parser) objectList() (*ast.ObjectList, error) {
+ defer un(trace(p, "ParseObjectList"))
+ node := &ast.ObjectList{}
+
+ for {
+ n, err := p.objectItem()
+ if err == errEofToken {
+ break // we are finished
+ }
+
+ // we don't return a nil node, because might want to use already
+ // collected items.
+ if err != nil {
+ return node, err
+ }
+
+ node.Add(n)
+
+ // Check for a followup comma. If it isn't a comma, then we're done
+ if tok := p.scan(); tok.Type != token.COMMA {
+ break
+ }
+ }
+
+ return node, nil
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+ defer un(trace(p, "ParseObjectItem"))
+
+ keys, err := p.objectKey()
+ if err != nil {
+ return nil, err
+ }
+
+ o := &ast.ObjectItem{
+ Keys: keys,
+ }
+
+ switch p.tok.Type {
+ case token.COLON:
+ pos := p.tok.Pos
+ o.Assign = hcltoken.Pos{
+ Filename: pos.Filename,
+ Offset: pos.Offset,
+ Line: pos.Line,
+ Column: pos.Column,
+ }
+
+ o.Val, err = p.objectValue()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return o, nil
+}
+
+// objectKey parses an object key and returns an ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+ keyCount := 0
+ keys := make([]*ast.ObjectKey, 0)
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.EOF:
+ return nil, errEofToken
+ case token.STRING:
+ keyCount++
+ keys = append(keys, &ast.ObjectKey{
+ Token: p.tok.HCLToken(),
+ })
+ case token.COLON:
+ // If we have a zero keycount it means that we never got
+ // an object key, i.e. `{ :`. This is a syntax error.
+ if keyCount == 0 {
+ return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+ }
+
+ // Done
+ return keys, nil
+ case token.ILLEGAL:
+ fmt.Println("illegal")
+ default:
+ return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+ }
+ }
+}
+
+// objectValue parses any type of object value, such as number, bool,
+// string, object or list.
+func (p *Parser) objectValue() (ast.Node, error) {
+ defer un(trace(p, "ParseObjectValue"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
+ return p.literalType()
+ case token.LBRACE:
+ return p.objectType()
+ case token.LBRACK:
+ return p.listType()
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
+}
+
+// object parses the top-level JSON object (which must be delimited by
+// braces) and returns its ObjectType AST.
+func (p *Parser) object() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseType"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.LBRACE:
+ return p.objectType()
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
+}
+
+// objectType parses an object type and returns an ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseObjectType"))
+
+ // we assume that the currently scanned token is a LBRACE
+ o := &ast.ObjectType{}
+
+ l, err := p.objectList()
+
+ // if we hit RBRACE, we are good to go (means we parsed all Items), if it's
+ // not a RBRACE, it's a syntax error and we just return it.
+ if err != nil && p.tok.Type != token.RBRACE {
+ return nil, err
+ }
+
+ o.List = l
+ return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+ defer un(trace(p, "ParseListType"))
+
+ // we assume that the currently scanned token is a LBRACK
+ l := &ast.ListType{}
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.STRING:
+ node, err := p.literalType()
+ if err != nil {
+ return nil, err
+ }
+
+ l.Add(node)
+ case token.COMMA:
+ continue
+ case token.LBRACE:
+ node, err := p.objectType()
+ if err != nil {
+ return nil, err
+ }
+
+ l.Add(node)
+ case token.BOOL:
+ // TODO(arslan) should we support? not supported by HCL yet
+ case token.LBRACK:
+ // TODO(arslan) should we support nested lists? Even though it's
+ // written in README of HCL, it's not a part of the grammar
+ // (not defined in parse.y)
+ case token.RBRACK:
+ // finished
+ return l, nil
+ default:
+ return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
+ }
+
+ }
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+ defer un(trace(p, "ParseLiteral"))
+
+ return &ast.LiteralType{
+ Token: p.tok.HCLToken(),
+ }, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead.
+func (p *Parser) scan() token.Token {
+ // If we have a token on the buffer, then return it.
+ if p.n != 0 {
+ p.n = 0
+ return p.tok
+ }
+
+ p.tok = p.sc.Scan()
+ return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+ p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+ i := 2 * p.indent
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+ p.indent--
+ p.printTrace(")")
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser_test.go b/vendor/github.com/hashicorp/hcl/json/parser/parser_test.go
new file mode 100644
index 000000000..e0cebf50a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/parser_test.go
@@ -0,0 +1,384 @@
+package parser
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "testing"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+func TestType(t *testing.T) {
+ var literals = []struct {
+ typ token.Type
+ src string
+ }{
+ {token.STRING, `"foo": "bar"`},
+ {token.NUMBER, `"foo": 123`},
+ {token.FLOAT, `"foo": 123.12`},
+ {token.FLOAT, `"foo": -123.12`},
+ {token.BOOL, `"foo": true`},
+ {token.STRING, `"foo": null`},
+ }
+
+ for _, l := range literals {
+ t.Logf("Testing: %s", l.src)
+
+ p := newParser([]byte(l.src))
+ item, err := p.objectItem()
+ if err != nil {
+ t.Error(err)
+ }
+
+ lit, ok := item.Val.(*ast.LiteralType)
+ if !ok {
+ t.Errorf("node should be of type LiteralType, got: %T", item.Val)
+ }
+
+ if lit.Token.Type != l.typ {
+ t.Errorf("want: %s, got: %s", l.typ, lit.Token.Type)
+ }
+ }
+}
+
+func TestListType(t *testing.T) {
+ var literals = []struct {
+ src string
+ tokens []token.Type
+ }{
+ {
+ `"foo": ["123", 123]`,
+ []token.Type{token.STRING, token.NUMBER},
+ },
+ {
+ `"foo": [123, "123",]`,
+ []token.Type{token.NUMBER, token.STRING},
+ },
+ {
+ `"foo": []`,
+ []token.Type{},
+ },
+ {
+ `"foo": ["123", 123]`,
+ []token.Type{token.STRING, token.NUMBER},
+ },
+ {
+ `"foo": ["123", {}]`,
+ []token.Type{token.STRING, token.LBRACE},
+ },
+ }
+
+ for _, l := range literals {
+ t.Logf("Testing: %s", l.src)
+
+ p := newParser([]byte(l.src))
+ item, err := p.objectItem()
+ if err != nil {
+ t.Error(err)
+ }
+
+ list, ok := item.Val.(*ast.ListType)
+ if !ok {
+ t.Errorf("node should be of type LiteralType, got: %T", item.Val)
+ }
+
+ tokens := []token.Type{}
+ for _, li := range list.List {
+ switch v := li.(type) {
+ case *ast.LiteralType:
+ tokens = append(tokens, v.Token.Type)
+ case *ast.ObjectType:
+ tokens = append(tokens, token.LBRACE)
+ }
+ }
+
+ equals(t, l.tokens, tokens)
+ }
+}
+
+func TestObjectType(t *testing.T) {
+ var literals = []struct {
+ src string
+ nodeType []ast.Node
+ itemLen int
+ }{
+ {
+ `"foo": {}`,
+ nil,
+ 0,
+ },
+ {
+ `"foo": {
+ "bar": "fatih"
+ }`,
+ []ast.Node{&ast.LiteralType{}},
+ 1,
+ },
+ {
+ `"foo": {
+ "bar": "fatih",
+ "baz": ["arslan"]
+ }`,
+ []ast.Node{
+ &ast.LiteralType{},
+ &ast.ListType{},
+ },
+ 2,
+ },
+ {
+ `"foo": {
+ "bar": {}
+ }`,
+ []ast.Node{
+ &ast.ObjectType{},
+ },
+ 1,
+ },
+ {
+ `"foo": {
+ "bar": {},
+ "foo": true
+ }`,
+ []ast.Node{
+ &ast.ObjectType{},
+ &ast.LiteralType{},
+ },
+ 2,
+ },
+ }
+
+ for _, l := range literals {
+ t.Logf("Testing:\n%s\n", l.src)
+
+ p := newParser([]byte(l.src))
+ // p.enableTrace = true
+ item, err := p.objectItem()
+ if err != nil {
+ t.Error(err)
+ }
+
+ // we know that the ObjectKey name is foo for all cases, what matters
+ // is the object
+ obj, ok := item.Val.(*ast.ObjectType)
+ if !ok {
+ t.Errorf("node should be of type LiteralType, got: %T", item.Val)
+ }
+
+ // check if the total length of items are correct
+ equals(t, l.itemLen, len(obj.List.Items))
+
+ // check if the types are correct
+ for i, item := range obj.List.Items {
+ equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val))
+ }
+ }
+}
+
+func TestFlattenObjects(t *testing.T) {
+ var literals = []struct {
+ src string
+ nodeType []ast.Node
+ itemLen int
+ }{
+ {
+ `{
+ "foo": [
+ {
+ "foo": "svh",
+ "bar": "fatih"
+ }
+ ]
+ }`,
+ []ast.Node{
+ &ast.ObjectType{},
+ &ast.LiteralType{},
+ &ast.LiteralType{},
+ },
+ 3,
+ },
+ {
+ `{
+ "variable": {
+ "foo": {}
+ }
+ }`,
+ []ast.Node{
+ &ast.ObjectType{},
+ },
+ 1,
+ },
+ {
+ `{
+ "empty": []
+ }`,
+ []ast.Node{
+ &ast.ListType{},
+ },
+ 1,
+ },
+ {
+ `{
+ "basic": [1, 2, 3]
+ }`,
+ []ast.Node{
+ &ast.ListType{},
+ },
+ 1,
+ },
+ }
+
+ for _, l := range literals {
+ t.Logf("Testing:\n%s\n", l.src)
+
+ f, err := Parse([]byte(l.src))
+ if err != nil {
+ t.Error(err)
+ }
+
+ // the first object is always an ObjectList so just assert that one
+ // so we can use it as such
+ obj, ok := f.Node.(*ast.ObjectList)
+ if !ok {
+ t.Errorf("node should be *ast.ObjectList, got: %T", f.Node)
+ }
+
+ // check if the types are correct
+ var i int
+ for _, item := range obj.Items {
+ equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val))
+ i++
+
+ if obj, ok := item.Val.(*ast.ObjectType); ok {
+ for _, item := range obj.List.Items {
+ equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val))
+ i++
+ }
+ }
+ }
+
+ // check if the number of items is correct
+ equals(t, l.itemLen, i)
+
+ }
+}
+
+func TestObjectKey(t *testing.T) {
+ keys := []struct {
+ exp []token.Type
+ src string
+ }{
+ {[]token.Type{token.STRING}, `"foo": {}`},
+ }
+
+ for _, k := range keys {
+ p := newParser([]byte(k.src))
+ keys, err := p.objectKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tokens := []token.Type{}
+ for _, o := range keys {
+ tokens = append(tokens, o.Token.Type)
+ }
+
+ equals(t, k.exp, tokens)
+ }
+
+ errKeys := []struct {
+ src string
+ }{
+ {`foo 12 {}`},
+ {`foo bar = {}`},
+ {`foo []`},
+ {`12 {}`},
+ }
+
+ for _, k := range errKeys {
+ p := newParser([]byte(k.src))
+ _, err := p.objectKey()
+ if err == nil {
+ t.Errorf("case '%s' should give an error", k.src)
+ }
+ }
+}
+
+// Official HCL tests
+func TestParse(t *testing.T) {
+ cases := []struct {
+ Name string
+ Err bool
+ }{
+ {
+ "array.json",
+ false,
+ },
+ {
+ "basic.json",
+ false,
+ },
+ {
+ "object.json",
+ false,
+ },
+ {
+ "types.json",
+ false,
+ },
+ {
+ "bad_input_128.json",
+ true,
+ },
+ {
+ "bad_input_tf_8110.json",
+ true,
+ },
+ {
+ "good_input_tf_8110.json",
+ false,
+ },
+ }
+
+ const fixtureDir = "./test-fixtures"
+
+ for _, tc := range cases {
+ d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ _, err = Parse(d)
+ if (err != nil) != tc.Err {
+ t.Fatalf("Input: %s\n\nError: %s", tc.Name, err)
+ }
+ }
+}
+
+func TestParse_inline(t *testing.T) {
+ cases := []struct {
+ Value string
+ Err bool
+ }{
+ {"{:{", true},
+ }
+
+ for _, tc := range cases {
+ _, err := Parse([]byte(tc.Value))
+ if (err != nil) != tc.Err {
+ t.Fatalf("Input: %q\n\nError: %s", tc.Value, err)
+ }
+ }
+}
+
+// equals fails the test if exp is not equal to act.
+func equals(tb testing.TB, exp, act interface{}) {
+ if !reflect.DeepEqual(exp, act) {
+ _, file, line, _ := runtime.Caller(1)
+ fmt.Printf("\033[31m%s:%d:\n\n\texp: %s\n\n\tgot: %s\033[39m\n\n", filepath.Base(file), line, exp, act)
+ tb.FailNow()
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/array.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/array.json
new file mode 100644
index 000000000..e320f17ab
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/array.json
@@ -0,0 +1,4 @@
+{
+ "foo": [1, 2, "bar"],
+ "bar": "baz"
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/bad_input_128.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/bad_input_128.json
new file mode 100644
index 000000000..b5f850c96
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/bad_input_128.json
@@ -0,0 +1 @@
+{:{
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/bad_input_tf_8110.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/bad_input_tf_8110.json
new file mode 100644
index 000000000..a04385833
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/bad_input_tf_8110.json
@@ -0,0 +1,7 @@
+{
+ "variable": {
+ "poc": {
+ "default": "${replace("europe-west", "-", " ")}"
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/basic.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/basic.json
new file mode 100644
index 000000000..b54bde96c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/basic.json
@@ -0,0 +1,3 @@
+{
+ "foo": "bar"
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/good_input_tf_8110.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/good_input_tf_8110.json
new file mode 100644
index 000000000..f21aa090d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/good_input_tf_8110.json
@@ -0,0 +1,7 @@
+{
+ "variable": {
+ "poc": {
+ "default": "${replace(\"europe-west\", \"-\", \" \")}"
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/object.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/object.json
new file mode 100644
index 000000000..72168a3cc
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/object.json
@@ -0,0 +1,5 @@
+{
+ "foo": {
+ "bar": [1,2]
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/types.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/types.json
new file mode 100644
index 000000000..9a142a6ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/types.json
@@ -0,0 +1,10 @@
+{
+ "foo": "bar",
+ "bar": 7,
+ "baz": [1,2,3],
+ "foo": -12,
+ "bar": 3.14159,
+ "foo": true,
+ "bar": false,
+ "foo": null
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
new file mode 100644
index 000000000..dd5c72bb3
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
@@ -0,0 +1,451 @@
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/hashicorp/hcl/json/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+ buf *bytes.Buffer // Source buffer for advancing and scanning
+ src []byte // Source buffer for immutable access
+
+ // Source Position
+ srcPos token.Pos // current position
+ prevPos token.Pos // previous position, used for peek() method
+
+ lastCharLen int // length of last character in bytes
+ lastLineLen int // length of last line in characters (for correct column reporting)
+
+ tokStart int // token text start position
+ tokEnd int // token text end position
+
+ // Error is called for each error encountered. If no Error
+ // function is set, the error is reported to os.Stderr.
+ Error func(pos token.Pos, msg string)
+
+ // ErrorCount is incremented by one for each error encountered.
+ ErrorCount int
+
+ // tokPos is the start position of most recently scanned token; set by
+ // Scan. The Filename field is always left untouched by the Scanner. If
+ // an error is reported (via Error) and Position is invalid, the scanner is
+ // not inside a token.
+ tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+ // even though we accept a src, we read from a io.Reader compatible type
+ // (*bytes.Buffer). So in the future we might easily change it to streaming
+ // read.
+ b := bytes.NewBuffer(src)
+ s := &Scanner{
+ buf: b,
+ src: src,
+ }
+
+ // srcPosition always starts with 1
+ s.srcPos.Line = 1
+ return s
+}
+
+// next reads the next rune from the buffered reader. Returns the rune(0) if
+// an error occurs (or io.EOF is returned).
+func (s *Scanner) next() rune {
+ ch, size, err := s.buf.ReadRune()
+ if err != nil {
+ // advance for error reporting
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ return eof
+ }
+
+ if ch == utf8.RuneError && size == 1 {
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ s.err("illegal UTF-8 encoding")
+ return ch
+ }
+
+ // remember last position
+ s.prevPos = s.srcPos
+
+ s.srcPos.Column++
+ s.lastCharLen = size
+ s.srcPos.Offset += size
+
+ if ch == '\n' {
+ s.srcPos.Line++
+ s.lastLineLen = s.srcPos.Column
+ s.srcPos.Column = 0
+ }
+
+ // debug
+ // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+ return ch
+}
+
+// unread unreads the previous read Rune and updates the source position
+func (s *Scanner) unread() {
+ if err := s.buf.UnreadRune(); err != nil {
+ panic(err) // this is user fault, we should catch it
+ }
+ s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+ peek, _, err := s.buf.ReadRune()
+ if err != nil {
+ return eof
+ }
+
+ s.buf.UnreadRune()
+ return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+ ch := s.next()
+
+ // skip white space
+ for isWhitespace(ch) {
+ ch = s.next()
+ }
+
+ var tok token.Type
+
+ // token text markings
+ s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+ // token position, initial next() is moving the offset by one(size of rune
+ // actually), though we are interested with the starting point
+ s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+ if s.srcPos.Column > 0 {
+ // common case: last character was not a '\n'
+ s.tokPos.Line = s.srcPos.Line
+ s.tokPos.Column = s.srcPos.Column
+ } else {
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ s.tokPos.Line = s.srcPos.Line - 1
+ s.tokPos.Column = s.lastLineLen
+ }
+
+ switch {
+ case isLetter(ch):
+ lit := s.scanIdentifier()
+ if lit == "true" || lit == "false" {
+ tok = token.BOOL
+ } else if lit == "null" {
+ tok = token.NULL
+ } else {
+ s.err("illegal char")
+ }
+ case isDecimal(ch):
+ tok = s.scanNumber(ch)
+ default:
+ switch ch {
+ case eof:
+ tok = token.EOF
+ case '"':
+ tok = token.STRING
+ s.scanString()
+ case '.':
+ tok = token.PERIOD
+ ch = s.peek()
+ if isDecimal(ch) {
+ tok = token.FLOAT
+ ch = s.scanMantissa(ch)
+ ch = s.scanExponent(ch)
+ }
+ case '[':
+ tok = token.LBRACK
+ case ']':
+ tok = token.RBRACK
+ case '{':
+ tok = token.LBRACE
+ case '}':
+ tok = token.RBRACE
+ case ',':
+ tok = token.COMMA
+ case ':':
+ tok = token.COLON
+ case '-':
+ if isDecimal(s.peek()) {
+ ch := s.next()
+ tok = s.scanNumber(ch)
+ } else {
+ s.err("illegal char")
+ }
+ default:
+ s.err("illegal char: " + string(ch))
+ }
+ }
+
+ // finish token ending
+ s.tokEnd = s.srcPos.Offset
+
+ // create token literal
+ var tokenText string
+ if s.tokStart >= 0 {
+ tokenText = string(s.src[s.tokStart:s.tokEnd])
+ }
+ s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+ return token.Token{
+ Type: tok,
+ Pos: s.tokPos,
+ Text: tokenText,
+ }
+}
+
+// scanNumber scans a HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+ zero := ch == '0'
+ pos := s.srcPos
+
+ s.scanMantissa(ch)
+ ch = s.next() // seek forward
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+
+ // If we have a larger number and this is zero, error
+ if zero && pos != s.srcPos {
+ s.err("numbers cannot start with 0")
+ }
+
+ return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
+// non-decimal rune. It's used to determine whether it's a fraction or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+ scanned := false
+ for isDecimal(ch) {
+ ch = s.next()
+ scanned = true
+ }
+
+ if scanned && ch != eof {
+ s.unread()
+ }
+ return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+ if ch == '.' {
+ ch = s.peek() // we peek just to see if we can move forward
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ if ch == '-' || ch == '+' {
+ ch = s.next()
+ }
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+ braces := 0
+ for {
+ // '"' opening already consumed
+ // read character after quote
+ ch := s.next()
+
+ if ch == '\n' || ch < 0 || ch == eof {
+ s.err("literal not terminated")
+ return
+ }
+
+ if ch == '"' {
+ break
+ }
+
+ // If we're going into a ${} then we can ignore quotes for a while
+ if braces == 0 && ch == '$' && s.peek() == '{' {
+ braces++
+ s.next()
+ } else if braces > 0 && ch == '{' {
+ braces++
+ }
+ if braces > 0 && ch == '}' {
+ braces--
+ }
+
+ if ch == '\\' {
+ s.scanEscape()
+ }
+ }
+
+ return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+ // http://en.cppreference.com/w/cpp/language/escape
+ ch := s.next() // read character after '/'
+ switch ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+ // nothing to do
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ // octal notation
+ ch = s.scanDigits(ch, 8, 3)
+ case 'x':
+ // hexadecimal notation
+ ch = s.scanDigits(s.next(), 16, 2)
+ case 'u':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 4)
+ case 'U':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 8)
+ default:
+ s.err("illegal char escape")
+ }
+ return ch
+}
+
+// scanDigits scans a rune with the given base for n times. For example an
+// octal notation \123 would result in scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+ for n > 0 && digitVal(ch) < base {
+ ch = s.next()
+ n--
+ }
+ if n > 0 {
+ s.err("illegal char escape")
+ }
+
+ // we scanned all digits, put the last non digit char back
+ s.unread()
+ return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+ offs := s.srcPos.Offset - s.lastCharLen
+ ch := s.next()
+ for isLetter(ch) || isDigit(ch) || ch == '-' {
+ ch = s.next()
+ }
+
+ if ch != eof {
+ s.unread() // we got identifier, put back latest char
+ }
+
+ return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+ pos.Offset = s.srcPos.Offset - s.lastCharLen
+ switch {
+ case s.srcPos.Column > 0:
+ // common case: last character was not a '\n'
+ pos.Line = s.srcPos.Line
+ pos.Column = s.srcPos.Column
+ case s.lastLineLen > 0:
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ pos.Line = s.srcPos.Line - 1
+ pos.Column = s.lastLineLen
+ default:
+ // at the beginning of the source
+ pos.Line = 1
+ pos.Column = 1
+ }
+ return
+}
+
+// err prints the error of any scanning to s.Error function. If the function is
+// not defined, by default it prints them to os.Stderr
+func (s *Scanner) err(msg string) {
+ s.ErrorCount++
+ pos := s.recentPosition()
+
+ if s.Error != nil {
+ s.Error(pos, msg)
+ return
+ }
+
+ fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+ return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal number
+func isDecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal number
+func isHexadecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+ return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner_test.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner_test.go
new file mode 100644
index 000000000..3033a5797
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner_test.go
@@ -0,0 +1,362 @@
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/hcl/json/token"
+)
+
+var f100 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+
+type tokenPair struct {
+ tok token.Type
+ text string
+}
+
+var tokenLists = map[string][]tokenPair{
+ "operator": []tokenPair{
+ {token.LBRACK, "["},
+ {token.LBRACE, "{"},
+ {token.COMMA, ","},
+ {token.PERIOD, "."},
+ {token.RBRACK, "]"},
+ {token.RBRACE, "}"},
+ },
+ "bool": []tokenPair{
+ {token.BOOL, "true"},
+ {token.BOOL, "false"},
+ },
+ "string": []tokenPair{
+ {token.STRING, `" "`},
+ {token.STRING, `"a"`},
+ {token.STRING, `"本"`},
+ {token.STRING, `"${file(\"foo\")}"`},
+ {token.STRING, `"\a"`},
+ {token.STRING, `"\b"`},
+ {token.STRING, `"\f"`},
+ {token.STRING, `"\n"`},
+ {token.STRING, `"\r"`},
+ {token.STRING, `"\t"`},
+ {token.STRING, `"\v"`},
+ {token.STRING, `"\""`},
+ {token.STRING, `"\000"`},
+ {token.STRING, `"\777"`},
+ {token.STRING, `"\x00"`},
+ {token.STRING, `"\xff"`},
+ {token.STRING, `"\u0000"`},
+ {token.STRING, `"\ufA16"`},
+ {token.STRING, `"\U00000000"`},
+ {token.STRING, `"\U0000ffAB"`},
+ {token.STRING, `"` + f100 + `"`},
+ },
+ "number": []tokenPair{
+ {token.NUMBER, "0"},
+ {token.NUMBER, "1"},
+ {token.NUMBER, "9"},
+ {token.NUMBER, "42"},
+ {token.NUMBER, "1234567890"},
+ {token.NUMBER, "-0"},
+ {token.NUMBER, "-1"},
+ {token.NUMBER, "-9"},
+ {token.NUMBER, "-42"},
+ {token.NUMBER, "-1234567890"},
+ },
+ "float": []tokenPair{
+ {token.FLOAT, "0."},
+ {token.FLOAT, "1."},
+ {token.FLOAT, "42."},
+ {token.FLOAT, "01234567890."},
+ {token.FLOAT, ".0"},
+ {token.FLOAT, ".1"},
+ {token.FLOAT, ".42"},
+ {token.FLOAT, ".0123456789"},
+ {token.FLOAT, "0.0"},
+ {token.FLOAT, "1.0"},
+ {token.FLOAT, "42.0"},
+ {token.FLOAT, "01234567890.0"},
+ {token.FLOAT, "0e0"},
+ {token.FLOAT, "1e0"},
+ {token.FLOAT, "42e0"},
+ {token.FLOAT, "01234567890e0"},
+ {token.FLOAT, "0E0"},
+ {token.FLOAT, "1E0"},
+ {token.FLOAT, "42E0"},
+ {token.FLOAT, "01234567890E0"},
+ {token.FLOAT, "0e+10"},
+ {token.FLOAT, "1e-10"},
+ {token.FLOAT, "42e+10"},
+ {token.FLOAT, "01234567890e-10"},
+ {token.FLOAT, "0E+10"},
+ {token.FLOAT, "1E-10"},
+ {token.FLOAT, "42E+10"},
+ {token.FLOAT, "01234567890E-10"},
+ {token.FLOAT, "01.8e0"},
+ {token.FLOAT, "1.4e0"},
+ {token.FLOAT, "42.2e0"},
+ {token.FLOAT, "01234567890.12e0"},
+ {token.FLOAT, "0.E0"},
+ {token.FLOAT, "1.12E0"},
+ {token.FLOAT, "42.123E0"},
+ {token.FLOAT, "01234567890.213E0"},
+ {token.FLOAT, "0.2e+10"},
+ {token.FLOAT, "1.2e-10"},
+ {token.FLOAT, "42.54e+10"},
+ {token.FLOAT, "01234567890.98e-10"},
+ {token.FLOAT, "0.1E+10"},
+ {token.FLOAT, "1.1E-10"},
+ {token.FLOAT, "42.1E+10"},
+ {token.FLOAT, "01234567890.1E-10"},
+ {token.FLOAT, "-0.0"},
+ {token.FLOAT, "-1.0"},
+ {token.FLOAT, "-42.0"},
+ {token.FLOAT, "-01234567890.0"},
+ {token.FLOAT, "-0e0"},
+ {token.FLOAT, "-1e0"},
+ {token.FLOAT, "-42e0"},
+ {token.FLOAT, "-01234567890e0"},
+ {token.FLOAT, "-0E0"},
+ {token.FLOAT, "-1E0"},
+ {token.FLOAT, "-42E0"},
+ {token.FLOAT, "-01234567890E0"},
+ {token.FLOAT, "-0e+10"},
+ {token.FLOAT, "-1e-10"},
+ {token.FLOAT, "-42e+10"},
+ {token.FLOAT, "-01234567890e-10"},
+ {token.FLOAT, "-0E+10"},
+ {token.FLOAT, "-1E-10"},
+ {token.FLOAT, "-42E+10"},
+ {token.FLOAT, "-01234567890E-10"},
+ {token.FLOAT, "-01.8e0"},
+ {token.FLOAT, "-1.4e0"},
+ {token.FLOAT, "-42.2e0"},
+ {token.FLOAT, "-01234567890.12e0"},
+ {token.FLOAT, "-0.E0"},
+ {token.FLOAT, "-1.12E0"},
+ {token.FLOAT, "-42.123E0"},
+ {token.FLOAT, "-01234567890.213E0"},
+ {token.FLOAT, "-0.2e+10"},
+ {token.FLOAT, "-1.2e-10"},
+ {token.FLOAT, "-42.54e+10"},
+ {token.FLOAT, "-01234567890.98e-10"},
+ {token.FLOAT, "-0.1E+10"},
+ {token.FLOAT, "-1.1E-10"},
+ {token.FLOAT, "-42.1E+10"},
+ {token.FLOAT, "-01234567890.1E-10"},
+ },
+}
+
+var orderedTokenLists = []string{
+ "comment",
+ "operator",
+ "bool",
+ "string",
+ "number",
+ "float",
+}
+
+func TestPosition(t *testing.T) {
+	// create artificial source code
+ buf := new(bytes.Buffer)
+
+ for _, listName := range orderedTokenLists {
+ for _, ident := range tokenLists[listName] {
+ fmt.Fprintf(buf, "\t\t\t\t%s\n", ident.text)
+ }
+ }
+
+ s := New(buf.Bytes())
+
+ pos := token.Pos{"", 4, 1, 5}
+ s.Scan()
+ for _, listName := range orderedTokenLists {
+
+ for _, k := range tokenLists[listName] {
+ curPos := s.tokPos
+ // fmt.Printf("[%q] s = %+v:%+v\n", k.text, curPos.Offset, curPos.Column)
+
+ if curPos.Offset != pos.Offset {
+ t.Fatalf("offset = %d, want %d for %q", curPos.Offset, pos.Offset, k.text)
+ }
+ if curPos.Line != pos.Line {
+ t.Fatalf("line = %d, want %d for %q", curPos.Line, pos.Line, k.text)
+ }
+ if curPos.Column != pos.Column {
+ t.Fatalf("column = %d, want %d for %q", curPos.Column, pos.Column, k.text)
+ }
+ pos.Offset += 4 + len(k.text) + 1 // 4 tabs + token bytes + newline
+ pos.Line += countNewlines(k.text) + 1 // each token is on a new line
+
+ s.Error = func(pos token.Pos, msg string) {
+ t.Errorf("error %q for %q", msg, k.text)
+ }
+
+ s.Scan()
+ }
+ }
+ // make sure there were no token-internal errors reported by scanner
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+}
+
+func TestComment(t *testing.T) {
+ testTokenList(t, tokenLists["comment"])
+}
+
+func TestOperator(t *testing.T) {
+ testTokenList(t, tokenLists["operator"])
+}
+
+func TestBool(t *testing.T) {
+ testTokenList(t, tokenLists["bool"])
+}
+
+func TestIdent(t *testing.T) {
+ testTokenList(t, tokenLists["ident"])
+}
+
+func TestString(t *testing.T) {
+ testTokenList(t, tokenLists["string"])
+}
+
+func TestNumber(t *testing.T) {
+ testTokenList(t, tokenLists["number"])
+}
+
+func TestFloat(t *testing.T) {
+ testTokenList(t, tokenLists["float"])
+}
+
+func TestRealExample(t *testing.T) {
+ complexReal := `
+{
+ "variable": {
+ "foo": {
+ "default": "bar",
+ "description": "bar",
+ "depends_on": ["something"]
+ }
+ }
+}`
+
+ literals := []struct {
+ tokenType token.Type
+ literal string
+ }{
+ {token.LBRACE, `{`},
+ {token.STRING, `"variable"`},
+ {token.COLON, `:`},
+ {token.LBRACE, `{`},
+ {token.STRING, `"foo"`},
+ {token.COLON, `:`},
+ {token.LBRACE, `{`},
+ {token.STRING, `"default"`},
+ {token.COLON, `:`},
+ {token.STRING, `"bar"`},
+ {token.COMMA, `,`},
+ {token.STRING, `"description"`},
+ {token.COLON, `:`},
+ {token.STRING, `"bar"`},
+ {token.COMMA, `,`},
+ {token.STRING, `"depends_on"`},
+ {token.COLON, `:`},
+ {token.LBRACK, `[`},
+ {token.STRING, `"something"`},
+ {token.RBRACK, `]`},
+ {token.RBRACE, `}`},
+ {token.RBRACE, `}`},
+ {token.RBRACE, `}`},
+ {token.EOF, ``},
+ }
+
+ s := New([]byte(complexReal))
+ for _, l := range literals {
+ tok := s.Scan()
+ if l.tokenType != tok.Type {
+ t.Errorf("got: %s want %s for %s\n", tok, l.tokenType, tok.String())
+ }
+
+ if l.literal != tok.Text {
+ t.Errorf("got: %s want %s\n", tok, l.literal)
+ }
+ }
+
+}
+
+func TestError(t *testing.T) {
+ testError(t, "\x80", "1:1", "illegal UTF-8 encoding", token.ILLEGAL)
+ testError(t, "\xff", "1:1", "illegal UTF-8 encoding", token.ILLEGAL)
+
+ testError(t, `"ab`+"\x80", "1:4", "illegal UTF-8 encoding", token.STRING)
+ testError(t, `"abc`+"\xff", "1:5", "illegal UTF-8 encoding", token.STRING)
+
+ testError(t, `01238`, "1:7", "numbers cannot start with 0", token.NUMBER)
+ testError(t, `01238123`, "1:10", "numbers cannot start with 0", token.NUMBER)
+ testError(t, `'aa'`, "1:1", "illegal char: '", token.ILLEGAL)
+
+ testError(t, `"`, "1:2", "literal not terminated", token.STRING)
+ testError(t, `"abc`, "1:5", "literal not terminated", token.STRING)
+ testError(t, `"abc`+"\n", "1:5", "literal not terminated", token.STRING)
+}
+
+func testError(t *testing.T, src, pos, msg string, tok token.Type) {
+ s := New([]byte(src))
+
+ errorCalled := false
+ s.Error = func(p token.Pos, m string) {
+ if !errorCalled {
+ if pos != p.String() {
+ t.Errorf("pos = %q, want %q for %q", p, pos, src)
+ }
+
+ if m != msg {
+ t.Errorf("msg = %q, want %q for %q", m, msg, src)
+ }
+ errorCalled = true
+ }
+ }
+
+ tk := s.Scan()
+ if tk.Type != tok {
+ t.Errorf("tok = %s, want %s for %q", tk, tok, src)
+ }
+ if !errorCalled {
+ t.Errorf("error handler not called for %q", src)
+ }
+ if s.ErrorCount == 0 {
+ t.Errorf("count = %d, want > 0 for %q", s.ErrorCount, src)
+ }
+}
+
+func testTokenList(t *testing.T, tokenList []tokenPair) {
+	// create artificial source code
+ buf := new(bytes.Buffer)
+ for _, ident := range tokenList {
+ fmt.Fprintf(buf, "%s\n", ident.text)
+ }
+
+ s := New(buf.Bytes())
+ for _, ident := range tokenList {
+ tok := s.Scan()
+ if tok.Type != ident.tok {
+ t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text)
+ }
+
+ if tok.Text != ident.text {
+ t.Errorf("text = %q want %q", tok.String(), ident.text)
+ }
+
+ }
+}
+
+func countNewlines(s string) int {
+ n := 0
+ for _, ch := range s {
+ if ch == '\n' {
+ n++
+ }
+ }
+ return n
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/array.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/array.json
new file mode 100644
index 000000000..e320f17ab
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/test-fixtures/array.json
@@ -0,0 +1,4 @@
+{
+ "foo": [1, 2, "bar"],
+ "bar": "baz"
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/basic.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/basic.json
new file mode 100644
index 000000000..b54bde96c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/test-fixtures/basic.json
@@ -0,0 +1,3 @@
+{
+ "foo": "bar"
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/object.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/object.json
new file mode 100644
index 000000000..72168a3cc
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/test-fixtures/object.json
@@ -0,0 +1,5 @@
+{
+ "foo": {
+ "bar": [1,2]
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/types.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/types.json
new file mode 100644
index 000000000..9a142a6ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/test-fixtures/types.json
@@ -0,0 +1,10 @@
+{
+ "foo": "bar",
+ "bar": 7,
+ "baz": [1,2,3],
+ "foo": -12,
+ "bar": 3.14159,
+ "foo": true,
+ "bar": false,
+ "foo": null
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go
new file mode 100644
index 000000000..59c1bb72d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Pos struct {
+ Filename string // filename, if any
+ Offset int // offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+// file:line:column valid position with file name
+// line:column valid position without file name
+// file invalid position with file name
+// - invalid position without file name
+func (p Pos) String() string {
+ s := p.Filename
+ if p.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+ }
+ if s == "" {
+ s = "-"
+ }
+ return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+ return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+ return u.Offset < p.Offset || u.Line < p.Line
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go
new file mode 100644
index 000000000..95a0c3eee
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/token.go
@@ -0,0 +1,118 @@
+package token
+
+import (
+ "fmt"
+ "strconv"
+
+ hcltoken "github.com/hashicorp/hcl/hcl/token"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+ Type Type
+ Pos Pos
+ Text string
+}
+
+// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+ // Special tokens
+ ILLEGAL Type = iota
+ EOF
+
+ identifier_beg
+ literal_beg
+ NUMBER // 12345
+ FLOAT // 123.45
+ BOOL // true,false
+ STRING // "abc"
+ NULL // null
+ literal_end
+ identifier_end
+
+ operator_beg
+ LBRACK // [
+ LBRACE // {
+ COMMA // ,
+ PERIOD // .
+ COLON // :
+
+ RBRACK // ]
+ RBRACE // }
+
+ operator_end
+)
+
+var tokens = [...]string{
+ ILLEGAL: "ILLEGAL",
+
+ EOF: "EOF",
+
+ NUMBER: "NUMBER",
+ FLOAT: "FLOAT",
+ BOOL: "BOOL",
+ STRING: "STRING",
+ NULL: "NULL",
+
+ LBRACK: "LBRACK",
+ LBRACE: "LBRACE",
+ COMMA: "COMMA",
+ PERIOD: "PERIOD",
+ COLON: "COLON",
+
+ RBRACK: "RBRACK",
+ RBRACE: "RBRACE",
+}
+
+// String returns the string corresponding to the token tok.
+func (t Type) String() string {
+ s := ""
+ if 0 <= t && t < Type(len(tokens)) {
+ s = tokens[t]
+ }
+ if s == "" {
+ s = "token(" + strconv.Itoa(int(t)) + ")"
+ }
+ return s
+}
+
+// IsIdentifier returns true for tokens corresponding to identifiers and basic
+// type literals; it returns false otherwise.
+func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
+
+// IsLiteral returns true for tokens corresponding to basic type literals; it
+// returns false otherwise.
+func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
+
+// String returns the token's literal text. Note that this is only
+// applicable for certain token types, such as token.IDENT,
+// token.STRING, etc..
+func (t Token) String() string {
+ return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
+}
+
+// HCLToken converts this token to an HCL token.
+//
+// The token type must be a literal type or this will panic.
+func (t Token) HCLToken() hcltoken.Token {
+ switch t.Type {
+ case BOOL:
+ return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
+ case FLOAT:
+ return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
+ case NULL:
+ return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
+ case NUMBER:
+ return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
+ case STRING:
+ return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
+ default:
+ panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/token_test.go b/vendor/github.com/hashicorp/hcl/json/token/token_test.go
new file mode 100644
index 000000000..a83fdd55b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/token_test.go
@@ -0,0 +1,34 @@
+package token
+
+import (
+ "testing"
+)
+
+func TestTypeString(t *testing.T) {
+ var tokens = []struct {
+ tt Type
+ str string
+ }{
+ {ILLEGAL, "ILLEGAL"},
+ {EOF, "EOF"},
+ {NUMBER, "NUMBER"},
+ {FLOAT, "FLOAT"},
+ {BOOL, "BOOL"},
+ {STRING, "STRING"},
+ {NULL, "NULL"},
+ {LBRACK, "LBRACK"},
+ {LBRACE, "LBRACE"},
+ {COMMA, "COMMA"},
+ {PERIOD, "PERIOD"},
+ {RBRACK, "RBRACK"},
+ {RBRACE, "RBRACE"},
+ }
+
+ for _, token := range tokens {
+ if token.tt.String() != token.str {
+ t.Errorf("want: %q got:%q\n", token.str, token.tt)
+
+ }
+ }
+
+}