From 54d3d47daf9190275bbdaf8703b84969a4593451 Mon Sep 17 00:00:00 2001 From: Corey Hulen Date: Fri, 24 Mar 2017 23:31:34 -0700 Subject: PLT-6076 Adding viper libs for config file changes (#5871) * Adding viper libs for config file changes * Removing the old fsnotify lib * updating some missing libs --- vendor/github.com/hashicorp/hcl/hcl/ast/ast.go | 219 ++++++ .../github.com/hashicorp/hcl/hcl/ast/ast_test.go | 200 ++++++ vendor/github.com/hashicorp/hcl/hcl/ast/walk.go | 52 ++ .../github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go | 162 +++++ .../hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go | 440 ++++++++++++ .../hcl/hcl/fmtcmd/test-fixtures/.hidden.ignore | 1 + .../hcl/hcl/fmtcmd/test-fixtures/dir.ignore | 0 .../hcl/hcl/fmtcmd/test-fixtures/file.ignore | 1 + .../hcl/hcl/fmtcmd/test-fixtures/good.hcl | 0 .../github.com/hashicorp/hcl/hcl/parser/error.go | 17 + .../hashicorp/hcl/hcl/parser/error_test.go | 9 + .../github.com/hashicorp/hcl/hcl/parser/parser.go | 514 ++++++++++++++ .../hashicorp/hcl/hcl/parser/parser_test.go | 566 +++++++++++++++ .../hcl/hcl/parser/test-fixtures/array_comment.hcl | 4 + .../hcl/parser/test-fixtures/array_comment_2.hcl | 6 + .../hcl/hcl/parser/test-fixtures/assign_colon.hcl | 6 + .../hcl/hcl/parser/test-fixtures/assign_deep.hcl | 5 + .../hcl/hcl/parser/test-fixtures/comment.hcl | 15 + .../hcl/parser/test-fixtures/comment_lastline.hcl | 1 + .../hcl/parser/test-fixtures/comment_single.hcl | 1 + .../hcl/hcl/parser/test-fixtures/complex.hcl | 42 ++ .../hcl/hcl/parser/test-fixtures/complex_key.hcl | 1 + .../hcl/hcl/parser/test-fixtures/empty.hcl | 0 .../hcl/hcl/parser/test-fixtures/git_crypt.hcl | Bin 0 -> 10 bytes .../hcl/parser/test-fixtures/key_without_value.hcl | 1 + .../hcl/hcl/parser/test-fixtures/list.hcl | 1 + .../hcl/hcl/parser/test-fixtures/list_comma.hcl | 1 + .../hcl/parser/test-fixtures/missing_braces.hcl | 4 + .../hcl/hcl/parser/test-fixtures/multiple.hcl | 2 + .../object_key_assign_without_value.hcl | 3 + .../object_key_assign_without_value2.hcl | 4 + .../object_key_assign_without_value3.hcl | 4 + .../test-fixtures/object_key_without_value.hcl | 3 + .../hcl/parser/test-fixtures/object_list_comma.hcl | 1 + .../hashicorp/hcl/hcl/parser/test-fixtures/old.hcl | 3 + .../hcl/hcl/parser/test-fixtures/structure.hcl | 5 + .../hcl/parser/test-fixtures/structure_basic.hcl | 5 + .../hcl/parser/test-fixtures/structure_empty.hcl | 1 + .../hcl/hcl/parser/test-fixtures/types.hcl | 7 + .../parser/test-fixtures/unterminated_object.hcl | 2 + .../parser/test-fixtures/unterminated_object_2.hcl | 6 + .../github.com/hashicorp/hcl/hcl/printer/nodes.go | 779 +++++++++++++++++++++ .../hashicorp/hcl/hcl/printer/printer.go | 67 ++ .../hashicorp/hcl/hcl/printer/printer_test.go | 153 ++++ .../hcl/hcl/printer/testdata/comment.golden | 36 + .../hcl/hcl/printer/testdata/comment.input | 37 + .../hcl/printer/testdata/comment_aligned.golden | 32 + .../hcl/hcl/printer/testdata/comment_aligned.input | 28 + .../hcl/hcl/printer/testdata/comment_array.golden | 13 + .../hcl/hcl/printer/testdata/comment_array.input | 13 + .../hcl/printer/testdata/comment_end_file.golden | 6 + .../hcl/printer/testdata/comment_end_file.input | 5 + .../testdata/comment_multiline_indent.golden | 12 + .../testdata/comment_multiline_indent.input | 13 + .../testdata/comment_multiline_no_stanza.golden | 7 + .../testdata/comment_multiline_no_stanza.input | 6 + .../testdata/comment_multiline_stanza.golden | 10 + .../testdata/comment_multiline_stanza.input | 10 + .../hcl/printer/testdata/comment_newline.golden | 3 + 
.../hcl/hcl/printer/testdata/comment_newline.input | 2 + .../printer/testdata/comment_object_multi.golden | 9 + .../printer/testdata/comment_object_multi.input | 9 + .../hcl/printer/testdata/comment_standalone.golden | 17 + .../hcl/printer/testdata/comment_standalone.input | 16 + .../hcl/hcl/printer/testdata/complexhcl.golden | 54 ++ .../hcl/hcl/printer/testdata/complexhcl.input | 53 ++ .../hcl/hcl/printer/testdata/empty_block.golden | 12 + .../hcl/hcl/printer/testdata/empty_block.input | 14 + .../hashicorp/hcl/hcl/printer/testdata/list.golden | 43 ++ .../hashicorp/hcl/hcl/printer/testdata/list.input | 37 + .../hcl/hcl/printer/testdata/list_comment.golden | 7 + .../hcl/hcl/printer/testdata/list_comment.input | 6 + .../hcl/printer/testdata/list_of_objects.golden | 10 + .../hcl/hcl/printer/testdata/list_of_objects.input | 10 + .../hcl/printer/testdata/multiline_string.golden | 7 + .../hcl/printer/testdata/multiline_string.input | 7 + .../hcl/printer/testdata/object_singleline.golden | 26 + .../hcl/printer/testdata/object_singleline.input | 19 + .../printer/testdata/object_with_heredoc.golden | 6 + .../hcl/printer/testdata/object_with_heredoc.input | 6 + .../hashicorp/hcl/hcl/scanner/scanner.go | 651 +++++++++++++++++ .../hashicorp/hcl/hcl/scanner/scanner_test.go | 591 ++++++++++++++++ .../github.com/hashicorp/hcl/hcl/strconv/quote.go | 241 +++++++ .../hashicorp/hcl/hcl/strconv/quote_test.go | 96 +++ .../hcl/hcl/test-fixtures/array_comment.hcl | 4 + .../hcl/hcl/test-fixtures/assign_colon.hcl | 6 + .../hashicorp/hcl/hcl/test-fixtures/comment.hcl | 15 + .../hcl/hcl/test-fixtures/comment_single.hcl | 1 + .../hashicorp/hcl/hcl/test-fixtures/complex.hcl | 42 ++ .../hcl/hcl/test-fixtures/complex_key.hcl | 1 + .../hashicorp/hcl/hcl/test-fixtures/empty.hcl | 0 .../hashicorp/hcl/hcl/test-fixtures/list.hcl | 1 + .../hashicorp/hcl/hcl/test-fixtures/list_comma.hcl | 1 + .../hashicorp/hcl/hcl/test-fixtures/multiple.hcl | 2 + .../hashicorp/hcl/hcl/test-fixtures/old.hcl | 3 + .../hashicorp/hcl/hcl/test-fixtures/structure.hcl | 5 + .../hcl/hcl/test-fixtures/structure_basic.hcl | 5 + .../hcl/hcl/test-fixtures/structure_empty.hcl | 1 + .../hashicorp/hcl/hcl/test-fixtures/types.hcl | 7 + .../github.com/hashicorp/hcl/hcl/token/position.go | 46 ++ vendor/github.com/hashicorp/hcl/hcl/token/token.go | 219 ++++++ .../hashicorp/hcl/hcl/token/token_test.go | 69 ++ 102 files changed, 5922 insertions(+) create mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/ast.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/walk.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/.hidden.ignore create mode 100644 vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/dir.ignore create mode 100644 vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/file.ignore create mode 100644 vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/good.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/error.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/parser.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/array_comment.hcl create mode 100644 
vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/array_comment_2.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/assign_colon.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/assign_deep.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/comment.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/comment_lastline.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/comment_single.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/complex.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/complex_key.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/empty.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/git_crypt.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/key_without_value.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/list.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/list_comma.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/missing_braces.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/multiple.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value2.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value3.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_without_value.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_list_comma.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/old.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/structure.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/structure_basic.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/structure_empty.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/types.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/unterminated_object.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/unterminated_object_2.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/printer.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.golden create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.input create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.golden create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.input create mode 100644 
vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_multiline_indent.golden create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_multiline_indent.input create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_multiline_no_stanza.golden create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_multiline_no_stanza.input create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_multiline_stanza.golden create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_multiline_stanza.input create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_newline.golden create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_newline.input create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_object_multi.golden create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_object_multi.input create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.golden create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.input create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/complexhcl.golden create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/complexhcl.input create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/empty_block.golden create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/empty_block.input create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/list.golden create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/list.input create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/list_comment.golden create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/list_comment.input create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/list_of_objects.golden create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/list_of_objects.input create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/multiline_string.golden create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/multiline_string.input create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/object_singleline.golden create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/object_singleline.input create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/object_with_heredoc.golden create mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/testdata/object_with_heredoc.input create mode 100644 vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/array_comment.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_colon.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment_single.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex_key.hcl create mode 100644 
vendor/github.com/hashicorp/hcl/hcl/test-fixtures/empty.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list_comma.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/multiple.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/old.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_basic.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_empty.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/test-fixtures/types.hcl create mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/position.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/token.go create mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/token_test.go (limited to 'vendor/github.com/hashicorp/hcl/hcl') diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go new file mode 100644 index 000000000..6e5ef654b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go @@ -0,0 +1,219 @@ +// Package ast declares the types used to represent syntax trees for HCL +// (HashiCorp Configuration Language) +package ast + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/token" +) + +// Node is an element in the abstract syntax tree. +type Node interface { + node() + Pos() token.Pos +} + +func (File) node() {} +func (ObjectList) node() {} +func (ObjectKey) node() {} +func (ObjectItem) node() {} +func (Comment) node() {} +func (CommentGroup) node() {} +func (ObjectType) node() {} +func (LiteralType) node() {} +func (ListType) node() {} + +// File represents a single HCL file +type File struct { + Node Node // usually a *ObjectList + Comments []*CommentGroup // list of all comments in the source +} + +func (f *File) Pos() token.Pos { + return f.Node.Pos() +} + +// ObjectList represents a list of ObjectItems. An HCL file itself is an +// ObjectList. +type ObjectList struct { + Items []*ObjectItem +} + +func (o *ObjectList) Add(item *ObjectItem) { + o.Items = append(o.Items, item) +} + +// Filter filters out the objects with the given key list as a prefix. +// +// The returned list of objects contain ObjectItems where the keys have +// this prefix already stripped off. This might result in objects with +// zero-length key lists if they have no children. +// +// If no matches are found, an empty ObjectList (non-nil) is returned. +func (o *ObjectList) Filter(keys ...string) *ObjectList { + var result ObjectList + for _, item := range o.Items { + // If there aren't enough keys, then ignore this + if len(item.Keys) < len(keys) { + continue + } + + match := true + for i, key := range item.Keys[:len(keys)] { + key := key.Token.Value().(string) + if key != keys[i] && !strings.EqualFold(key, keys[i]) { + match = false + break + } + } + if !match { + continue + } + + // Strip off the prefix from the children + newItem := *item + newItem.Keys = newItem.Keys[len(keys):] + result.Add(&newItem) + } + + return &result +} + +// Children returns further nested objects (key length > 0) within this +// ObjectList. This should be used with Filter to get at child items. 
+func (o *ObjectList) Children() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) > 0 { + result.Add(item) + } + } + + return &result +} + +// Elem returns items in the list that are direct element assignments +// (key length == 0). This should be used with Filter to get at elements. +func (o *ObjectList) Elem() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) == 0 { + result.Add(item) + } + } + + return &result +} + +func (o *ObjectList) Pos() token.Pos { + // always returns the uninitiliazed position + return o.Items[0].Pos() +} + +// ObjectItem represents a HCL Object Item. An item is represented with a key +// (or keys). It can be an assignment or an object (both normal and nested) +type ObjectItem struct { + // keys is only one length long if it's of type assignment. If it's a + // nested object it can be larger than one. In that case "assign" is + // invalid as there is no assignments for a nested object. + Keys []*ObjectKey + + // assign contains the position of "=", if any + Assign token.Pos + + // val is the item itself. It can be an object,list, number, bool or a + // string. If key length is larger than one, val can be only of type + // Object. + Val Node + + LeadComment *CommentGroup // associated lead comment + LineComment *CommentGroup // associated line comment +} + +func (o *ObjectItem) Pos() token.Pos { + // I'm not entirely sure what causes this, but removing this causes + // a test failure. We should investigate at some point. + if len(o.Keys) == 0 { + return token.Pos{} + } + + return o.Keys[0].Pos() +} + +// ObjectKeys are either an identifier or of type string. +type ObjectKey struct { + Token token.Token +} + +func (o *ObjectKey) Pos() token.Pos { + return o.Token.Pos +} + +// LiteralType represents a literal of basic type. Valid types are: +// token.NUMBER, token.FLOAT, token.BOOL and token.STRING +type LiteralType struct { + Token token.Token + + // comment types, only used when in a list + LeadComment *CommentGroup + LineComment *CommentGroup +} + +func (l *LiteralType) Pos() token.Pos { + return l.Token.Pos +} + +// ListStatement represents a HCL List type +type ListType struct { + Lbrack token.Pos // position of "[" + Rbrack token.Pos // position of "]" + List []Node // the elements in lexical order +} + +func (l *ListType) Pos() token.Pos { + return l.Lbrack +} + +func (l *ListType) Add(node Node) { + l.List = append(l.List, node) +} + +// ObjectType represents a HCL Object Type +type ObjectType struct { + Lbrace token.Pos // position of "{" + Rbrace token.Pos // position of "}" + List *ObjectList // the nodes in lexical order +} + +func (o *ObjectType) Pos() token.Pos { + return o.Lbrace +} + +// Comment node represents a single //, # style or /*- style commment +type Comment struct { + Start token.Pos // position of / or # + Text string +} + +func (c *Comment) Pos() token.Pos { + return c.Start +} + +// CommentGroup node represents a sequence of comments with no other tokens and +// no empty lines between. 
+type CommentGroup struct { + List []*Comment // len(List) > 0 +} + +func (c *CommentGroup) Pos() token.Pos { + return c.List[0].Pos() +} + +//------------------------------------------------------------------- +// GoStringer +//------------------------------------------------------------------- + +func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } +func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) } diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go new file mode 100644 index 000000000..942256cad --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go @@ -0,0 +1,200 @@ +package ast + +import ( + "reflect" + "strings" + "testing" + + "github.com/hashicorp/hcl/hcl/token" +) + +func TestObjectListFilter(t *testing.T) { + var cases = []struct { + Filter []string + Input []*ObjectItem + Output []*ObjectItem + }{ + { + []string{"foo"}, + []*ObjectItem{ + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{ + Token: token.Token{Type: token.STRING, Text: `"foo"`}, + }, + }, + }, + }, + []*ObjectItem{ + &ObjectItem{ + Keys: []*ObjectKey{}, + }, + }, + }, + + { + []string{"foo"}, + []*ObjectItem{ + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}}, + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}}, + }, + }, + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}}, + }, + }, + }, + []*ObjectItem{ + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}}, + }, + }, + }, + }, + } + + for _, tc := range cases { + input := &ObjectList{Items: tc.Input} + expected := &ObjectList{Items: tc.Output} + if actual := input.Filter(tc.Filter...); !reflect.DeepEqual(actual, expected) { + t.Fatalf("in order: input, expected, actual\n\n%#v\n\n%#v\n\n%#v", input, expected, actual) + } + } +} + +func TestWalk(t *testing.T) { + items := []*ObjectItem{ + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}}, + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}}, + }, + Val: &LiteralType{Token: token.Token{Type: token.STRING, Text: `"example"`}}, + }, + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}}, + }, + }, + } + + node := &ObjectList{Items: items} + + order := []string{ + "*ast.ObjectList", + "*ast.ObjectItem", + "*ast.ObjectKey", + "*ast.ObjectKey", + "*ast.LiteralType", + "*ast.ObjectItem", + "*ast.ObjectKey", + } + count := 0 + + Walk(node, func(n Node) (Node, bool) { + if n == nil { + return n, false + } + + typeName := reflect.TypeOf(n).String() + if order[count] != typeName { + t.Errorf("expected '%s' got: '%s'", order[count], typeName) + } + count++ + return n, true + }) +} + +func TestWalkEquality(t *testing.T) { + items := []*ObjectItem{ + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}}, + }, + }, + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}}, + }, + }, + } + + node := &ObjectList{Items: items} + + rewritten := Walk(node, func(n Node) (Node, bool) { return n, true }) + + newNode, ok := rewritten.(*ObjectList) + if !ok { + t.Fatalf("expected Objectlist, got %T", rewritten) + } + + if !reflect.DeepEqual(node, newNode) { + t.Fatal("rewritten node is not equal to the given node") + } + + if 
len(newNode.Items) != 2 { + t.Error("expected newNode length 2, got: %d", len(newNode.Items)) + } + + expected := []string{ + `"foo"`, + `"bar"`, + } + + for i, item := range newNode.Items { + if len(item.Keys) != 1 { + t.Error("expected keys newNode length 1, got: %d", len(item.Keys)) + } + + if item.Keys[0].Token.Text != expected[i] { + t.Errorf("expected key %s, got %s", expected[i], item.Keys[0].Token.Text) + } + + if item.Val != nil { + t.Errorf("expected item value should be nil") + } + } +} + +func TestWalkRewrite(t *testing.T) { + items := []*ObjectItem{ + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}}, + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}}, + }, + }, + &ObjectItem{ + Keys: []*ObjectKey{ + &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}}, + }, + }, + } + + node := &ObjectList{Items: items} + + suffix := "_example" + node = Walk(node, func(n Node) (Node, bool) { + switch i := n.(type) { + case *ObjectKey: + i.Token.Text = i.Token.Text + suffix + n = i + } + return n, true + }).(*ObjectList) + + Walk(node, func(n Node) (Node, bool) { + switch i := n.(type) { + case *ObjectKey: + if !strings.HasSuffix(i.Token.Text, suffix) { + t.Errorf("Token '%s' should have suffix: %s", i.Token.Text, suffix) + } + } + return n, true + }) + +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go new file mode 100644 index 000000000..ba07ad42b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go @@ -0,0 +1,52 @@ +package ast + +import "fmt" + +// WalkFunc describes a function to be called for each node during a Walk. The +// returned node can be used to rewrite the AST. Walking stops the returned +// bool is false. +type WalkFunc func(Node) (Node, bool) + +// Walk traverses an AST in depth-first order: It starts by calling fn(node); +// node must not be nil. If fn returns true, Walk invokes fn recursively for +// each of the non-nil children of node, followed by a call of fn(nil). The +// returned node of fn can be used to rewrite the passed node to fn. +func Walk(node Node, fn WalkFunc) Node { + rewritten, ok := fn(node) + if !ok { + return rewritten + } + + switch n := node.(type) { + case *File: + n.Node = Walk(n.Node, fn) + case *ObjectList: + for i, item := range n.Items { + n.Items[i] = Walk(item, fn).(*ObjectItem) + } + case *ObjectKey: + // nothing to do + case *ObjectItem: + for i, k := range n.Keys { + n.Keys[i] = Walk(k, fn).(*ObjectKey) + } + + if n.Val != nil { + n.Val = Walk(n.Val, fn) + } + case *LiteralType: + // nothing to do + case *ListType: + for i, l := range n.List { + n.List[i] = Walk(l, fn) + } + case *ObjectType: + n.List = Walk(n.List, fn).(*ObjectList) + default: + // should we panic here? 
+ fmt.Printf("unknown type: %T\n", n) + } + + fn(nil) + return rewritten +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go new file mode 100644 index 000000000..2380d71e3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go @@ -0,0 +1,162 @@ +// Derivative work from: +// - https://golang.org/src/cmd/gofmt/gofmt.go +// - https://github.com/fatih/hclfmt + +package fmtcmd + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/hashicorp/hcl/hcl/printer" +) + +var ( + ErrWriteStdin = errors.New("cannot use write option with standard input") +) + +type Options struct { + List bool // list files whose formatting differs + Write bool // write result to (source) file instead of stdout + Diff bool // display diffs of formatting changes +} + +func isValidFile(f os.FileInfo, extensions []string) bool { + if !f.IsDir() && !strings.HasPrefix(f.Name(), ".") { + for _, ext := range extensions { + if strings.HasSuffix(f.Name(), "."+ext) { + return true + } + } + } + + return false +} + +// If in == nil, the source is the contents of the file with the given filename. +func processFile(filename string, in io.Reader, out io.Writer, stdin bool, opts Options) error { + if in == nil { + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + in = f + } + + src, err := ioutil.ReadAll(in) + if err != nil { + return err + } + + res, err := printer.Format(src) + if err != nil { + return fmt.Errorf("In %s: %s", filename, err) + } + + if !bytes.Equal(src, res) { + // formatting has changed + if opts.List { + fmt.Fprintln(out, filename) + } + if opts.Write { + err = ioutil.WriteFile(filename, res, 0644) + if err != nil { + return err + } + } + if opts.Diff { + data, err := diff(src, res) + if err != nil { + return fmt.Errorf("computing diff: %s", err) + } + fmt.Fprintf(out, "diff a/%s b/%s\n", filename, filename) + out.Write(data) + } + } + + if !opts.List && !opts.Write && !opts.Diff { + _, err = out.Write(res) + } + + return err +} + +func walkDir(path string, extensions []string, stdout io.Writer, opts Options) error { + visitFile := func(path string, f os.FileInfo, err error) error { + if err == nil && isValidFile(f, extensions) { + err = processFile(path, nil, stdout, false, opts) + } + return err + } + + return filepath.Walk(path, visitFile) +} + +func Run( + paths, extensions []string, + stdin io.Reader, + stdout io.Writer, + opts Options, +) error { + if len(paths) == 0 { + if opts.Write { + return ErrWriteStdin + } + if err := processFile("", stdin, stdout, true, opts); err != nil { + return err + } + return nil + } + + for _, path := range paths { + switch dir, err := os.Stat(path); { + case err != nil: + return err + case dir.IsDir(): + if err := walkDir(path, extensions, stdout, opts); err != nil { + return err + } + default: + if err := processFile(path, nil, stdout, false, opts); err != nil { + return err + } + } + } + + return nil +} + +func diff(b1, b2 []byte) (data []byte, err error) { + f1, err := ioutil.TempFile("", "") + if err != nil { + return + } + defer os.Remove(f1.Name()) + defer f1.Close() + + f2, err := ioutil.TempFile("", "") + if err != nil { + return + } + defer os.Remove(f2.Name()) + defer f2.Close() + + f1.Write(b1) + f2.Write(b2) + + data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput() + if len(data) > 0 { + // diff exits with a non-zero status when the files don't match. 
+ // Ignore that failure as long as we get output. + err = nil + } + return +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go new file mode 100644 index 000000000..b952d76d8 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go @@ -0,0 +1,440 @@ +// +build !windows +// TODO(jen20): These need fixing on Windows but fmt is not used right now +// and red CI is making it harder to process other bugs, so ignore until +// we get around to fixing them. + +package fmtcmd + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "regexp" + "sort" + "syscall" + "testing" + + "github.com/hashicorp/hcl/testhelper" +) + +var fixtureExtensions = []string{"hcl"} + +func init() { + sort.Sort(ByFilename(fixtures)) +} + +func TestIsValidFile(t *testing.T) { + const fixtureDir = "./test-fixtures" + + cases := []struct { + Path string + Expected bool + }{ + {"good.hcl", true}, + {".hidden.ignore", false}, + {"file.ignore", false}, + {"dir.ignore", false}, + } + + for _, tc := range cases { + file, err := os.Stat(filepath.Join(fixtureDir, tc.Path)) + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + if res := isValidFile(file, fixtureExtensions); res != tc.Expected { + t.Errorf("want: %b, got: %b", tc.Expected, res) + } + } +} + +func TestRunMultiplePaths(t *testing.T) { + path1, err := renderFixtures("") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(path1) + path2, err := renderFixtures("") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(path2) + + var expectedOut bytes.Buffer + for _, path := range []string{path1, path2} { + for _, fixture := range fixtures { + if !bytes.Equal(fixture.golden, fixture.input) { + expectedOut.WriteString(filepath.Join(path, fixture.filename) + "\n") + } + } + } + + _, stdout := mockIO() + err = Run( + []string{path1, path2}, + fixtureExtensions, + nil, stdout, + Options{ + List: true, + }, + ) + + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if stdout.String() != expectedOut.String() { + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout) + } +} + +func TestRunSubDirectories(t *testing.T) { + pathParent, err := ioutil.TempDir("", "") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(pathParent) + + path1, err := renderFixtures(pathParent) + if err != nil { + t.Errorf("unexpected error: %s", err) + } + path2, err := renderFixtures(pathParent) + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + paths := []string{path1, path2} + sort.Strings(paths) + + var expectedOut bytes.Buffer + for _, path := range paths { + for _, fixture := range fixtures { + if !bytes.Equal(fixture.golden, fixture.input) { + expectedOut.WriteString(filepath.Join(path, fixture.filename) + "\n") + } + } + } + + _, stdout := mockIO() + err = Run( + []string{pathParent}, + fixtureExtensions, + nil, stdout, + Options{ + List: true, + }, + ) + + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if stdout.String() != expectedOut.String() { + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout) + } +} + +func TestRunStdin(t *testing.T) { + var expectedOut bytes.Buffer + for i, fixture := range fixtures { + if i != 0 { + expectedOut.WriteString("\n") + } + expectedOut.Write(fixture.golden) + } + + stdin, stdout := mockIO() + for _, fixture := range fixtures { + stdin.Write(fixture.input) + } + + err := Run( + 
[]string{}, + fixtureExtensions, + stdin, stdout, + Options{}, + ) + + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if !bytes.Equal(stdout.Bytes(), expectedOut.Bytes()) { + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout) + } +} + +func TestRunStdinAndWrite(t *testing.T) { + var expectedOut = []byte{} + + stdin, stdout := mockIO() + stdin.WriteString("") + err := Run( + []string{}, []string{}, + stdin, stdout, + Options{ + Write: true, + }, + ) + + if err != ErrWriteStdin { + t.Errorf("error want:\n%s\ngot:\n%s", ErrWriteStdin, err) + } + if !bytes.Equal(stdout.Bytes(), expectedOut) { + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout) + } +} + +func TestRunFileError(t *testing.T) { + path, err := ioutil.TempDir("", "") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(path) + filename := filepath.Join(path, "unreadable.hcl") + + var expectedError = &os.PathError{ + Op: "open", + Path: filename, + Err: syscall.EACCES, + } + + err = ioutil.WriteFile(filename, []byte{}, 0000) + if err != nil { + t.Errorf("unexpected error: %s", err) + } + + _, stdout := mockIO() + err = Run( + []string{path}, + fixtureExtensions, + nil, stdout, + Options{}, + ) + + if !reflect.DeepEqual(err, expectedError) { + t.Errorf("error want: %#v, got: %#v", expectedError, err) + } +} + +func TestRunNoOptions(t *testing.T) { + path, err := renderFixtures("") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(path) + + var expectedOut bytes.Buffer + for _, fixture := range fixtures { + expectedOut.Write(fixture.golden) + } + + _, stdout := mockIO() + err = Run( + []string{path}, + fixtureExtensions, + nil, stdout, + Options{}, + ) + + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if stdout.String() != expectedOut.String() { + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout) + } +} + +func TestRunList(t *testing.T) { + path, err := renderFixtures("") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(path) + + var expectedOut bytes.Buffer + for _, fixture := range fixtures { + if !bytes.Equal(fixture.golden, fixture.input) { + expectedOut.WriteString(fmt.Sprintln(filepath.Join(path, fixture.filename))) + } + } + + _, stdout := mockIO() + err = Run( + []string{path}, + fixtureExtensions, + nil, stdout, + Options{ + List: true, + }, + ) + + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if stdout.String() != expectedOut.String() { + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout) + } +} + +func TestRunWrite(t *testing.T) { + path, err := renderFixtures("") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(path) + + _, stdout := mockIO() + err = Run( + []string{path}, + fixtureExtensions, + nil, stdout, + Options{ + Write: true, + }, + ) + + if err != nil { + t.Errorf("unexpected error: %s", err) + } + for _, fixture := range fixtures { + res, err := ioutil.ReadFile(filepath.Join(path, fixture.filename)) + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if !bytes.Equal(res, fixture.golden) { + t.Errorf("file %q contents want:\n%s\ngot:\n%s", fixture.filename, fixture.golden, res) + } + } +} + +func TestRunDiff(t *testing.T) { + path, err := renderFixtures("") + if err != nil { + t.Errorf("unexpected error: %s", err) + } + defer os.RemoveAll(path) + + var expectedOut bytes.Buffer + for _, fixture := range fixtures { + if len(fixture.diff) > 0 { + expectedOut.WriteString( + regexp.QuoteMeta( + 
fmt.Sprintf("diff a/%s/%s b/%s/%s\n", path, fixture.filename, path, fixture.filename), + ), + ) + // Need to use regex to ignore datetimes in diff. + expectedOut.WriteString(`--- .+?\n`) + expectedOut.WriteString(`\+\+\+ .+?\n`) + expectedOut.WriteString(regexp.QuoteMeta(string(fixture.diff))) + } + } + + expectedOutString := testhelper.Unix2dos(expectedOut.String()) + + _, stdout := mockIO() + err = Run( + []string{path}, + fixtureExtensions, + nil, stdout, + Options{ + Diff: true, + }, + ) + + if err != nil { + t.Errorf("unexpected error: %s", err) + } + if !regexp.MustCompile(expectedOutString).Match(stdout.Bytes()) { + t.Errorf("stdout want match:\n%s\ngot:\n%q", expectedOutString, stdout) + } +} + +func mockIO() (stdin, stdout *bytes.Buffer) { + return new(bytes.Buffer), new(bytes.Buffer) +} + +type fixture struct { + filename string + input, golden, diff []byte +} + +type ByFilename []fixture + +func (s ByFilename) Len() int { return len(s) } +func (s ByFilename) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ByFilename) Less(i, j int) bool { return len(s[i].filename) > len(s[j].filename) } + +var fixtures = []fixture{ + { + "noop.hcl", + []byte(`resource "aws_security_group" "firewall" { + count = 5 +} +`), + []byte(`resource "aws_security_group" "firewall" { + count = 5 +} +`), + []byte(``), + }, { + "align_equals.hcl", + []byte(`variable "foo" { + default = "bar" + description = "bar" +} +`), + []byte(`variable "foo" { + default = "bar" + description = "bar" +} +`), + []byte(`@@ -1,4 +1,4 @@ + variable "foo" { +- default = "bar" ++ default = "bar" + description = "bar" + } +`), + }, { + "indentation.hcl", + []byte(`provider "aws" { + access_key = "foo" + secret_key = "bar" +} +`), + []byte(`provider "aws" { + access_key = "foo" + secret_key = "bar" +} +`), + []byte(`@@ -1,4 +1,4 @@ + provider "aws" { +- access_key = "foo" +- secret_key = "bar" ++ access_key = "foo" ++ secret_key = "bar" + } +`), + }, +} + +// parent can be an empty string, in which case the system's default +// temporary directory will be used. 
+func renderFixtures(parent string) (path string, err error) { + path, err = ioutil.TempDir(parent, "") + if err != nil { + return "", err + } + + for _, fixture := range fixtures { + err = ioutil.WriteFile(filepath.Join(path, fixture.filename), []byte(fixture.input), 0644) + if err != nil { + os.RemoveAll(path) + return "", err + } + } + + return path, nil +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/.hidden.ignore b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/.hidden.ignore new file mode 100644 index 000000000..9977a2836 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/.hidden.ignore @@ -0,0 +1 @@ +invalid diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/dir.ignore b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/dir.ignore new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/file.ignore b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/file.ignore new file mode 100644 index 000000000..9977a2836 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/file.ignore @@ -0,0 +1 @@ +invalid diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/good.hcl b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/good.hcl new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go new file mode 100644 index 000000000..5c99381df --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go @@ -0,0 +1,17 @@ +package parser + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/token" +) + +// PosError is a parse error that contains a position. +type PosError struct { + Pos token.Pos + Err error +} + +func (e *PosError) Error() string { + return fmt.Sprintf("At %s: %s", e.Pos, e.Err) +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go new file mode 100644 index 000000000..32399fec5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go @@ -0,0 +1,9 @@ +package parser + +import ( + "testing" +) + +func TestPosError_impl(t *testing.T) { + var _ error = new(PosError) +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go new file mode 100644 index 000000000..6e54bed97 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go @@ -0,0 +1,514 @@ +// Package parser implements a parser for HCL (HashiCorp Configuration +// Language) +package parser + +import ( + "errors" + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/scanner" + "github.com/hashicorp/hcl/hcl/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + comments []*ast.CommentGroup + leadComment *ast.CommentGroup // last lead comment + lineComment *ast.CommentGroup // last line comment + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. 
+func Parse(src []byte) (*ast.File, error) { + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = &PosError{Pos: pos, Err: errors.New(msg)} + } + + f.Node, err = p.objectList(false) + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + f.Comments = p.comments + return f, nil +} + +// objectList parses a list of items within an object (generally k/v pairs). +// The parameter" obj" tells this whether to we are within an object (braces: +// '{', '}') or just at the top level. If we're within an object, we end +// at an RBRACE. +func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + if obj { + tok := p.scan() + p.unscan() + if tok.Type == token.RBRACE { + break + } + } + + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + + // object lists can be optionally comma-delimited e.g. when a list of maps + // is being expressed, so a comma is allowed here - it's simply consumed + tok := p.scan() + if tok.Type != token.COMMA { + p.unscan() + } + } + return node, nil +} + +func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { + endline = p.tok.Pos.Line + + // count the endline if it's multiline comment, ie starting with /* + if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { + // don't use range here - no need to decode Unicode code points + for i := 0; i < len(p.tok.Text); i++ { + if p.tok.Text[i] == '\n' { + endline++ + } + } + } + + comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} + p.tok = p.sc.Scan() + return +} + +func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { + var list []*ast.Comment + endline = p.tok.Pos.Line + + for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { + var comment *ast.Comment + comment, endline = p.consumeComment() + list = append(list, comment) + } + + // add comment group to the comments list + comments = &ast.CommentGroup{List: list} + p.comments = append(p.comments, comments) + + return +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if len(keys) > 0 && err == errEofToken { + // We ignore eof token here since it is an error if we didn't + // receive a value (but we did receive a key) for the item. + err = nil + } + if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { + // This is a strange boolean statement, but what it means is: + // We have keys with no value, and we're likely in an object + // (since RBrace ends an object). For this, we set err to nil so + // we continue and get the error below of having the wrong value + // type. + err = nil + + // Reset the token type so we don't think it completed fine. See + // objectType which uses p.tok.Type to check if we're done with + // the object. 
+ p.tok.Type = token.EOF + } + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + if p.leadComment != nil { + o.LeadComment = p.leadComment + p.leadComment = nil + } + + switch p.tok.Type { + case token.ASSIGN: + o.Assign = p.tok.Pos + o.Val, err = p.object() + if err != nil { + return nil, err + } + case token.LBRACE: + o.Val, err = p.objectType() + if err != nil { + return nil, err + } + default: + keyStr := make([]string, 0, len(keys)) + for _, k := range keys { + keyStr = append(keyStr, k.Token.Text) + } + + return nil, fmt.Errorf( + "key '%s' expected start of object ('{') or assignment ('=')", + strings.Join(keyStr, " ")) + } + + // do a look-ahead for line comment + p.scan() + if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { + o.LineComment = p.lineComment + p.lineComment = nil + } + p.unscan() + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + // It is very important to also return the keys here as well as + // the error. This is because we need to be able to tell if we + // did parse keys prior to finding the EOF, or if we just found + // a bare EOF. + return keys, errEofToken + case token.ASSIGN: + // assignment or object only, but not nested objects. this is not + // allowed: `foo bar = {}` + if keyCount > 1 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), + } + } + + if keyCount == 0 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: errors.New("no object keys found!"), + } + } + + return keys, nil + case token.LBRACE: + var err error + + // If we have no keys, then it is a syntax error. i.e. {{}} is not + // allowed. + if len(keys) == 0 { + err = &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), + } + } + + // object + return keys, err + case token.IDENT, token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{Token: p.tok}) + case token.ILLEGAL: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("illegal character"), + } + default: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), + } + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) object() (ast.Node, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.COMMENT: + // implement comment + case token.EOF: + return nil, errEofToken + } + + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("Unknown token: %+v", tok), + } +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{ + Lbrace: p.tok.Pos, + } + + l, err := p.objectList(true) + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. 
+ if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + // No error, scan and expect the ending to be a brace + if tok := p.scan(); tok.Type != token.RBRACE { + return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type) + } + + o.List = l + o.Rbrace = p.tok.Pos // advanced via parseObjectList + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{ + Lbrack: p.tok.Pos, + } + + needComma := false + for { + tok := p.scan() + if needComma { + switch tok.Type { + case token.COMMA, token.RBRACK: + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error parsing list, expected comma or list end, got: %s", + tok.Type), + } + } + } + switch tok.Type { + case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: + node, err := p.literalType() + if err != nil { + return nil, err + } + + // If there is a lead comment, apply it + if p.leadComment != nil { + node.LeadComment = p.leadComment + p.leadComment = nil + } + + l.Add(node) + needComma = true + case token.COMMA: + // get next list item or we are at the end + // do a look-ahead for line comment + p.scan() + if p.lineComment != nil && len(l.List) > 0 { + lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) + if ok { + lit.LineComment = p.lineComment + l.List[len(l.List)-1] = lit + p.lineComment = nil + } + } + p.unscan() + + needComma = false + continue + case token.LBRACE: + // Looks like a nested object, so parse it out + node, err := p.objectType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse object within list: %s", err), + } + } + l.Add(node) + needComma = true + case token.LBRACK: + node, err := p.listType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse list within list: %s", err), + } + } + l.Add(node) + case token.RBRACK: + // finished + l.Rbrack = p.tok.Pos + return l, nil + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), + } + } + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok, + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. In the process, it collects any +// comment groups encountered, and remembers the last lead and line comments. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + // Otherwise read the next token from the scanner and Save it to the buffer + // in case we unscan later. + prev := p.tok + p.tok = p.sc.Scan() + + if p.tok.Type == token.COMMENT { + var comment *ast.CommentGroup + var endline int + + // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", + // p.tok.Pos.Line, prev.Pos.Line, endline) + if p.tok.Pos.Line == prev.Pos.Line { + // The comment is on same line as the previous token; it + // cannot be a lead comment but may be a line comment. 
+ comment, endline = p.consumeCommentGroup(0) + if p.tok.Pos.Line != endline { + // The next token is on a different line, thus + // the last comment group is a line comment. + p.lineComment = comment + } + } + + // consume successor comments, if any + endline = -1 + for p.tok.Type == token.COMMENT { + comment, endline = p.consumeCommentGroup(1) + } + + if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { + switch p.tok.Type { + case token.RBRACE, token.RBRACK: + // Do not count for these cases + default: + // The next token is following on the line immediately after the + // comment group, thus the last comment group is a lead comment. + p.leadComment = comment + } + } + + } + + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go new file mode 100644 index 000000000..575865838 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go @@ -0,0 +1,566 @@ +package parser + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "reflect" + "runtime" + "strings" + "testing" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/token" +) + +func TestType(t *testing.T) { + var literals = []struct { + typ token.Type + src string + }{ + {token.STRING, `foo = "foo"`}, + {token.NUMBER, `foo = 123`}, + {token.NUMBER, `foo = -29`}, + {token.FLOAT, `foo = 123.12`}, + {token.FLOAT, `foo = -123.12`}, + {token.BOOL, `foo = true`}, + {token.HEREDOC, "foo = <= 0 { + result = p.heredocIndent(result) + } + } + + return result +} + +// objectItem returns the printable HCL form of an object item. An object type +// starts with one/multiple keys and has a value. The value might be of any +// type. +func (p *printer) objectItem(o *ast.ObjectItem) []byte { + defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text))) + var buf bytes.Buffer + + if o.LeadComment != nil { + for _, comment := range o.LeadComment.List { + buf.WriteString(comment.Text) + buf.WriteByte(newline) + } + } + + for i, k := range o.Keys { + buf.WriteString(k.Token.Text) + buf.WriteByte(blank) + + // reach end of key + if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 { + buf.WriteString("=") + buf.WriteByte(blank) + } + } + + buf.Write(p.output(o.Val)) + + if o.Val.Pos().Line == o.Keys[0].Pos().Line && o.LineComment != nil { + buf.WriteByte(blank) + for _, comment := range o.LineComment.List { + buf.WriteString(comment.Text) + } + } + + return buf.Bytes() +} + +// objectType returns the printable HCL form of an object type. An object type +// begins with a brace and ends with a brace. 
+func (p *printer) objectType(o *ast.ObjectType) []byte { + defer un(trace(p, "ObjectType")) + var buf bytes.Buffer + buf.WriteString("{") + + var index int + var nextItem token.Pos + var commented, newlinePrinted bool + for { + // Determine the location of the next actual non-comment + // item. If we're at the end, the next item is the closing brace + if index != len(o.List.Items) { + nextItem = o.List.Items[index].Pos() + } else { + nextItem = o.Rbrace + } + + // Go through the standalone comments in the file and print out + // the comments that we should be for this object item. + for _, c := range p.standaloneComments { + printed := false + var lastCommentPos token.Pos + for _, comment := range c.List { + // We only care about comments after the previous item + // we've printed so that comments are printed in the + // correct locations (between two objects for example). + // And before the next item. + if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { + // If there are standalone comments and the initial newline has not + // been printed yet, do it now. + if !newlinePrinted { + newlinePrinted = true + buf.WriteByte(newline) + } + + // add newline if it's between other printed nodes + if index > 0 { + commented = true + buf.WriteByte(newline) + } + + // Store this position + lastCommentPos = comment.Pos() + + // output the comment itself + buf.Write(p.indent(p.heredocIndent([]byte(comment.Text)))) + + // Set printed to true to note that we printed something + printed = true + + /* + if index != len(o.List.Items) { + buf.WriteByte(newline) // do not print on the end + } + */ + } + } + + // Stuff to do if we had comments + if printed { + // Always write a newline + buf.WriteByte(newline) + + // If there is another item in the object and our comment + // didn't hug it directly, then make sure there is a blank + // line separating them. + if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 { + buf.WriteByte(newline) + } + } + } + + if index == len(o.List.Items) { + p.prev = o.Rbrace + break + } + + // At this point we are sure that it's not a totally empty block: print + // the initial newline if it hasn't been printed yet by the previous + // block about standalone comments. + if !newlinePrinted { + buf.WriteByte(newline) + newlinePrinted = true + } + + // check if we have adjacent one liner items. If yes we'll going to align + // the comments. + var aligned []*ast.ObjectItem + for _, item := range o.List.Items[index:] { + // we don't group one line lists + if len(o.List.Items) == 1 { + break + } + + // one means a oneliner with out any lead comment + // two means a oneliner with lead comment + // anything else might be something else + cur := lines(string(p.objectItem(item))) + if cur > 2 { + break + } + + curPos := item.Pos() + + nextPos := token.Pos{} + if index != len(o.List.Items)-1 { + nextPos = o.List.Items[index+1].Pos() + } + + prevPos := token.Pos{} + if index != 0 { + prevPos = o.List.Items[index-1].Pos() + } + + // fmt.Println("DEBUG ----------------") + // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos) + // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos) + // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos) + + if curPos.Line+1 == nextPos.Line { + aligned = append(aligned, item) + index++ + continue + } + + if curPos.Line-1 == prevPos.Line { + aligned = append(aligned, item) + index++ + + // finish if we have a new line or comment next. 
This happens + // if the next item is not adjacent + if curPos.Line+1 != nextPos.Line { + break + } + continue + } + + break + } + + // put newlines if the items are between other non aligned items. + // newlines are also added if there is a standalone comment already, so + // check it too + if !commented && index != len(aligned) { + buf.WriteByte(newline) + } + + if len(aligned) >= 1 { + p.prev = aligned[len(aligned)-1].Pos() + + items := p.alignedItems(aligned) + buf.Write(p.indent(items)) + } else { + p.prev = o.List.Items[index].Pos() + + buf.Write(p.indent(p.objectItem(o.List.Items[index]))) + index++ + } + + buf.WriteByte(newline) + } + + buf.WriteString("}") + return buf.Bytes() +} + +func (p *printer) alignedItems(items []*ast.ObjectItem) []byte { + var buf bytes.Buffer + + // find the longest key and value length, needed for alignment + var longestKeyLen int // longest key length + var longestValLen int // longest value length + for _, item := range items { + key := len(item.Keys[0].Token.Text) + val := len(p.output(item.Val)) + + if key > longestKeyLen { + longestKeyLen = key + } + + if val > longestValLen { + longestValLen = val + } + } + + for i, item := range items { + if item.LeadComment != nil { + for _, comment := range item.LeadComment.List { + buf.WriteString(comment.Text) + buf.WriteByte(newline) + } + } + + for i, k := range item.Keys { + keyLen := len(k.Token.Text) + buf.WriteString(k.Token.Text) + for i := 0; i < longestKeyLen-keyLen+1; i++ { + buf.WriteByte(blank) + } + + // reach end of key + if i == len(item.Keys)-1 && len(item.Keys) == 1 { + buf.WriteString("=") + buf.WriteByte(blank) + } + } + + val := p.output(item.Val) + valLen := len(val) + buf.Write(val) + + if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil { + for i := 0; i < longestValLen-valLen+1; i++ { + buf.WriteByte(blank) + } + + for _, comment := range item.LineComment.List { + buf.WriteString(comment.Text) + } + } + + // do not print for the last item + if i != len(items)-1 { + buf.WriteByte(newline) + } + } + + return buf.Bytes() +} + +// list returns the printable HCL form of an list type. +func (p *printer) list(l *ast.ListType) []byte { + var buf bytes.Buffer + buf.WriteString("[") + + var longestLine int + for _, item := range l.List { + // for now we assume that the list only contains literal types + if lit, ok := item.(*ast.LiteralType); ok { + lineLen := len(lit.Token.Text) + if lineLen > longestLine { + longestLine = lineLen + } + } + } + + insertSpaceBeforeItem := false + lastHadLeadComment := false + for i, item := range l.List { + // Keep track of whether this item is a heredoc since that has + // unique behavior. + heredoc := false + if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { + heredoc = true + } + + if item.Pos().Line != l.Lbrack.Line { + // multiline list, add newline before we add each item + buf.WriteByte(newline) + insertSpaceBeforeItem = false + + // If we have a lead comment, then we want to write that first + leadComment := false + if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil { + leadComment = true + + // If this isn't the first item and the previous element + // didn't have a lead comment, then we need to add an extra + // newline to properly space things out. If it did have a + // lead comment previously then this would be done + // automatically. 
+ if i > 0 && !lastHadLeadComment { + buf.WriteByte(newline) + } + + for _, comment := range lit.LeadComment.List { + buf.Write(p.indent([]byte(comment.Text))) + buf.WriteByte(newline) + } + } + + // also indent each line + val := p.output(item) + curLen := len(val) + buf.Write(p.indent(val)) + + // if this item is a heredoc, then we output the comma on + // the next line. This is the only case this happens. + comma := []byte{','} + if heredoc { + buf.WriteByte(newline) + comma = p.indent(comma) + } + + buf.Write(comma) + + if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { + // if the next item doesn't have any comments, do not align + buf.WriteByte(blank) // align one space + for i := 0; i < longestLine-curLen; i++ { + buf.WriteByte(blank) + } + + for _, comment := range lit.LineComment.List { + buf.WriteString(comment.Text) + } + } + + lastItem := i == len(l.List)-1 + if lastItem { + buf.WriteByte(newline) + } + + if leadComment && !lastItem { + buf.WriteByte(newline) + } + + lastHadLeadComment = leadComment + } else { + if insertSpaceBeforeItem { + buf.WriteByte(blank) + insertSpaceBeforeItem = false + } + + // Output the item itself + // also indent each line + val := p.output(item) + curLen := len(val) + buf.Write(val) + + // If this is a heredoc item we always have to output a newline + // so that it parses properly. + if heredoc { + buf.WriteByte(newline) + } + + // If this isn't the last element, write a comma. + if i != len(l.List)-1 { + buf.WriteString(",") + insertSpaceBeforeItem = true + } + + if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { + // if the next item doesn't have any comments, do not align + buf.WriteByte(blank) // align one space + for i := 0; i < longestLine-curLen; i++ { + buf.WriteByte(blank) + } + + for _, comment := range lit.LineComment.List { + buf.WriteString(comment.Text) + } + } + } + + } + + buf.WriteString("]") + return buf.Bytes() +} + +// indent indents the lines of the given buffer for each non-empty line +func (p *printer) indent(buf []byte) []byte { + var prefix []byte + if p.cfg.SpacesWidth != 0 { + for i := 0; i < p.cfg.SpacesWidth; i++ { + prefix = append(prefix, blank) + } + } else { + prefix = []byte{tab} + } + + var res []byte + bol := true + for _, c := range buf { + if bol && c != '\n' { + res = append(res, prefix...) + } + + res = append(res, c) + bol = c == '\n' + } + return res +} + +// unindent removes all the indentation from the tombstoned lines +func (p *printer) unindent(buf []byte) []byte { + var res []byte + for i := 0; i < len(buf); i++ { + skip := len(buf)-i <= len(unindent) + if !skip { + skip = !bytes.Equal(unindent, buf[i:i+len(unindent)]) + } + if skip { + res = append(res, buf[i]) + continue + } + + // We have a marker. we have to backtrace here and clean out + // any whitespace ahead of our tombstone up to a \n + for j := len(res) - 1; j >= 0; j-- { + if res[j] == '\n' { + break + } + + res = res[:j] + } + + // Skip the entire unindent marker + i += len(unindent) - 1 + } + + return res +} + +// heredocIndent marks all the 2nd and further lines as unindentable +func (p *printer) heredocIndent(buf []byte) []byte { + var res []byte + bol := false + for _, c := range buf { + if bol && c != '\n' { + res = append(res, unindent...) + } + res = append(res, c) + bol = c == '\n' + } + return res +} + +// isSingleLineObject tells whether the given object item is a single +// line object such as "obj {}". 
+// +// A single line object: +// +// * has no lead comments (hence multi-line) +// * has no assignment +// * has no values in the stanza (within {}) +// +func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool { + // If there is a lead comment, can't be one line + if val.LeadComment != nil { + return false + } + + // If there is assignment, we always break by line + if val.Assign.IsValid() { + return false + } + + // If it isn't an object type, then its not a single line object + ot, ok := val.Val.(*ast.ObjectType) + if !ok { + return false + } + + // If the object has no items, it is single line! + return len(ot.List.Items) == 0 +} + +func lines(txt string) int { + endline := 1 + for i := 0; i < len(txt); i++ { + if txt[i] == '\n' { + endline++ + } + } + return endline +} + +// ---------------------------------------------------------------------------- +// Tracing support + +func (p *printer) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + i := 2 * p.indentTrace + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *printer, msg string) *printer { + p.printTrace(msg, "(") + p.indentTrace++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *printer) { + p.indentTrace-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go new file mode 100644 index 000000000..a296fc851 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go @@ -0,0 +1,67 @@ +// Package printer implements printing of AST nodes to HCL format. +package printer + +import ( + "bytes" + "io" + "text/tabwriter" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/parser" +) + +var DefaultConfig = Config{ + SpacesWidth: 2, +} + +// A Config node controls the output of Fprint. +type Config struct { + SpacesWidth int // if set, it will use spaces instead of tabs for alignment +} + +func (c *Config) Fprint(output io.Writer, node ast.Node) error { + p := &printer{ + cfg: *c, + comments: make([]*ast.CommentGroup, 0), + standaloneComments: make([]*ast.CommentGroup, 0), + // enableTrace: true, + } + + p.collectComments(node) + + if _, err := output.Write(p.unindent(p.output(node))); err != nil { + return err + } + + // flush tabwriter, if any + var err error + if tw, _ := output.(*tabwriter.Writer); tw != nil { + err = tw.Flush() + } + + return err +} + +// Fprint "pretty-prints" an HCL node to output +// It calls Config.Fprint with default settings. +func Fprint(output io.Writer, node ast.Node) error { + return DefaultConfig.Fprint(output, node) +} + +// Format formats src HCL and returns the result. 
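As a quick orientation before the function body below: a minimal, illustrative sketch of how a downstream tool might call Format to canonicalize an HCL file. The config.hcl path is hypothetical; only the Format entry point defined in this file is assumed:

// Illustrative only: read an HCL file, reformat it with printer.Format,
// and write the canonical form back out.
package main

import (
	"io/ioutil"
	"log"

	"github.com/hashicorp/hcl/hcl/printer"
)

func main() {
	src, err := ioutil.ReadFile("config.hcl") // hypothetical input path
	if err != nil {
		log.Fatal(err)
	}

	out, err := printer.Format(src)
	if err != nil {
		log.Fatal(err)
	}

	if err := ioutil.WriteFile("config.hcl", out, 0644); err != nil {
		log.Fatal(err)
	}
}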
+func Format(src []byte) ([]byte, error) { + node, err := parser.Parse(src) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + if err := DefaultConfig.Fprint(&buf, node); err != nil { + return nil, err + } + + // Add trailing newline to result + buf.WriteString("\n") + + return buf.Bytes(), nil +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go new file mode 100644 index 000000000..01ea3b2de --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go @@ -0,0 +1,153 @@ +// +build !windows +// TODO(jen20): These need fixing on Windows but printer is not used right now +// and red CI is making it harder to process other bugs, so ignore until +// we get around to fixing them.package printer + +package printer + +import ( + "bytes" + "errors" + "flag" + "fmt" + "io/ioutil" + "path/filepath" + "testing" + + "github.com/hashicorp/hcl/hcl/parser" +) + +var update = flag.Bool("update", false, "update golden files") + +const ( + dataDir = "testdata" +) + +type entry struct { + source, golden string +} + +// Use go test -update to create/update the respective golden files. +var data = []entry{ + {"complexhcl.input", "complexhcl.golden"}, + {"list.input", "list.golden"}, + {"list_comment.input", "list_comment.golden"}, + {"comment.input", "comment.golden"}, + {"comment_aligned.input", "comment_aligned.golden"}, + {"comment_array.input", "comment_array.golden"}, + {"comment_end_file.input", "comment_end_file.golden"}, + {"comment_multiline_indent.input", "comment_multiline_indent.golden"}, + {"comment_multiline_no_stanza.input", "comment_multiline_no_stanza.golden"}, + {"comment_multiline_stanza.input", "comment_multiline_stanza.golden"}, + {"comment_newline.input", "comment_newline.golden"}, + {"comment_object_multi.input", "comment_object_multi.golden"}, + {"comment_standalone.input", "comment_standalone.golden"}, + {"empty_block.input", "empty_block.golden"}, + {"list_of_objects.input", "list_of_objects.golden"}, + {"multiline_string.input", "multiline_string.golden"}, + {"object_singleline.input", "object_singleline.golden"}, + {"object_with_heredoc.input", "object_with_heredoc.golden"}, +} + +func TestFiles(t *testing.T) { + for _, e := range data { + source := filepath.Join(dataDir, e.source) + golden := filepath.Join(dataDir, e.golden) + t.Run(e.source, func(t *testing.T) { + check(t, source, golden) + }) + } +} + +func check(t *testing.T, source, golden string) { + src, err := ioutil.ReadFile(source) + if err != nil { + t.Error(err) + return + } + + res, err := format(src) + if err != nil { + t.Error(err) + return + } + + // update golden files if necessary + if *update { + if err := ioutil.WriteFile(golden, res, 0644); err != nil { + t.Error(err) + } + return + } + + // get golden + gld, err := ioutil.ReadFile(golden) + if err != nil { + t.Error(err) + return + } + + // formatted source and golden must be the same + if err := diff(source, golden, res, gld); err != nil { + t.Error(err) + return + } +} + +// diff compares a and b. 
+func diff(aname, bname string, a, b []byte) error { + var buf bytes.Buffer // holding long error message + + // compare lengths + if len(a) != len(b) { + fmt.Fprintf(&buf, "\nlength changed: len(%s) = %d, len(%s) = %d", aname, len(a), bname, len(b)) + } + + // compare contents + line := 1 + offs := 1 + for i := 0; i < len(a) && i < len(b); i++ { + ch := a[i] + if ch != b[i] { + fmt.Fprintf(&buf, "\n%s:%d:%d: %s", aname, line, i-offs+1, lineAt(a, offs)) + fmt.Fprintf(&buf, "\n%s:%d:%d: %s", bname, line, i-offs+1, lineAt(b, offs)) + fmt.Fprintf(&buf, "\n\n") + break + } + if ch == '\n' { + line++ + offs = i + 1 + } + } + + if buf.Len() > 0 { + return errors.New(buf.String()) + } + return nil +} + +// format parses src, prints the corresponding AST, verifies the resulting +// src is syntactically correct, and returns the resulting src or an error +// if any. +func format(src []byte) ([]byte, error) { + formatted, err := Format(src) + if err != nil { + return nil, err + } + + // make sure formatted output is syntactically correct + if _, err := parser.Parse(formatted); err != nil { + return nil, fmt.Errorf("parse: %s\n%s", err, formatted) + } + + return formatted, nil +} + +// lineAt returns the line in text starting at offset offs. +func lineAt(text []byte, offs int) []byte { + i := offs + for i < len(text) && text[i] != '\n' { + i++ + } + return text[offs:i] +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden new file mode 100644 index 000000000..9d4b072a0 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden @@ -0,0 +1,36 @@ +// A standalone comment is a comment which is not attached to any kind of node + +// This comes from Terraform, as a test +variable "foo" { + # Standalone comment should be still here + + default = "bar" + description = "bar" # yooo +} + +/* This is a multi line standalone +comment*/ + +// fatih arslan +/* This is a developer test +account and a multine comment */ +developer = ["fatih", "arslan"] // fatih arslan + +# One line here +numbers = [1, 2] // another line here + +# Another comment +variable = { + description = "bar" # another yooo + + foo { + # Nested standalone + + bar = "fatih" + } +} + +// lead comment +foo { + bar = "fatih" // line comment 2 +} // line comment 3 diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input new file mode 100644 index 000000000..57c37ac1d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input @@ -0,0 +1,37 @@ +// A standalone comment is a comment which is not attached to any kind of node + + // This comes from Terraform, as a test +variable "foo" { + # Standalone comment should be still here + + default = "bar" + description = "bar" # yooo +} + +/* This is a multi line standalone +comment*/ + + +// fatih arslan +/* This is a developer test +account and a multine comment */ +developer = [ "fatih", "arslan"] // fatih arslan + +# One line here +numbers = [1,2] // another line here + + # Another comment +variable = { + description = "bar" # another yooo + foo { + # Nested standalone + + bar = "fatih" + } +} + + // lead comment +foo { + bar = "fatih" // line comment 2 +} // line comment 3 + diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden new file mode 100644 
index 000000000..6ff21504c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden @@ -0,0 +1,32 @@ +aligned { + # We have some aligned items below + foo = "fatih" # yoo1 + default = "bar" # yoo2 + bar = "bar and foo" # yoo3 + + default = { + bar = "example" + } + + #deneme arslan + fatih = ["fatih"] # yoo4 + + #fatih arslan + fatiharslan = ["arslan"] // yoo5 + + default = { + bar = "example" + } + + security_groups = [ + "foo", # kenya 1 + "${aws_security_group.firewall.foo}", # kenya 2 + ] + + security_groups2 = [ + "foo", # kenya 1 + "bar", # kenya 1.5 + "${aws_security_group.firewall.foo}", # kenya 2 + "foobar", # kenya 3 + ] +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input new file mode 100644 index 000000000..bd43ab1ad --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input @@ -0,0 +1,28 @@ +aligned { +# We have some aligned items below + foo = "fatih" # yoo1 + default = "bar" # yoo2 + bar = "bar and foo" # yoo3 + default = { + bar = "example" + } + #deneme arslan + fatih = ["fatih"] # yoo4 + #fatih arslan + fatiharslan = ["arslan"] // yoo5 + default = { + bar = "example" + } + +security_groups = [ + "foo", # kenya 1 + "${aws_security_group.firewall.foo}", # kenya 2 +] + +security_groups2 = [ + "foo", # kenya 1 + "bar", # kenya 1.5 + "${aws_security_group.firewall.foo}", # kenya 2 + "foobar", # kenya 3 +] +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.golden new file mode 100644 index 000000000..e778eafa3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.golden @@ -0,0 +1,13 @@ +banana = [ + # I really want to comment this item in the array. + "a", + + # This as well + "b", + + "c", # And C + "d", + + # And another + "e", +] diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.input new file mode 100644 index 000000000..e778eafa3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.input @@ -0,0 +1,13 @@ +banana = [ + # I really want to comment this item in the array. 
+ "a", + + # This as well + "b", + + "c", # And C + "d", + + # And another + "e", +] diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.golden new file mode 100644 index 000000000..dbeae36a8 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.golden @@ -0,0 +1,6 @@ +resource "blah" "blah" {} + +// +// +// + diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.input new file mode 100644 index 000000000..68c4c282e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.input @@ -0,0 +1,5 @@ +resource "blah" "blah" {} + +// +// +// diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_multiline_indent.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_multiline_indent.golden new file mode 100644 index 000000000..74c4ccd89 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_multiline_indent.golden @@ -0,0 +1,12 @@ +resource "provider" "resource" { + /* + SPACE_SENSITIVE_CODE = < 0 { + s.err("unexpected null character (0x00)") + return eof + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. +func (s *Scanner) peek() rune { + peek, _, err := s.buf.ReadRune() + if err != nil { + return eof + } + + s.buf.UnreadRune() + return peek +} + +// Scan scans the next token and returns the token. 
+func (s *Scanner) Scan() token.Token { + ch := s.next() + + // skip white space + for isWhitespace(ch) { + ch = s.next() + } + + var tok token.Type + + // token text markings + s.tokStart = s.srcPos.Offset - s.lastCharLen + + // token position, initial next() is moving the offset by one(size of rune + // actually), though we are interested with the starting point + s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen + if s.srcPos.Column > 0 { + // common case: last character was not a '\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + tok = token.IDENT + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '#', '/': + tok = token.COMMENT + s.scanComment(ch) + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '<': + tok = token.HEREDOC + s.scanHeredoc() + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case '=': + tok = token.ASSIGN + case '+': + tok = token.ADD + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + tok = token.SUB + } + default: + s.err("illegal char") + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + Pos: s.tokPos, + Text: tokenText, + } +} + +func (s *Scanner) scanComment(ch rune) { + // single line comments + if ch == '#' || (ch == '/' && s.peek() != '*') { + if ch == '/' && s.peek() != '/' { + s.err("expected '/' for comment") + return + } + + ch = s.next() + for ch != '\n' && ch >= 0 && ch != eof { + ch = s.next() + } + if ch != eof && ch >= 0 { + s.unread() + } + return + } + + // be sure we get the character after /* This allows us to find comment's + // that are not erminated + if ch == '/' { + s.next() + ch = s.next() // read character after "/*" + } + + // look for /* - style comments + for { + if ch < 0 || ch == eof { + s.err("comment not terminated") + break + } + + ch0 := ch + ch = s.next() + if ch0 == '*' && ch == '/' { + break + } + } +} + +// scanNumber scans a HCL number definition starting with the given rune +func (s *Scanner) scanNumber(ch rune) token.Type { + if ch == '0' { + // check for hexadecimal, octal or float + ch = s.next() + if ch == 'x' || ch == 'X' { + // hexadecimal + ch = s.next() + found := false + for isHexadecimal(ch) { + ch = s.next() + found = true + } + + if !found { + s.err("illegal hexadecimal number") + } + + if ch != eof { + s.unread() + } + + return token.NUMBER + } + + // now it's either something like: 0421(octal) or 0.1231(float) + illegalOctal := false + for isDecimal(ch) { + ch = s.next() + if ch == '8' || ch == '9' { + // this is just a possibility. For example 0159 is illegal, but + // 0159.23 is valid. So we mark a possible illegal octal. 
If + // the next character is not a period, we'll print the error. + illegalOctal = true + } + } + + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if illegalOctal { + s.err("illegal octal number") + } + + if ch != eof { + s.unread() + } + return token.NUMBER + } + + s.scanMantissa(ch) + ch = s.next() // seek forward + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if ch != eof { + s.unread() + } + return token.NUMBER +} + +// scanMantissa scans the mantissa begining from the rune. It returns the next +// non decimal rune. It's used to determine wheter it's a fraction or exponent. +func (s *Scanner) scanMantissa(ch rune) rune { + scanned := false + for isDecimal(ch) { + ch = s.next() + scanned = true + } + + if scanned && ch != eof { + s.unread() + } + return ch +} + +// scanFraction scans the fraction after the '.' rune +func (s *Scanner) scanFraction(ch rune) rune { + if ch == '.' { + ch = s.peek() // we peek just to see if we can move forward + ch = s.scanMantissa(ch) + } + return ch +} + +// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' +// rune. +func (s *Scanner) scanExponent(ch rune) rune { + if ch == 'e' || ch == 'E' { + ch = s.next() + if ch == '-' || ch == '+' { + ch = s.next() + } + ch = s.scanMantissa(ch) + } + return ch +} + +// scanHeredoc scans a heredoc string +func (s *Scanner) scanHeredoc() { + // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { + break + } + + // Not an anchor match, record the start of a new line + lineStart = s.srcPos.Offset + } + + if ch == eof { + s.err("heredoc not terminated") + return + } + } + + return +} + +// scanString scans a quoted string +func (s *Scanner) scanString() { + braces := 0 + for { + // '"' opening already consumed + // read character after quote + ch := s.next() + + if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { + s.err("literal not terminated") + return + } + + if ch == '"' && braces == 0 { + break + } + + // If we're going into a ${} then we can ignore quotes for awhile + if braces == 0 && ch == '$' && s.peek() == '{' { + braces++ + s.next() + } else if braces > 0 && ch == '{' { + braces++ + } + if braces > 0 && ch == '}' { + braces-- + } + + if ch == '\\' { + s.scanEscape() + } + } + + return +} + +// scanEscape scans an escape sequence +func (s *Scanner) scanEscape() rune { + // http://en.cppreference.com/w/cpp/language/escape + ch := s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + // nothing to do + case '0', '1', '2', '3', '4', '5', '6', '7': + // octal notation + ch = s.scanDigits(ch, 8, 3) + case 'x': + // hexademical notation + ch = s.scanDigits(s.next(), 16, 2) + case 'u': + // universal character name + ch = s.scanDigits(s.next(), 16, 4) + case 'U': + // universal character name + ch = s.scanDigits(s.next(), 16, 8) + default: + s.err("illegal char escape") + } + return ch +} + +// scanDigits scans a rune with the given base for n times. 
For example an +// octal notation \184 would yield in scanDigits(ch, 8, 3) +func (s *Scanner) scanDigits(ch rune, base, n int) rune { + start := n + for n > 0 && digitVal(ch) < base { + ch = s.next() + if ch == eof { + // If we see an EOF, we halt any more scanning of digits + // immediately. + break + } + + n-- + } + if n > 0 { + s.err("illegal char escape") + } + + if n != start { + // we scanned all digits, put the last non digit char back, + // only if we read anything at all + s.unread() + } + + return ch +} + +// scanIdentifier scans an identifier and returns the literal string +func (s *Scanner) scanIdentifier() string { + offs := s.srcPos.Offset - s.lastCharLen + ch := s.next() + for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { + ch = s.next() + } + + if ch != eof { + s.unread() // we got identifier, put back latest char + } + + return string(s.src[offs:s.srcPos.Offset]) +} + +// recentPosition returns the position of the character immediately after the +// character or token returned by the last call to Scan. +func (s *Scanner) recentPosition() (pos token.Pos) { + pos.Offset = s.srcPos.Offset - s.lastCharLen + switch { + case s.srcPos.Column > 0: + // common case: last character was not a '\n' + pos.Line = s.srcPos.Line + pos.Column = s.srcPos.Column + case s.lastLineLen > 0: + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + pos.Line = s.srcPos.Line - 1 + pos.Column = s.lastLineLen + default: + // at the beginning of the source + pos.Line = 1 + pos.Column = 1 + } + return +} + +// err prints the error of any scanning to s.Error function. If the function is +// not defined, by default it prints them to os.Stderr +func (s *Scanner) err(msg string) { + s.ErrorCount++ + pos := s.recentPosition() + + if s.Error != nil { + s.Error(pos, msg) + return + } + + fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) +} + +// isHexadecimal returns true if the given rune is a letter +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) +} + +// isDigit returns true if the given rune is a decimal digit +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +// isDecimal returns true if the given rune is a decimal number +func isDecimal(ch rune) bool { + return '0' <= ch && ch <= '9' +} + +// isHexadecimal returns true if the given rune is an hexadecimal number +func isHexadecimal(ch rune) bool { + return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' +} + +// isWhitespace returns true if the rune is a space, tab, newline or carriage return +func isWhitespace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' +} + +// digitVal returns the integer value of a given octal,decimal or hexadecimal rune +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go new file mode 100644 index 000000000..4f2c9cbe0 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go @@ -0,0 +1,591 @@ +package scanner + +import ( + "bytes" + "fmt" + "testing" + + "strings" + + 
"github.com/hashicorp/hcl/hcl/token" +) + +var f100 = strings.Repeat("f", 100) + +type tokenPair struct { + tok token.Type + text string +} + +var tokenLists = map[string][]tokenPair{ + "comment": []tokenPair{ + {token.COMMENT, "//"}, + {token.COMMENT, "////"}, + {token.COMMENT, "// comment"}, + {token.COMMENT, "// /* comment */"}, + {token.COMMENT, "// // comment //"}, + {token.COMMENT, "//" + f100}, + {token.COMMENT, "#"}, + {token.COMMENT, "##"}, + {token.COMMENT, "# comment"}, + {token.COMMENT, "# /* comment */"}, + {token.COMMENT, "# # comment #"}, + {token.COMMENT, "#" + f100}, + {token.COMMENT, "/**/"}, + {token.COMMENT, "/***/"}, + {token.COMMENT, "/* comment */"}, + {token.COMMENT, "/* // comment */"}, + {token.COMMENT, "/* /* comment */"}, + {token.COMMENT, "/*\n comment\n*/"}, + {token.COMMENT, "/*" + f100 + "*/"}, + }, + "operator": []tokenPair{ + {token.LBRACK, "["}, + {token.LBRACE, "{"}, + {token.COMMA, ","}, + {token.PERIOD, "."}, + {token.RBRACK, "]"}, + {token.RBRACE, "}"}, + {token.ASSIGN, "="}, + {token.ADD, "+"}, + {token.SUB, "-"}, + }, + "bool": []tokenPair{ + {token.BOOL, "true"}, + {token.BOOL, "false"}, + }, + "ident": []tokenPair{ + {token.IDENT, "a"}, + {token.IDENT, "a0"}, + {token.IDENT, "foobar"}, + {token.IDENT, "foo-bar"}, + {token.IDENT, "abc123"}, + {token.IDENT, "LGTM"}, + {token.IDENT, "_"}, + {token.IDENT, "_abc123"}, + {token.IDENT, "abc123_"}, + {token.IDENT, "_abc_123_"}, + {token.IDENT, "_äöü"}, + {token.IDENT, "_本"}, + {token.IDENT, "äöü"}, + {token.IDENT, "本"}, + {token.IDENT, "a۰۱۸"}, + {token.IDENT, "foo६४"}, + {token.IDENT, "bar9876"}, + }, + "heredoc": []tokenPair{ + {token.HEREDOC, "< 0 for %q", s.ErrorCount, src) + } +} + +func testTokenList(t *testing.T, tokenList []tokenPair) { + // create artifical source code + buf := new(bytes.Buffer) + for _, ident := range tokenList { + fmt.Fprintf(buf, "%s\n", ident.text) + } + + s := New(buf.Bytes()) + for _, ident := range tokenList { + tok := s.Scan() + if tok.Type != ident.tok { + t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text) + } + + if tok.Text != ident.text { + t.Errorf("text = %q want %q", tok.String(), ident.text) + } + + } +} + +func countNewlines(s string) int { + n := 0 + for _, ch := range s { + if ch == '\n' { + n++ + } + } + return n +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go new file mode 100644 index 000000000..5f981eaa2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go @@ -0,0 +1,241 @@ +package strconv + +import ( + "errors" + "unicode/utf8" +) + +// ErrSyntax indicates that a value does not have the right syntax for the target type. +var ErrSyntax = errors.New("invalid syntax") + +// Unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +func Unquote(s string) (t string, err error) { + n := len(s) + if n < 2 { + return "", ErrSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", ErrSyntax + } + s = s[1 : n-1] + + if quote != '"' { + return "", ErrSyntax + } + if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { + return "", ErrSyntax + } + + // Is it trivial? Avoid allocation. 
+ if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { + switch quote { + case '"': + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + // If we're starting a '${}' then let it through un-unquoted. + // Specifically: we don't unquote any characters within the `${}` + // section. + if s[0] == '$' && len(s) > 1 && s[1] == '{' { + buf = append(buf, '$', '{') + s = s[2:] + + // Continue reading until we find the closing brace, copying as-is + braces := 1 + for len(s) > 0 && braces > 0 { + r, size := utf8.DecodeRuneInString(s) + if r == utf8.RuneError { + return "", ErrSyntax + } + + s = s[size:] + + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + + switch r { + case '{': + braces++ + case '}': + braces-- + } + } + if braces != 0 { + return "", ErrSyntax + } + if len(s) == 0 { + // If there's no string left, we're done! + break + } else { + // If there's more left, we need to pop back up to the top of the loop + // in case there's another interpolation in this string. + continue + } + } + + if s[0] == '\n' { + return "", ErrSyntax + } + + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", ErrSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. +func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} + +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"'): + err = ErrSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = ErrSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = ErrSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = ErrSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = ErrSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = ErrSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = ErrSyntax + return 
+ } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = ErrSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"': + if c != quote { + err = ErrSyntax + return + } + value = rune(c) + default: + err = ErrSyntax + return + } + tail = s + return +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go new file mode 100644 index 000000000..65be375d9 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go @@ -0,0 +1,96 @@ +package strconv + +import "testing" + +type quoteTest struct { + in string + out string + ascii string +} + +var quotetests = []quoteTest{ + {"\a\b\f\r\n\t\v", `"\a\b\f\r\n\t\v"`, `"\a\b\f\r\n\t\v"`}, + {"\\", `"\\"`, `"\\"`}, + {"abc\xffdef", `"abc\xffdef"`, `"abc\xffdef"`}, + {"\u263a", `"☺"`, `"\u263a"`}, + {"\U0010ffff", `"\U0010ffff"`, `"\U0010ffff"`}, + {"\x04", `"\x04"`, `"\x04"`}, +} + +type unQuoteTest struct { + in string + out string +} + +var unquotetests = []unQuoteTest{ + {`""`, ""}, + {`"a"`, "a"}, + {`"abc"`, "abc"}, + {`"☺"`, "☺"}, + {`"hello world"`, "hello world"}, + {`"\xFF"`, "\xFF"}, + {`"\377"`, "\377"}, + {`"\u1234"`, "\u1234"}, + {`"\U00010111"`, "\U00010111"}, + {`"\U0001011111"`, "\U0001011111"}, + {`"\a\b\f\n\r\t\v\\\""`, "\a\b\f\n\r\t\v\\\""}, + {`"'"`, "'"}, + {`"${file("foo")}"`, `${file("foo")}`}, + {`"${file("\"foo\"")}"`, `${file("\"foo\"")}`}, + {`"echo ${var.region}${element(split(",",var.zones),0)}"`, + `echo ${var.region}${element(split(",",var.zones),0)}`}, + {`"${HH\\:mm\\:ss}"`, `${HH\\:mm\\:ss}`}, + {`"${\n}"`, `${\n}`}, +} + +var misquoted = []string{ + ``, + `"`, + `"a`, + `"'`, + `b"`, + `"\"`, + `"\9"`, + `"\19"`, + `"\129"`, + `'\'`, + `'\9'`, + `'\19'`, + `'\129'`, + `'ab'`, + `"\x1!"`, + `"\U12345678"`, + `"\z"`, + "`", + "`xxx", + "`\"", + `"\'"`, + `'\"'`, + "\"\n\"", + "\"\\n\n\"", + "'\n'", + `"${"`, + `"${foo{}"`, + "\"${foo}\n\"", +} + +func TestUnquote(t *testing.T) { + for _, tt := range unquotetests { + if out, err := Unquote(tt.in); err != nil || out != tt.out { + t.Errorf("Unquote(%#q) = %q, %v want %q, nil", tt.in, out, err, tt.out) + } + } + + // run the quote tests too, backward + for _, tt := range quotetests { + if in, err := Unquote(tt.out); in != tt.in { + t.Errorf("Unquote(%#q) = %q, %v, want %q, nil", tt.out, in, err, tt.in) + } + } + + for _, s := range misquoted { + if out, err := Unquote(s); out != "" || err != ErrSyntax { + t.Errorf("Unquote(%#q) = %q, %v want %q, %v", s, out, err, "", ErrSyntax) + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/array_comment.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/array_comment.hcl new file mode 100644 index 000000000..78c267582 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/array_comment.hcl @@ -0,0 +1,4 @@ +foo = [ + "1", + "2", # comment +] diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_colon.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_colon.hcl new file mode 100644 index 000000000..eb5a99a69 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_colon.hcl @@ -0,0 +1,6 @@ +resource = [{ + "foo": { + "bar": {}, + "baz": [1, 2, "foo"], + } +}] diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment.hcl new file mode 100644 index 000000000..1ff7f29fd --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment.hcl @@ -0,0 
+1,15 @@ +// Foo + +/* Bar */ + +/* +/* +Baz +*/ + +# Another + +# Multiple +# Lines + +foo = "bar" diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment_single.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment_single.hcl new file mode 100644 index 000000000..fec56017d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment_single.hcl @@ -0,0 +1 @@ +# Hello diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex.hcl new file mode 100644 index 000000000..cccb5b06f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex.hcl @@ -0,0 +1,42 @@ +// This comes from Terraform, as a test +variable "foo" { + default = "bar" + description = "bar" +} + +provider "aws" { + access_key = "foo" + secret_key = "bar" +} + +provider "do" { + api_key = "${var.foo}" +} + +resource "aws_security_group" "firewall" { + count = 5 +} + +resource aws_instance "web" { + ami = "${var.foo}" + security_groups = [ + "foo", + "${aws_security_group.firewall.foo}" + ] + + network_interface { + device_index = 0 + description = "Main network interface" + } +} + +resource "aws_instance" "db" { + security_groups = "${aws_security_group.firewall.*.id}" + VPC = "foo" + + depends_on = ["aws_instance.web"] +} + +output "web_ip" { + value = "${aws_instance.web.private_ip}" +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex_key.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex_key.hcl new file mode 100644 index 000000000..0007aaf5f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex_key.hcl @@ -0,0 +1 @@ +foo.bar = "baz" diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/empty.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/empty.hcl new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list.hcl new file mode 100644 index 000000000..059d4ce65 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list.hcl @@ -0,0 +1 @@ +foo = [1, 2, "foo"] diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list_comma.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list_comma.hcl new file mode 100644 index 000000000..50f4218ac --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list_comma.hcl @@ -0,0 +1 @@ +foo = [1, 2, "foo",] diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/multiple.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/multiple.hcl new file mode 100644 index 000000000..029c54b0c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/multiple.hcl @@ -0,0 +1,2 @@ +foo = "bar" +key = 7 diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/old.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/old.hcl new file mode 100644 index 000000000..e9f77cae9 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/old.hcl @@ -0,0 +1,3 @@ +default = { + "eu-west-1": "ami-b1cf19c6", +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure.hcl new file mode 100644 index 000000000..92592fbb3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure.hcl @@ -0,0 +1,5 @@ +// This is a test structure for the lexer +foo bar "baz" { + key = 7 + foo = "bar" +} diff --git 
a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_basic.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_basic.hcl new file mode 100644 index 000000000..7229a1f01 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_basic.hcl @@ -0,0 +1,5 @@ +foo { + value = 7 + "value" = 8 + "complex::value" = 9 +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_empty.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_empty.hcl new file mode 100644 index 000000000..4d156ddea --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_empty.hcl @@ -0,0 +1 @@ +resource "foo" "bar" {} diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/types.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/types.hcl new file mode 100644 index 000000000..cf2747ea1 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/types.hcl @@ -0,0 +1,7 @@ +foo = "bar" +bar = 7 +baz = [1,2,3] +foo = -12 +bar = 3.14159 +foo = true +bar = false diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go new file mode 100644 index 000000000..59c1bb72d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go @@ -0,0 +1,46 @@ +package token + +import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. +func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go new file mode 100644 index 000000000..e37c0664e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go @@ -0,0 +1,219 @@ +// Package token defines constants representing the lexical tokens for HCL +// (HashiCorp Configuration Language) +package token + +import ( + "fmt" + "strconv" + "strings" + + hclstrconv "github.com/hashicorp/hcl/hcl/strconv" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string + JSON bool +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + COMMENT + + identifier_beg + IDENT // literals + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + HEREDOC // <