hcl-1.0.0/.github/ISSUE_TEMPLATE.md

### HCL Template

```hcl
# Place your HCL configuration file here
```

### Expected behavior

What should have happened?

### Actual behavior

What actually happened?

### Steps to reproduce

1.
2.
3.

### References

Are there any other GitHub issues (open or closed) that should be linked here? For example:

- GH-1234
- ...

hcl-1.0.0/.gitignore

y.output

# ignore intellij files
.idea
*.iml
*.ipr
*.iws

*.test

hcl-1.0.0/.travis.yml

sudo: false

language: go

go:
  - 1.x
  - tip

branches:
  only:
    - master

script: make test

hcl-1.0.0/LICENSE

Mozilla Public License, version 2.0

1. Definitions

1.1. “Contributor” means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software.

1.2. “Contributor Version” means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor’s Contribution.

1.3. “Contribution” means Covered Software of a particular Contributor.

1.4. “Covered Software” means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof.

1.5. “Incompatible With Secondary Licenses” means

a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or

b. that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License.

1.6. “Executable Form” means any form of the work other than Source Code Form.

1.7. “Larger Work” means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software.

1.8. “License” means this document.

1.9. “Licensable” means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License.

1.10. “Modifications” means any of the following:

a. any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or

b. any new file in Source Code Form that contains any Covered Software.

1.11. “Patent Claims” of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version.

1.12. “Secondary License” means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses.

1.13.
“Source Code Form” means the form of the work preferred for making modifications.

1.14. “You” (or “Your”) means an individual or a legal entity exercising rights under this License. For legal entities, “You” includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, “control” means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.

2. License Grants and Conditions

2.1. Grants

Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license:

a. under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and

b. under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version.

2.2. Effective Date

The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution.

2.3. Limitations on Grant Scope

The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor:

a. for any code that a Contributor has removed from Covered Software; or

b. for infringements caused by: (i) Your and any other third party’s modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or

c. under Patent Claims infringed by Covered Software in the absence of its Contributions.

This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4).

2.4. Subsequent Licenses

No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3).

2.5. Representation

Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License.

2.6. Fair Use

This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents.

2.7. Conditions

Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1.

3. Responsibilities

3.1. Distribution of Source Form

All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients’ rights in the Source Code Form.

3.2.
Distribution of Executable Form

If You distribute Covered Software in Executable Form then:

a. such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and

b. You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients’ rights in the Source Code Form under this License.

3.3. Distribution of a Larger Work

You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s).

3.4. Notices

You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies.

3.5. Application of Additional Terms

You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction.

4. Inability to Comply Due to Statute or Regulation

If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it.

5. Termination

5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms.
However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice.

5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate.

5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination.

6. Disclaimer of Warranty

Covered Software is provided under this License on an “as is” basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer.

7. Limitation of Liability

Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party’s negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You.

8. Litigation

Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party’s ability to bring cross-claims or counter-claims.

9. Miscellaneous

This License represents the complete agreement concerning the subject matter hereof.
If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor.

10. Versions of the License

10.1. New Versions

Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number.

10.2. Effect of New Versions

You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward.

10.3. Modified Versions

If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses

If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached.

Exhibit A - Source Code Form License Notice

This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - “Incompatible With Secondary Licenses” Notice

This Source Code Form is “Incompatible With Secondary Licenses”, as defined by the Mozilla Public License, v. 2.0.

hcl-1.0.0/Makefile

TEST?=./...

default: test

fmt: generate
	go fmt ./...

test: generate
	go get -t ./...
	go test $(TEST) $(TESTARGS)

generate:
	go generate ./...

updatedeps:
	go get -u golang.org/x/tools/cmd/stringer

.PHONY: default generate test updatedeps

hcl-1.0.0/README.md

# HCL

[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)

HCL (HashiCorp Configuration Language) is a configuration language built by HashiCorp. The goal of HCL is to build a structured configuration language that is both human and machine friendly for use with command-line tools, but specifically targeted towards DevOps tools, servers, etc.

HCL is also fully JSON compatible. That is, JSON can be used as completely valid input to a system expecting HCL. This helps make systems interoperable with other systems.

HCL is heavily inspired by [libucl](https://github.com/vstakhov/libucl), nginx configuration, and other similar languages.

## Why?

A common question when viewing HCL is: why not JSON, YAML, etc.?
Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com) used a variety of configuration languages, from full programming languages such as Ruby to complete data structure languages such as JSON. What we learned is that some people wanted human-friendly configuration languages and some people wanted machine-friendly languages.

JSON fits a nice balance in this, but is fairly verbose and, most importantly, doesn't support comments. With YAML, we found that beginners had a really hard time determining what the actual structure was, and ended up guessing more often than not whether to use a hyphen, colon, etc. in order to represent some configuration key. Full programming languages such as Ruby enable complex behavior a configuration language shouldn't usually allow, and also force people to learn some set of Ruby.

Because of this, we decided to create our own configuration language that is JSON-compatible. Our configuration language (HCL) is designed to be written and modified by humans. The API for HCL allows JSON as an input so that it is also machine-friendly (machines can generate JSON instead of trying to generate HCL).

Our goal with HCL is not to alienate other configuration languages. It is instead to provide HCL as a specialized language for our tools, and JSON as the interoperability layer.

## Syntax

For a complete grammar, please see the parser itself. A high-level overview of the syntax and grammar is listed here.

* Single line comments start with `#` or `//`

* Multi-line comments are wrapped in `/*` and `*/`. Nested block comments are not allowed. A multi-line comment (also known as a block comment) terminates at the first `*/` found.

* Values are assigned with the syntax `key = value` (whitespace doesn't matter). The value can be any primitive: a string, number, boolean, object, or list.

* Strings are double-quoted and can contain any UTF-8 characters. Example: `"Hello, World"`

* Multi-line strings start with `<<EOF` at the end of a line, and end with `EOF` on its own line.

hcl-1.0.0/appveyor.yml

  - echo %Path%
    go version
    go env
    go get -t ./...
build_script:
  - cmd: go test -v ./...

hcl-1.0.0/decoder.go

package hcl

import (
	"errors"
	"fmt"
	"reflect"
	"sort"
	"strconv"
	"strings"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
	"github.com/hashicorp/hcl/hcl/token"
)

// This is the tag to use with structures to have settings for HCL
const tagName = "hcl"

var (
	// nodeType holds a reference to the type of ast.Node
	nodeType reflect.Type = findNodeType()
)

// Unmarshal accepts a byte slice as input and writes the
// data to the value pointed to by v.
func Unmarshal(bs []byte, v interface{}) error {
	root, err := parse(bs)
	if err != nil {
		return err
	}

	return DecodeObject(v, root)
}

// Decode reads the given input and decodes it into the structure
// given by `out`.
func Decode(out interface{}, in string) error {
	obj, err := Parse(in)
	if err != nil {
		return err
	}

	return DecodeObject(out, obj)
}

// DecodeObject is a lower-level version of Decode. It decodes a
// raw Object into the given output.
func DecodeObject(out interface{}, n ast.Node) error { val := reflect.ValueOf(out) if val.Kind() != reflect.Ptr { return errors.New("result must be a pointer") } // If we have the file, we really decode the root node if f, ok := n.(*ast.File); ok { n = f.Node } var d decoder return d.decode("root", n, val.Elem()) } type decoder struct { stack []reflect.Kind } func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { k := result // If we have an interface with a valid value, we use that // for the check. if result.Kind() == reflect.Interface { elem := result.Elem() if elem.IsValid() { k = elem } } // Push current onto stack unless it is an interface. if k.Kind() != reflect.Interface { d.stack = append(d.stack, k.Kind()) // Schedule a pop defer func() { d.stack = d.stack[:len(d.stack)-1] }() } switch k.Kind() { case reflect.Bool: return d.decodeBool(name, node, result) case reflect.Float32, reflect.Float64: return d.decodeFloat(name, node, result) case reflect.Int, reflect.Int32, reflect.Int64: return d.decodeInt(name, node, result) case reflect.Interface: // When we see an interface, we make our own thing return d.decodeInterface(name, node, result) case reflect.Map: return d.decodeMap(name, node, result) case reflect.Ptr: return d.decodePtr(name, node, result) case reflect.Slice: return d.decodeSlice(name, node, result) case reflect.String: return d.decodeString(name, node, result) case reflect.Struct: return d.decodeStruct(name, node, result) default: return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), } } } func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { switch n := node.(type) { case *ast.LiteralType: if n.Token.Type == token.BOOL { v, err := strconv.ParseBool(n.Token.Text) if err != nil { return err } result.Set(reflect.ValueOf(v)) return nil } } return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: unknown type %T", name, node), } } func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { switch n := node.(type) { case *ast.LiteralType: if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER { v, err := strconv.ParseFloat(n.Token.Text, 64) if err != nil { return err } result.Set(reflect.ValueOf(v).Convert(result.Type())) return nil } } return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: unknown type %T", name, node), } } func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { switch n := node.(type) { case *ast.LiteralType: switch n.Token.Type { case token.NUMBER: v, err := strconv.ParseInt(n.Token.Text, 0, 0) if err != nil { return err } if result.Kind() == reflect.Interface { result.Set(reflect.ValueOf(int(v))) } else { result.SetInt(v) } return nil case token.STRING: v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) if err != nil { return err } if result.Kind() == reflect.Interface { result.Set(reflect.ValueOf(int(v))) } else { result.SetInt(v) } return nil } } return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: unknown type %T", name, node), } } func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { // When we see an ast.Node, we retain the value to enable deferred decoding. 
// Very useful in situations where we want to preserve ast.Node information // like Pos if result.Type() == nodeType && result.CanSet() { result.Set(reflect.ValueOf(node)) return nil } var set reflect.Value redecode := true // For testing types, ObjectType should just be treated as a list. We // set this to a temporary var because we want to pass in the real node. testNode := node if ot, ok := node.(*ast.ObjectType); ok { testNode = ot.List } switch n := testNode.(type) { case *ast.ObjectList: // If we're at the root or we're directly within a slice, then we // decode objects into map[string]interface{}, otherwise we decode // them into lists. if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { var temp map[string]interface{} tempVal := reflect.ValueOf(temp) result := reflect.MakeMap( reflect.MapOf( reflect.TypeOf(""), tempVal.Type().Elem())) set = result } else { var temp []map[string]interface{} tempVal := reflect.ValueOf(temp) result := reflect.MakeSlice( reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) set = result } case *ast.ObjectType: // If we're at the root or we're directly within a slice, then we // decode objects into map[string]interface{}, otherwise we decode // them into lists. if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { var temp map[string]interface{} tempVal := reflect.ValueOf(temp) result := reflect.MakeMap( reflect.MapOf( reflect.TypeOf(""), tempVal.Type().Elem())) set = result } else { var temp []map[string]interface{} tempVal := reflect.ValueOf(temp) result := reflect.MakeSlice( reflect.SliceOf(tempVal.Type().Elem()), 0, 1) set = result } case *ast.ListType: var temp []interface{} tempVal := reflect.ValueOf(temp) result := reflect.MakeSlice( reflect.SliceOf(tempVal.Type().Elem()), 0, 0) set = result case *ast.LiteralType: switch n.Token.Type { case token.BOOL: var result bool set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) case token.FLOAT: var result float64 set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) case token.NUMBER: var result int set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) case token.STRING, token.HEREDOC: set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) default: return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), } } default: return fmt.Errorf( "%s: cannot decode into interface: %T", name, node) } // Set the result to what its supposed to be, then reset // result so we don't reflect into this method anymore. result.Set(set) if redecode { // Revisit the node so that we can use the newly instantiated // thing and populate it. 
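	// Editor's note: the container allocated above is still empty; this
	// second decode pass fills it in. Because result now holds a concrete
	// map, slice, or scalar rather than a bare interface, decode dispatches
	// to the matching decode* method instead of recursing back here.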
if err := d.decode(name, node, result); err != nil { return err } } return nil } func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { if item, ok := node.(*ast.ObjectItem); ok { node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} } if ot, ok := node.(*ast.ObjectType); ok { node = ot.List } n, ok := node.(*ast.ObjectList) if !ok { return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), } } // If we have an interface, then we can address the interface, // but not the slice itself, so get the element but set the interface set := result if result.Kind() == reflect.Interface { result = result.Elem() } resultType := result.Type() resultElemType := resultType.Elem() resultKeyType := resultType.Key() if resultKeyType.Kind() != reflect.String { return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: map must have string keys", name), } } // Make a map if it is nil resultMap := result if result.IsNil() { resultMap = reflect.MakeMap( reflect.MapOf(resultKeyType, resultElemType)) } // Go through each element and decode it. done := make(map[string]struct{}) for _, item := range n.Items { if item.Val == nil { continue } // github.com/hashicorp/terraform/issue/5740 if len(item.Keys) == 0 { return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: map must have string keys", name), } } // Get the key we're dealing with, which is the first item keyStr := item.Keys[0].Token.Value().(string) // If we've already processed this key, then ignore it if _, ok := done[keyStr]; ok { continue } // Determine the value. If we have more than one key, then we // get the objectlist of only these keys. itemVal := item.Val if len(item.Keys) > 1 { itemVal = n.Filter(keyStr) done[keyStr] = struct{}{} } // Make the field name fieldName := fmt.Sprintf("%s.%s", name, keyStr) // Get the key/value as reflection values key := reflect.ValueOf(keyStr) val := reflect.Indirect(reflect.New(resultElemType)) // If we have a pre-existing value in the map, use that oldVal := resultMap.MapIndex(key) if oldVal.IsValid() { val.Set(oldVal) } // Decode! if err := d.decode(fieldName, itemVal, val); err != nil { return err } // Set the value on the map resultMap.SetMapIndex(key, val) } // Set the final map if we can set.Set(resultMap) return nil } func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { // Create an element of the concrete (non pointer) type and decode // into that. Then set the value of the pointer to this type. 
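	// Editor's note: reflect.New below always allocates a fresh element, so
	// a non-nil pointer in the output is replaced wholesale rather than
	// being decoded into in place.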
resultType := result.Type() resultElemType := resultType.Elem() val := reflect.New(resultElemType) if err := d.decode(name, node, reflect.Indirect(val)); err != nil { return err } result.Set(val) return nil } func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { // If we have an interface, then we can address the interface, // but not the slice itself, so get the element but set the interface set := result if result.Kind() == reflect.Interface { result = result.Elem() } // Create the slice if it isn't nil resultType := result.Type() resultElemType := resultType.Elem() if result.IsNil() { resultSliceType := reflect.SliceOf(resultElemType) result = reflect.MakeSlice( resultSliceType, 0, 0) } // Figure out the items we'll be copying into the slice var items []ast.Node switch n := node.(type) { case *ast.ObjectList: items = make([]ast.Node, len(n.Items)) for i, item := range n.Items { items[i] = item } case *ast.ObjectType: items = []ast.Node{n} case *ast.ListType: items = n.List default: return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("unknown slice type: %T", node), } } for i, item := range items { fieldName := fmt.Sprintf("%s[%d]", name, i) // Decode val := reflect.Indirect(reflect.New(resultElemType)) // if item is an object that was decoded from ambiguous JSON and // flattened, make sure it's expanded if it needs to decode into a // defined structure. item := expandObject(item, val) if err := d.decode(fieldName, item, val); err != nil { return err } // Append it onto the slice result = reflect.Append(result, val) } set.Set(result) return nil } // expandObject detects if an ambiguous JSON object was flattened to a List which // should be decoded into a struct, and expands the ast to properly deocode. func expandObject(node ast.Node, result reflect.Value) ast.Node { item, ok := node.(*ast.ObjectItem) if !ok { return node } elemType := result.Type() // our target type must be a struct switch elemType.Kind() { case reflect.Ptr: switch elemType.Elem().Kind() { case reflect.Struct: //OK default: return node } case reflect.Struct: //OK default: return node } // A list value will have a key and field name. If it had more fields, // it wouldn't have been flattened. if len(item.Keys) != 2 { return node } keyToken := item.Keys[0].Token item.Keys = item.Keys[1:] // we need to un-flatten the ast enough to decode newNode := &ast.ObjectItem{ Keys: []*ast.ObjectKey{ &ast.ObjectKey{ Token: keyToken, }, }, Val: &ast.ObjectType{ List: &ast.ObjectList{ Items: []*ast.ObjectItem{item}, }, }, } return newNode } func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { switch n := node.(type) { case *ast.LiteralType: switch n.Token.Type { case token.NUMBER: result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) return nil case token.STRING, token.HEREDOC: result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) return nil } } return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: unknown type for string %T", name, node), } } func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { var item *ast.ObjectItem if it, ok := node.(*ast.ObjectItem); ok { item = it node = it.Val } if ot, ok := node.(*ast.ObjectType); ok { node = ot.List } // Handle the special case where the object itself is a literal. Previously // the yacc parser would always ensure top-level elements were arrays. 
The new // parser does not make the same guarantees, thus we need to convert any // top-level literal elements into a list. if _, ok := node.(*ast.LiteralType); ok && item != nil { node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} } list, ok := node.(*ast.ObjectList) if !ok { return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), } } // This slice will keep track of all the structs we'll be decoding. // There can be more than one struct if there are embedded structs // that are squashed. structs := make([]reflect.Value, 1, 5) structs[0] = result // Compile the list of all the fields that we're going to be decoding // from all the structs. type field struct { field reflect.StructField val reflect.Value } fields := []field{} for len(structs) > 0 { structVal := structs[0] structs = structs[1:] structType := structVal.Type() for i := 0; i < structType.NumField(); i++ { fieldType := structType.Field(i) tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") // Ignore fields with tag name "-" if tagParts[0] == "-" { continue } if fieldType.Anonymous { fieldKind := fieldType.Type.Kind() if fieldKind != reflect.Struct { return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: unsupported type to struct: %s", fieldType.Name, fieldKind), } } // We have an embedded field. We "squash" the fields down // if specified in the tag. squash := false for _, tag := range tagParts[1:] { if tag == "squash" { squash = true break } } if squash { structs = append( structs, result.FieldByName(fieldType.Name)) continue } } // Normal struct field, store it away fields = append(fields, field{fieldType, structVal.Field(i)}) } } usedKeys := make(map[string]struct{}) decodedFields := make([]string, 0, len(fields)) decodedFieldsVal := make([]reflect.Value, 0) unusedKeysVal := make([]reflect.Value, 0) for _, f := range fields { field, fieldValue := f.field, f.val if !fieldValue.IsValid() { // This should never happen panic("field is not valid") } // If we can't set the field, then it is unexported or something, // and we just continue onwards. if !fieldValue.CanSet() { continue } fieldName := field.Name tagValue := field.Tag.Get(tagName) tagParts := strings.SplitN(tagValue, ",", 2) if len(tagParts) >= 2 { switch tagParts[1] { case "decodedFields": decodedFieldsVal = append(decodedFieldsVal, fieldValue) continue case "key": if item == nil { return &parser.PosError{ Pos: node.Pos(), Err: fmt.Errorf("%s: %s asked for 'key', impossible", name, fieldName), } } fieldValue.SetString(item.Keys[0].Token.Value().(string)) continue case "unusedKeys": unusedKeysVal = append(unusedKeysVal, fieldValue) continue } } if tagParts[0] != "" { fieldName = tagParts[0] } // Determine the element we'll use to decode. If it is a single // match (only object with the field), then we decode it exactly. // If it is a prefix match, then we decode the matches. filter := list.Filter(fieldName) prefixMatches := filter.Children() matches := filter.Elem() if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { continue } // Track the used key usedKeys[fieldName] = struct{}{} // Create the field name and decode. We range over the elements // because we actually want the value. 
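	// Editor's note: prefixMatches (from Filter(...).Children()) holds
	// nested blocks such as `foo "bar" { ... }`, while matches (from
	// Filter(...).Elem()) holds direct assignments like `foo = ...`; both
	// forms are decoded into the same struct field below.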
fieldName = fmt.Sprintf("%s.%s", name, fieldName) if len(prefixMatches.Items) > 0 { if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil { return err } } for _, match := range matches.Items { var decodeNode ast.Node = match.Val if ot, ok := decodeNode.(*ast.ObjectType); ok { decodeNode = &ast.ObjectList{Items: ot.List.Items} } if err := d.decode(fieldName, decodeNode, fieldValue); err != nil { return err } } decodedFields = append(decodedFields, field.Name) } if len(decodedFieldsVal) > 0 { // Sort it so that it is deterministic sort.Strings(decodedFields) for _, v := range decodedFieldsVal { v.Set(reflect.ValueOf(decodedFields)) } } return nil } // findNodeType returns the type of ast.Node func findNodeType() reflect.Type { var nodeContainer struct { Node ast.Node } value := reflect.ValueOf(nodeContainer).FieldByName("Node") return value.Type() } hcl-1.0.0/decoder_test.go000066400000000000000000000530451334037463000152630ustar00rootroot00000000000000package hcl import ( "io/ioutil" "path/filepath" "reflect" "testing" "time" "github.com/davecgh/go-spew/spew" "github.com/hashicorp/hcl/hcl/ast" ) func TestDecode_interface(t *testing.T) { cases := []struct { File string Err bool Out interface{} }{ { "basic.hcl", false, map[string]interface{}{ "foo": "bar", "bar": "${file(\"bing/bong.txt\")}", }, }, { "basic_squish.hcl", false, map[string]interface{}{ "foo": "bar", "bar": "${file(\"bing/bong.txt\")}", "foo-bar": "baz", }, }, { "empty.hcl", false, map[string]interface{}{ "resource": []map[string]interface{}{ map[string]interface{}{ "foo": []map[string]interface{}{ map[string]interface{}{}, }, }, }, }, }, { "tfvars.hcl", false, map[string]interface{}{ "regularvar": "Should work", "map.key1": "Value", "map.key2": "Other value", }, }, { "escape.hcl", false, map[string]interface{}{ "foo": "bar\"baz\\n", "qux": "back\\slash", "bar": "new\nline", "qax": `slash\:colon`, "nested": `${HH\\:mm\\:ss}`, "nestedquotes": `${"\"stringwrappedinquotes\""}`, }, }, { "float.hcl", false, map[string]interface{}{ "a": 1.02, "b": 2, }, }, { "multiline_bad.hcl", true, nil, }, { "multiline_literal.hcl", true, nil, }, { "multiline_literal_with_hil.hcl", false, map[string]interface{}{"multiline_literal_with_hil": "${hello\n world}"}, }, { "multiline_no_marker.hcl", true, nil, }, { "multiline.hcl", false, map[string]interface{}{"foo": "bar\nbaz\n"}, }, { "multiline_indented.hcl", false, map[string]interface{}{"foo": " bar\n baz\n"}, }, { "multiline_no_hanging_indent.hcl", false, map[string]interface{}{"foo": " baz\n bar\n foo\n"}, }, { "multiline_no_eof.hcl", false, map[string]interface{}{"foo": "bar\nbaz\n", "key": "value"}, }, { "multiline.json", false, map[string]interface{}{"foo": "bar\nbaz"}, }, { "null_strings.json", false, map[string]interface{}{ "module": []map[string]interface{}{ map[string]interface{}{ "app": []map[string]interface{}{ map[string]interface{}{"foo": ""}, }, }, }, }, }, { "scientific.json", false, map[string]interface{}{ "a": 1e-10, "b": 1e+10, "c": 1e10, "d": 1.2e-10, "e": 1.2e+10, "f": 1.2e10, }, }, { "scientific.hcl", false, map[string]interface{}{ "a": 1e-10, "b": 1e+10, "c": 1e10, "d": 1.2e-10, "e": 1.2e+10, "f": 1.2e10, }, }, { "terraform_heroku.hcl", false, map[string]interface{}{ "name": "terraform-test-app", "config_vars": []map[string]interface{}{ map[string]interface{}{ "FOO": "bar", }, }, }, }, { "structure_multi.hcl", false, map[string]interface{}{ "foo": []map[string]interface{}{ map[string]interface{}{ "baz": []map[string]interface{}{ map[string]interface{}{"key": 7}, 
}, }, map[string]interface{}{ "bar": []map[string]interface{}{ map[string]interface{}{"key": 12}, }, }, }, }, }, { "structure_multi.json", false, map[string]interface{}{ "foo": []map[string]interface{}{ map[string]interface{}{ "baz": []map[string]interface{}{ map[string]interface{}{"key": 7}, }, }, map[string]interface{}{ "bar": []map[string]interface{}{ map[string]interface{}{"key": 12}, }, }, }, }, }, { "list_of_lists.hcl", false, map[string]interface{}{ "foo": []interface{}{ []interface{}{"foo"}, []interface{}{"bar"}, }, }, }, { "list_of_maps.hcl", false, map[string]interface{}{ "foo": []interface{}{ map[string]interface{}{"somekey1": "someval1"}, map[string]interface{}{"somekey2": "someval2", "someextrakey": "someextraval"}, }, }, }, { "assign_deep.hcl", false, map[string]interface{}{ "resource": []interface{}{ map[string]interface{}{ "foo": []interface{}{ map[string]interface{}{ "bar": []map[string]interface{}{ map[string]interface{}{}}}}}}}, }, { "structure_list.hcl", false, map[string]interface{}{ "foo": []map[string]interface{}{ map[string]interface{}{ "key": 7, }, map[string]interface{}{ "key": 12, }, }, }, }, { "structure_list.json", false, map[string]interface{}{ "foo": []map[string]interface{}{ map[string]interface{}{ "key": 7, }, map[string]interface{}{ "key": 12, }, }, }, }, { "structure_list_deep.json", false, map[string]interface{}{ "bar": []map[string]interface{}{ map[string]interface{}{ "foo": []map[string]interface{}{ map[string]interface{}{ "name": "terraform_example", "ingress": []map[string]interface{}{ map[string]interface{}{ "from_port": 22, }, map[string]interface{}{ "from_port": 80, }, }, }, }, }, }, }, }, { "structure_list_empty.json", false, map[string]interface{}{ "foo": []interface{}{}, }, }, { "nested_block_comment.hcl", false, map[string]interface{}{ "bar": "value", }, }, { "unterminated_block_comment.hcl", true, nil, }, { "unterminated_brace.hcl", true, nil, }, { "nested_provider_bad.hcl", true, nil, }, { "object_list.json", false, map[string]interface{}{ "resource": []map[string]interface{}{ map[string]interface{}{ "aws_instance": []map[string]interface{}{ map[string]interface{}{ "db": []map[string]interface{}{ map[string]interface{}{ "vpc": "foo", "provisioner": []map[string]interface{}{ map[string]interface{}{ "file": []map[string]interface{}{ map[string]interface{}{ "source": "foo", "destination": "bar", }, }, }, }, }, }, }, }, }, }, }, }, // Terraform GH-8295 sanity test that basic decoding into // interface{} works. 
{ "terraform_variable_invalid.json", false, map[string]interface{}{ "variable": []map[string]interface{}{ map[string]interface{}{ "whatever": "abc123", }, }, }, }, { "interpolate.json", false, map[string]interface{}{ "default": `${replace("europe-west", "-", " ")}`, }, }, { "block_assign.hcl", true, nil, }, { "escape_backslash.hcl", false, map[string]interface{}{ "output": []map[string]interface{}{ map[string]interface{}{ "one": `${replace(var.sub_domain, ".", "\\.")}`, "two": `${replace(var.sub_domain, ".", "\\\\.")}`, "many": `${replace(var.sub_domain, ".", "\\\\\\\\.")}`, }, }, }, }, { "git_crypt.hcl", true, nil, }, { "object_with_bool.hcl", false, map[string]interface{}{ "path": []map[string]interface{}{ map[string]interface{}{ "policy": "write", "permissions": []map[string]interface{}{ map[string]interface{}{ "bool": []interface{}{false}, }, }, }, }, }, }, } for _, tc := range cases { t.Run(tc.File, func(t *testing.T) { d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.File)) if err != nil { t.Fatalf("err: %s", err) } var out interface{} err = Decode(&out, string(d)) if (err != nil) != tc.Err { t.Fatalf("Input: %s\n\nError: %s", tc.File, err) } if !reflect.DeepEqual(out, tc.Out) { t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out) } var v interface{} err = Unmarshal(d, &v) if (err != nil) != tc.Err { t.Fatalf("Input: %s\n\nError: %s", tc.File, err) } if !reflect.DeepEqual(v, tc.Out) { t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out) } }) } } func TestDecode_interfaceInline(t *testing.T) { cases := []struct { Value string Err bool Out interface{} }{ {"t t e{{}}", true, nil}, {"t=0t d {}", true, map[string]interface{}{"t": 0}}, {"v=0E0v d{}", true, map[string]interface{}{"v": float64(0)}}, } for _, tc := range cases { t.Logf("Testing: %q", tc.Value) var out interface{} err := Decode(&out, tc.Value) if (err != nil) != tc.Err { t.Fatalf("Input: %q\n\nError: %s", tc.Value, err) } if !reflect.DeepEqual(out, tc.Out) { t.Fatalf("Input: %q. Actual, Expected.\n\n%#v\n\n%#v", tc.Value, out, tc.Out) } var v interface{} err = Unmarshal([]byte(tc.Value), &v) if (err != nil) != tc.Err { t.Fatalf("Input: %q\n\nError: %s", tc.Value, err) } if !reflect.DeepEqual(v, tc.Out) { t.Fatalf("Input: %q. 
Actual, Expected.\n\n%#v\n\n%#v", tc.Value, out, tc.Out) } } } func TestDecode_equal(t *testing.T) { cases := []struct { One, Two string }{ { "basic.hcl", "basic.json", }, { "float.hcl", "float.json", }, /* { "structure.hcl", "structure.json", }, */ { "structure.hcl", "structure_flat.json", }, { "terraform_heroku.hcl", "terraform_heroku.json", }, } for _, tc := range cases { p1 := filepath.Join(fixtureDir, tc.One) p2 := filepath.Join(fixtureDir, tc.Two) d1, err := ioutil.ReadFile(p1) if err != nil { t.Fatalf("err: %s", err) } d2, err := ioutil.ReadFile(p2) if err != nil { t.Fatalf("err: %s", err) } var i1, i2 interface{} err = Decode(&i1, string(d1)) if err != nil { t.Fatalf("err: %s", err) } err = Decode(&i2, string(d2)) if err != nil { t.Fatalf("err: %s", err) } if !reflect.DeepEqual(i1, i2) { t.Fatalf( "%s != %s\n\n%#v\n\n%#v", tc.One, tc.Two, i1, i2) } } } func TestDecode_flatMap(t *testing.T) { var val map[string]map[string]string err := Decode(&val, testReadFile(t, "structure_flatmap.hcl")) if err != nil { t.Fatalf("err: %s", err) } expected := map[string]map[string]string{ "foo": map[string]string{ "foo": "bar", "key": "7", }, } if !reflect.DeepEqual(val, expected) { t.Fatalf("Actual: %#v\n\nExpected: %#v", val, expected) } } func TestDecode_structure(t *testing.T) { type Embedded interface{} type V struct { Embedded `hcl:"-"` Key int Foo string } var actual V err := Decode(&actual, testReadFile(t, "flat.hcl")) if err != nil { t.Fatalf("err: %s", err) } expected := V{ Key: 7, Foo: "bar", } if !reflect.DeepEqual(actual, expected) { t.Fatalf("Actual: %#v\n\nExpected: %#v", actual, expected) } } func TestDecode_structurePtr(t *testing.T) { type V struct { Key int Foo string } var actual *V err := Decode(&actual, testReadFile(t, "flat.hcl")) if err != nil { t.Fatalf("err: %s", err) } expected := &V{ Key: 7, Foo: "bar", } if !reflect.DeepEqual(actual, expected) { t.Fatalf("Actual: %#v\n\nExpected: %#v", actual, expected) } } func TestDecode_structureArray(t *testing.T) { // This test is extracted from a failure in Consul (consul.io), // hence the interesting structure naming. 
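	// Editor's note: the structs below exercise the decoder's struct tags.
	// The `hcl:",key"` tag on Prefix asks the decoder to fill that field
	// with the enclosing block's object key (handled by the "key" case in
	// decodeStruct), so a `key "foo/" { ... }` block yields a KeyPolicy
	// whose Prefix is "foo/".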
type KeyPolicyType string type KeyPolicy struct { Prefix string `hcl:",key"` Policy KeyPolicyType } type Policy struct { Keys []KeyPolicy `hcl:"key,expand"` } expected := Policy{ Keys: []KeyPolicy{ KeyPolicy{ Prefix: "", Policy: "read", }, KeyPolicy{ Prefix: "foo/", Policy: "write", }, KeyPolicy{ Prefix: "foo/bar/", Policy: "read", }, KeyPolicy{ Prefix: "foo/bar/baz", Policy: "deny", }, }, } files := []string{ "decode_policy.hcl", "decode_policy.json", } for _, f := range files { var actual Policy err := Decode(&actual, testReadFile(t, f)) if err != nil { t.Fatalf("Input: %s\n\nerr: %s", f, err) } if !reflect.DeepEqual(actual, expected) { t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected) } } } func TestDecode_sliceExpand(t *testing.T) { type testInner struct { Name string `hcl:",key"` Key string } type testStruct struct { Services []testInner `hcl:"service,expand"` } expected := testStruct{ Services: []testInner{ testInner{ Name: "my-service-0", Key: "value", }, testInner{ Name: "my-service-1", Key: "value", }, }, } files := []string{ "slice_expand.hcl", } for _, f := range files { t.Logf("Testing: %s", f) var actual testStruct err := Decode(&actual, testReadFile(t, f)) if err != nil { t.Fatalf("Input: %s\n\nerr: %s", f, err) } if !reflect.DeepEqual(actual, expected) { t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected) } } } func TestDecode_structureMap(t *testing.T) { // This test is extracted from a failure in Terraform (terraform.io), // hence the interesting structure naming. type hclVariable struct { Default interface{} Description string Fields []string `hcl:",decodedFields"` } type rawConfig struct { Variable map[string]hclVariable } expected := rawConfig{ Variable: map[string]hclVariable{ "foo": hclVariable{ Default: "bar", Description: "bar", Fields: []string{"Default", "Description"}, }, "amis": hclVariable{ Default: []map[string]interface{}{ map[string]interface{}{ "east": "foo", }, }, Fields: []string{"Default"}, }, }, } files := []string{ "decode_tf_variable.hcl", "decode_tf_variable.json", } for _, f := range files { t.Logf("Testing: %s", f) var actual rawConfig err := Decode(&actual, testReadFile(t, f)) if err != nil { t.Fatalf("Input: %s\n\nerr: %s", f, err) } if !reflect.DeepEqual(actual, expected) { t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected) } } } func TestDecode_structureMapInvalid(t *testing.T) { // Terraform GH-8295 type hclVariable struct { Default interface{} Description string Fields []string `hcl:",decodedFields"` } type rawConfig struct { Variable map[string]*hclVariable } var actual rawConfig err := Decode(&actual, testReadFile(t, "terraform_variable_invalid.json")) if err == nil { t.Fatal("expected error") } } func TestDecode_interfaceNonPointer(t *testing.T) { var value interface{} err := Decode(value, testReadFile(t, "basic_int_string.hcl")) if err == nil { t.Fatal("should error") } } func TestDecode_intString(t *testing.T) { var value struct { Count int } err := Decode(&value, testReadFile(t, "basic_int_string.hcl")) if err != nil { t.Fatalf("err: %s", err) } if value.Count != 3 { t.Fatalf("bad: %#v", value.Count) } } func TestDecode_float32(t *testing.T) { var value struct { A float32 `hcl:"a"` B float32 `hcl:"b"` } err := Decode(&value, testReadFile(t, "float.hcl")) if err != nil { t.Fatalf("err: %s", err) } if got, want := value.A, float32(1.02); got != want { t.Fatalf("wrong result %#v; want %#v", got, want) } if got, want := value.B, float32(2); got != want { t.Fatalf("wrong 
result %#v; want %#v", got, want) } } func TestDecode_float64(t *testing.T) { var value struct { A float64 `hcl:"a"` B float64 `hcl:"b"` } err := Decode(&value, testReadFile(t, "float.hcl")) if err != nil { t.Fatalf("err: %s", err) } if got, want := value.A, float64(1.02); got != want { t.Fatalf("wrong result %#v; want %#v", got, want) } if got, want := value.B, float64(2); got != want { t.Fatalf("wrong result %#v; want %#v", got, want) } } func TestDecode_intStringAliased(t *testing.T) { var value struct { Count time.Duration } err := Decode(&value, testReadFile(t, "basic_int_string.hcl")) if err != nil { t.Fatalf("err: %s", err) } if value.Count != time.Duration(3) { t.Fatalf("bad: %#v", value.Count) } } func TestDecode_Node(t *testing.T) { // given var value struct { Content ast.Node Nested struct { Content ast.Node } } content := ` content { hello = "world" } ` // when err := Decode(&value, content) // then if err != nil { t.Errorf("unable to decode content, %v", err) return } // verify ast.Node can be decoded later var v map[string]interface{} err = DecodeObject(&v, value.Content) if err != nil { t.Errorf("unable to decode content, %v", err) return } if v["hello"] != "world" { t.Errorf("expected mapping to be returned") } } func TestDecode_NestedNode(t *testing.T) { // given var value struct { Nested struct { Content ast.Node } } content := ` nested "content" { hello = "world" } ` // when err := Decode(&value, content) // then if err != nil { t.Errorf("unable to decode content, %v", err) return } // verify ast.Node can be decoded later var v map[string]interface{} err = DecodeObject(&v, value.Nested.Content) if err != nil { t.Errorf("unable to decode content, %v", err) return } if v["hello"] != "world" { t.Errorf("expected mapping to be returned") } } // https://github.com/hashicorp/hcl/issues/60 func TestDecode_topLevelKeys(t *testing.T) { type Template struct { Source string } templates := struct { Templates []*Template `hcl:"template"` }{} err := Decode(&templates, ` template { source = "blah" } template { source = "blahblah" }`) if err != nil { t.Fatal(err) } if templates.Templates[0].Source != "blah" { t.Errorf("bad source: %s", templates.Templates[0].Source) } if templates.Templates[1].Source != "blahblah" { t.Errorf("bad source: %s", templates.Templates[1].Source) } } func TestDecode_flattenedJSON(t *testing.T) { // make sure we can also correctly extract a Name key too type V struct { Name string `hcl:",key"` Description string Default map[string]string } type Vars struct { Variable []*V } cases := []struct { JSON string Out interface{} Expected interface{} }{ { // Nested object, no sibling keys JSON: ` { "var_name": { "default": { "key1": "a", "key2": "b" } } } `, Out: &[]*V{}, Expected: &[]*V{ &V{ Name: "var_name", Default: map[string]string{"key1": "a", "key2": "b"}, }, }, }, { // Nested object with a sibling key (this worked previously) JSON: ` { "var_name": { "description": "Described", "default": { "key1": "a", "key2": "b" } } } `, Out: &[]*V{}, Expected: &[]*V{ &V{ Name: "var_name", Description: "Described", Default: map[string]string{"key1": "a", "key2": "b"}, }, }, }, { // Multiple nested objects, one with a sibling key JSON: ` { "variable": { "var_1": { "default": { "key1": "a", "key2": "b" } }, "var_2": { "description": "Described", "default": { "key1": "a", "key2": "b" } } } } `, Out: &Vars{}, Expected: &Vars{ Variable: []*V{ &V{ Name: "var_1", Default: map[string]string{"key1": "a", "key2": "b"}, }, &V{ Name: "var_2", Description: "Described", Default: 
map[string]string{"key1": "a", "key2": "b"}, }, }, }, }, { // Nested object to maps JSON: ` { "variable": { "var_name": { "description": "Described", "default": { "key1": "a", "key2": "b" } } } } `, Out: &[]map[string]interface{}{}, Expected: &[]map[string]interface{}{ { "variable": []map[string]interface{}{ { "var_name": []map[string]interface{}{ { "description": "Described", "default": []map[string]interface{}{ { "key1": "a", "key2": "b", }, }, }, }, }, }, }, }, }, { // Nested object to maps without a sibling key should decode the same as above JSON: ` { "variable": { "var_name": { "default": { "key1": "a", "key2": "b" } } } } `, Out: &[]map[string]interface{}{}, Expected: &[]map[string]interface{}{ { "variable": []map[string]interface{}{ { "var_name": []map[string]interface{}{ { "default": []map[string]interface{}{ { "key1": "a", "key2": "b", }, }, }, }, }, }, }, }, }, { // Nested objects, one with a sibling key, and one without JSON: ` { "variable": { "var_1": { "default": { "key1": "a", "key2": "b" } }, "var_2": { "description": "Described", "default": { "key1": "a", "key2": "b" } } } } `, Out: &[]map[string]interface{}{}, Expected: &[]map[string]interface{}{ { "variable": []map[string]interface{}{ { "var_1": []map[string]interface{}{ { "default": []map[string]interface{}{ { "key1": "a", "key2": "b", }, }, }, }, }, }, }, { "variable": []map[string]interface{}{ { "var_2": []map[string]interface{}{ { "description": "Described", "default": []map[string]interface{}{ { "key1": "a", "key2": "b", }, }, }, }, }, }, }, }, }, } for i, tc := range cases { err := Decode(tc.Out, tc.JSON) if err != nil { t.Fatalf("[%d] err: %s", i, err) } if !reflect.DeepEqual(tc.Out, tc.Expected) { t.Fatalf("[%d]\ngot: %s\nexpected: %s\n", i, spew.Sdump(tc.Out), spew.Sdump(tc.Expected)) } } } hcl-1.0.0/go.mod000066400000000000000000000001131334037463000133620ustar00rootroot00000000000000module github.com/hashicorp/hcl require github.com/davecgh/go-spew v1.1.1 hcl-1.0.0/go.sum000066400000000000000000000002531334037463000134140ustar00rootroot00000000000000github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= hcl-1.0.0/hcl.go000066400000000000000000000007401334037463000133570ustar00rootroot00000000000000// Package hcl decodes HCL into usable Go structures. // // hcl input can come in either pure HCL format or JSON format. // It can be parsed into an AST, and then decoded into a structure, // or it can be decoded directly from a string into a structure. // // If you choose to parse HCL into a raw AST, the benefit is that you // can write custom visitor implementations to implement custom // semantic checks. By default, HCL does not perform any semantic // checks. package hcl hcl-1.0.0/hcl/000077500000000000000000000000001334037463000130275ustar00rootroot00000000000000hcl-1.0.0/hcl/ast/000077500000000000000000000000001334037463000136165ustar00rootroot00000000000000hcl-1.0.0/hcl/ast/ast.go000066400000000000000000000125751334037463000147460ustar00rootroot00000000000000// Package ast declares the types used to represent syntax trees for HCL // (HashiCorp Configuration Language) package ast import ( "fmt" "strings" "github.com/hashicorp/hcl/hcl/token" ) // Node is an element in the abstract syntax tree. 
type Node interface { node() Pos() token.Pos } func (File) node() {} func (ObjectList) node() {} func (ObjectKey) node() {} func (ObjectItem) node() {} func (Comment) node() {} func (CommentGroup) node() {} func (ObjectType) node() {} func (LiteralType) node() {} func (ListType) node() {} // File represents a single HCL file type File struct { Node Node // usually a *ObjectList Comments []*CommentGroup // list of all comments in the source } func (f *File) Pos() token.Pos { return f.Node.Pos() } // ObjectList represents a list of ObjectItems. An HCL file itself is an // ObjectList. type ObjectList struct { Items []*ObjectItem } func (o *ObjectList) Add(item *ObjectItem) { o.Items = append(o.Items, item) } // Filter filters out the objects with the given key list as a prefix. // // The returned list of objects contain ObjectItems where the keys have // this prefix already stripped off. This might result in objects with // zero-length key lists if they have no children. // // If no matches are found, an empty ObjectList (non-nil) is returned. func (o *ObjectList) Filter(keys ...string) *ObjectList { var result ObjectList for _, item := range o.Items { // If there aren't enough keys, then ignore this if len(item.Keys) < len(keys) { continue } match := true for i, key := range item.Keys[:len(keys)] { key := key.Token.Value().(string) if key != keys[i] && !strings.EqualFold(key, keys[i]) { match = false break } } if !match { continue } // Strip off the prefix from the children newItem := *item newItem.Keys = newItem.Keys[len(keys):] result.Add(&newItem) } return &result } // Children returns further nested objects (key length > 0) within this // ObjectList. This should be used with Filter to get at child items. func (o *ObjectList) Children() *ObjectList { var result ObjectList for _, item := range o.Items { if len(item.Keys) > 0 { result.Add(item) } } return &result } // Elem returns items in the list that are direct element assignments // (key length == 0). This should be used with Filter to get at elements. func (o *ObjectList) Elem() *ObjectList { var result ObjectList for _, item := range o.Items { if len(item.Keys) == 0 { result.Add(item) } } return &result } func (o *ObjectList) Pos() token.Pos { // always returns the uninitiliazed position return o.Items[0].Pos() } // ObjectItem represents a HCL Object Item. An item is represented with a key // (or keys). It can be an assignment or an object (both normal and nested) type ObjectItem struct { // keys is only one length long if it's of type assignment. If it's a // nested object it can be larger than one. In that case "assign" is // invalid as there is no assignments for a nested object. Keys []*ObjectKey // assign contains the position of "=", if any Assign token.Pos // val is the item itself. It can be an object,list, number, bool or a // string. If key length is larger than one, val can be only of type // Object. Val Node LeadComment *CommentGroup // associated lead comment LineComment *CommentGroup // associated line comment } func (o *ObjectItem) Pos() token.Pos { // I'm not entirely sure what causes this, but removing this causes // a test failure. We should investigate at some point. if len(o.Keys) == 0 { return token.Pos{} } return o.Keys[0].Pos() } // ObjectKeys are either an identifier or of type string. type ObjectKey struct { Token token.Token } func (o *ObjectKey) Pos() token.Pos { return o.Token.Pos } // LiteralType represents a literal of basic type. 
Valid types are: // token.NUMBER, token.FLOAT, token.BOOL and token.STRING type LiteralType struct { Token token.Token // comment types, only used when in a list LeadComment *CommentGroup LineComment *CommentGroup } func (l *LiteralType) Pos() token.Pos { return l.Token.Pos } // ListType represents an HCL List type type ListType struct { Lbrack token.Pos // position of "[" Rbrack token.Pos // position of "]" List []Node // the elements in lexical order } func (l *ListType) Pos() token.Pos { return l.Lbrack } func (l *ListType) Add(node Node) { l.List = append(l.List, node) } // ObjectType represents an HCL Object Type type ObjectType struct { Lbrace token.Pos // position of "{" Rbrace token.Pos // position of "}" List *ObjectList // the nodes in lexical order } func (o *ObjectType) Pos() token.Pos { return o.Lbrace } // Comment node represents a single //-style, #-style or /*-style comment type Comment struct { Start token.Pos // position of / or # Text string } func (c *Comment) Pos() token.Pos { return c.Start } // CommentGroup node represents a sequence of comments with no other tokens and // no empty lines between. type CommentGroup struct { List []*Comment // len(List) > 0 } func (c *CommentGroup) Pos() token.Pos { return c.List[0].Pos() } //------------------------------------------------------------------- // GoStringer //------------------------------------------------------------------- func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) } hcl-1.0.0/hcl/ast/ast_test.go000066400000000000000000000102341334037463000157730ustar00rootroot00000000000000package ast import ( "reflect" "strings" "testing" "github.com/hashicorp/hcl/hcl/token" ) func TestObjectListFilter(t *testing.T) { var cases = []struct { Filter []string Input []*ObjectItem Output []*ObjectItem }{ { []string{"foo"}, []*ObjectItem{ &ObjectItem{ Keys: []*ObjectKey{ &ObjectKey{ Token: token.Token{Type: token.STRING, Text: `"foo"`}, }, }, }, }, []*ObjectItem{ &ObjectItem{ Keys: []*ObjectKey{}, }, }, }, { []string{"foo"}, []*ObjectItem{ &ObjectItem{ Keys: []*ObjectKey{ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}}, &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}}, }, }, &ObjectItem{ Keys: []*ObjectKey{ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}}, }, }, }, []*ObjectItem{ &ObjectItem{ Keys: []*ObjectKey{ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}}, }, }, }, }, } for _, tc := range cases { input := &ObjectList{Items: tc.Input} expected := &ObjectList{Items: tc.Output} if actual := input.Filter(tc.Filter...); !reflect.DeepEqual(actual, expected) { t.Fatalf("in order: input, expected, actual\n\n%#v\n\n%#v\n\n%#v", input, expected, actual) } } } func TestWalk(t *testing.T) { items := []*ObjectItem{ &ObjectItem{ Keys: []*ObjectKey{ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}}, &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}}, }, Val: &LiteralType{Token: token.Token{Type: token.STRING, Text: `"example"`}}, }, &ObjectItem{ Keys: []*ObjectKey{ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}}, }, }, } node := &ObjectList{Items: items} order := []string{ "*ast.ObjectList", "*ast.ObjectItem", "*ast.ObjectKey", "*ast.ObjectKey", "*ast.LiteralType", "*ast.ObjectItem", "*ast.ObjectKey", } count := 0 Walk(node, func(n Node) (Node, bool) { if n == nil { return n, false } typeName := reflect.TypeOf(n).String() if
order[count] != typeName { t.Errorf("expected '%s' got: '%s'", order[count], typeName) } count++ return n, true }) } func TestWalkEquality(t *testing.T) { items := []*ObjectItem{ &ObjectItem{ Keys: []*ObjectKey{ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}}, }, }, &ObjectItem{ Keys: []*ObjectKey{ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}}, }, }, } node := &ObjectList{Items: items} rewritten := Walk(node, func(n Node) (Node, bool) { return n, true }) newNode, ok := rewritten.(*ObjectList) if !ok { t.Fatalf("expected Objectlist, got %T", rewritten) } if !reflect.DeepEqual(node, newNode) { t.Fatal("rewritten node is not equal to the given node") } if len(newNode.Items) != 2 { t.Errorf("expected newNode length 2, got: %d", len(newNode.Items)) } expected := []string{ `"foo"`, `"bar"`, } for i, item := range newNode.Items { if len(item.Keys) != 1 { t.Errorf("expected keys newNode length 1, got: %d", len(item.Keys)) } if item.Keys[0].Token.Text != expected[i] { t.Errorf("expected key %s, got %s", expected[i], item.Keys[0].Token.Text) } if item.Val != nil { t.Errorf("expected item value should be nil") } } } func TestWalkRewrite(t *testing.T) { items := []*ObjectItem{ &ObjectItem{ Keys: []*ObjectKey{ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}}, &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}}, }, }, &ObjectItem{ Keys: []*ObjectKey{ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}}, }, }, } node := &ObjectList{Items: items} suffix := "_example" node = Walk(node, func(n Node) (Node, bool) { switch i := n.(type) { case *ObjectKey: i.Token.Text = i.Token.Text + suffix n = i } return n, true }).(*ObjectList) Walk(node, func(n Node) (Node, bool) { switch i := n.(type) { case *ObjectKey: if !strings.HasSuffix(i.Token.Text, suffix) { t.Errorf("Token '%s' should have suffix: %s", i.Token.Text, suffix) } } return n, true }) } hcl-1.0.0/hcl/ast/walk.go000066400000000000000000000023731334037463000151100ustar00rootroot00000000000000package ast import "fmt" // WalkFunc describes a function to be called for each node during a Walk. The // returned node can be used to rewrite the AST. Walking stops the returned // bool is false. type WalkFunc func(Node) (Node, bool) // Walk traverses an AST in depth-first order: It starts by calling fn(node); // node must not be nil. If fn returns true, Walk invokes fn recursively for // each of the non-nil children of node, followed by a call of fn(nil). The // returned node of fn can be used to rewrite the passed node to fn. func Walk(node Node, fn WalkFunc) Node { rewritten, ok := fn(node) if !ok { return rewritten } switch n := node.(type) { case *File: n.Node = Walk(n.Node, fn) case *ObjectList: for i, item := range n.Items { n.Items[i] = Walk(item, fn).(*ObjectItem) } case *ObjectKey: // nothing to do case *ObjectItem: for i, k := range n.Keys { n.Keys[i] = Walk(k, fn).(*ObjectKey) } if n.Val != nil { n.Val = Walk(n.Val, fn) } case *LiteralType: // nothing to do case *ListType: for i, l := range n.List { n.List[i] = Walk(l, fn) } case *ObjectType: n.List = Walk(n.List, fn).(*ObjectList) default: // should we panic here? 
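// A note on this design choice: printing instead of panicking keeps Walk
// usable even if a new Node implementation is added later; the unknown node
// is reported but simply not descended into.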
fmt.Printf("unknown type: %T\n", n) } fn(nil) return rewritten } hcl-1.0.0/hcl/fmtcmd/000077500000000000000000000000001334037463000143015ustar00rootroot00000000000000hcl-1.0.0/hcl/fmtcmd/fmtcmd.go000066400000000000000000000062541334037463000161110ustar00rootroot00000000000000// Derivative work from: // - https://golang.org/src/cmd/gofmt/gofmt.go // - https://github.com/fatih/hclfmt package fmtcmd import ( "bytes" "errors" "fmt" "io" "io/ioutil" "os" "os/exec" "path/filepath" "strings" "github.com/hashicorp/hcl/hcl/printer" ) var ( ErrWriteStdin = errors.New("cannot use write option with standard input") ) type Options struct { List bool // list files whose formatting differs Write bool // write result to (source) file instead of stdout Diff bool // display diffs of formatting changes } func isValidFile(f os.FileInfo, extensions []string) bool { if !f.IsDir() && !strings.HasPrefix(f.Name(), ".") { for _, ext := range extensions { if strings.HasSuffix(f.Name(), "."+ext) { return true } } } return false } // If in == nil, the source is the contents of the file with the given filename. func processFile(filename string, in io.Reader, out io.Writer, stdin bool, opts Options) error { if in == nil { f, err := os.Open(filename) if err != nil { return err } defer f.Close() in = f } src, err := ioutil.ReadAll(in) if err != nil { return err } res, err := printer.Format(src) if err != nil { return fmt.Errorf("In %s: %s", filename, err) } if !bytes.Equal(src, res) { // formatting has changed if opts.List { fmt.Fprintln(out, filename) } if opts.Write { err = ioutil.WriteFile(filename, res, 0644) if err != nil { return err } } if opts.Diff { data, err := diff(src, res) if err != nil { return fmt.Errorf("computing diff: %s", err) } fmt.Fprintf(out, "diff a/%s b/%s\n", filename, filename) out.Write(data) } } if !opts.List && !opts.Write && !opts.Diff { _, err = out.Write(res) } return err } func walkDir(path string, extensions []string, stdout io.Writer, opts Options) error { visitFile := func(path string, f os.FileInfo, err error) error { if err == nil && isValidFile(f, extensions) { err = processFile(path, nil, stdout, false, opts) } return err } return filepath.Walk(path, visitFile) } func Run( paths, extensions []string, stdin io.Reader, stdout io.Writer, opts Options, ) error { if len(paths) == 0 { if opts.Write { return ErrWriteStdin } if err := processFile("", stdin, stdout, true, opts); err != nil { return err } return nil } for _, path := range paths { switch dir, err := os.Stat(path); { case err != nil: return err case dir.IsDir(): if err := walkDir(path, extensions, stdout, opts); err != nil { return err } default: if err := processFile(path, nil, stdout, false, opts); err != nil { return err } } } return nil } func diff(b1, b2 []byte) (data []byte, err error) { f1, err := ioutil.TempFile("", "") if err != nil { return } defer os.Remove(f1.Name()) defer f1.Close() f2, err := ioutil.TempFile("", "") if err != nil { return } defer os.Remove(f2.Name()) defer f2.Close() f1.Write(b1) f2.Write(b2) data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput() if len(data) > 0 { // diff exits with a non-zero status when the files don't match. // Ignore that failure as long as we get output. 
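// (GNU and BSD diff exit 0 when the inputs match, 1 when they differ, and
// values greater than 1 on a real error, so clearing err only when output
// exists means an invocation that failed without producing anything still
// propagates its error.)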
err = nil } return } hcl-1.0.0/hcl/fmtcmd/fmtcmd_test.go000066400000000000000000000215621334037463000171470ustar00rootroot00000000000000// +build !windows // TODO(jen20): These need fixing on Windows but fmt is not used right now // and red CI is making it harder to process other bugs, so ignore until // we get around to fixing them. package fmtcmd import ( "bytes" "fmt" "io/ioutil" "os" "path/filepath" "reflect" "regexp" "sort" "syscall" "testing" "github.com/hashicorp/hcl/testhelper" ) var fixtureExtensions = []string{"hcl"} func init() { sort.Sort(ByFilename(fixtures)) } func TestIsValidFile(t *testing.T) { const fixtureDir = "./test-fixtures" cases := []struct { Path string Expected bool }{ {"good.hcl", true}, {".hidden.ignore", false}, {"file.ignore", false}, {"dir.ignore", false}, } for _, tc := range cases { file, err := os.Stat(filepath.Join(fixtureDir, tc.Path)) if err != nil { t.Errorf("unexpected error: %s", err) } if res := isValidFile(file, fixtureExtensions); res != tc.Expected { t.Errorf("want: %t, got: %t", tc.Expected, res) } } } func TestRunMultiplePaths(t *testing.T) { path1, err := renderFixtures("") if err != nil { t.Errorf("unexpected error: %s", err) } defer os.RemoveAll(path1) path2, err := renderFixtures("") if err != nil { t.Errorf("unexpected error: %s", err) } defer os.RemoveAll(path2) var expectedOut bytes.Buffer for _, path := range []string{path1, path2} { for _, fixture := range fixtures { if !bytes.Equal(fixture.golden, fixture.input) { expectedOut.WriteString(filepath.Join(path, fixture.filename) + "\n") } } } _, stdout := mockIO() err = Run( []string{path1, path2}, fixtureExtensions, nil, stdout, Options{ List: true, }, ) if err != nil { t.Errorf("unexpected error: %s", err) } if stdout.String() != expectedOut.String() { t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut.String(), stdout.String()) } } func TestRunSubDirectories(t *testing.T) { pathParent, err := ioutil.TempDir("", "") if err != nil { t.Errorf("unexpected error: %s", err) } defer os.RemoveAll(pathParent) path1, err := renderFixtures(pathParent) if err != nil { t.Errorf("unexpected error: %s", err) } path2, err := renderFixtures(pathParent) if err != nil { t.Errorf("unexpected error: %s", err) } paths := []string{path1, path2} sort.Strings(paths) var expectedOut bytes.Buffer for _, path := range paths { for _, fixture := range fixtures { if !bytes.Equal(fixture.golden, fixture.input) { expectedOut.WriteString(filepath.Join(path, fixture.filename) + "\n") } } } _, stdout := mockIO() err = Run( []string{pathParent}, fixtureExtensions, nil, stdout, Options{ List: true, }, ) if err != nil { t.Errorf("unexpected error: %s", err) } if stdout.String() != expectedOut.String() { t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut.String(), stdout.String()) } } func TestRunStdin(t *testing.T) { var expectedOut bytes.Buffer for i, fixture := range fixtures { if i != 0 { expectedOut.WriteString("\n") } expectedOut.Write(fixture.golden) } stdin, stdout := mockIO() for _, fixture := range fixtures { stdin.Write(fixture.input) } err := Run( []string{}, fixtureExtensions, stdin, stdout, Options{}, ) if err != nil { t.Errorf("unexpected error: %s", err) } if !bytes.Equal(stdout.Bytes(), expectedOut.Bytes()) { t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut.String(), stdout.String()) } } func TestRunStdinAndWrite(t *testing.T) { var expectedOut = []byte{} stdin, stdout := mockIO() stdin.WriteString("") err := Run( []string{}, []string{}, stdin, stdout, Options{ Write: true, }, ) if err != ErrWriteStdin { 
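// (ErrWriteStdin is a package-level sentinel created with errors.New, so the
// identity comparison above is the intended check; this code predates
// errors.Is.)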
t.Errorf("error want:\n%s\ngot:\n%s", ErrWriteStdin, err) } if !bytes.Equal(stdout.Bytes(), expectedOut) { t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout) } } func TestRunFileError(t *testing.T) { path, err := ioutil.TempDir("", "") if err != nil { t.Errorf("unexpected error: %s", err) } defer os.RemoveAll(path) filename := filepath.Join(path, "unreadable.hcl") var expectedError = &os.PathError{ Op: "open", Path: filename, Err: syscall.EACCES, } err = ioutil.WriteFile(filename, []byte{}, 0000) if err != nil { t.Errorf("unexpected error: %s", err) } _, stdout := mockIO() err = Run( []string{path}, fixtureExtensions, nil, stdout, Options{}, ) if !reflect.DeepEqual(err, expectedError) { t.Errorf("error want: %#v, got: %#v", expectedError, err) } } func TestRunNoOptions(t *testing.T) { path, err := renderFixtures("") if err != nil { t.Errorf("unexpected error: %s", err) } defer os.RemoveAll(path) var expectedOut bytes.Buffer for _, fixture := range fixtures { expectedOut.Write(fixture.golden) } _, stdout := mockIO() err = Run( []string{path}, fixtureExtensions, nil, stdout, Options{}, ) if err != nil { t.Errorf("unexpected error: %s", err) } if stdout.String() != expectedOut.String() { t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut.String(), stdout.String()) } } func TestRunList(t *testing.T) { path, err := renderFixtures("") if err != nil { t.Errorf("unexpected error: %s", err) } defer os.RemoveAll(path) var expectedOut bytes.Buffer for _, fixture := range fixtures { if !bytes.Equal(fixture.golden, fixture.input) { expectedOut.WriteString(fmt.Sprintln(filepath.Join(path, fixture.filename))) } } _, stdout := mockIO() err = Run( []string{path}, fixtureExtensions, nil, stdout, Options{ List: true, }, ) if err != nil { t.Errorf("unexpected error: %s", err) } if stdout.String() != expectedOut.String() { t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut.String(), stdout.String()) } } func TestRunWrite(t *testing.T) { path, err := renderFixtures("") if err != nil { t.Errorf("unexpected error: %s", err) } defer os.RemoveAll(path) _, stdout := mockIO() err = Run( []string{path}, fixtureExtensions, nil, stdout, Options{ Write: true, }, ) if err != nil { t.Errorf("unexpected error: %s", err) } for _, fixture := range fixtures { res, err := ioutil.ReadFile(filepath.Join(path, fixture.filename)) if err != nil { t.Errorf("unexpected error: %s", err) } if !bytes.Equal(res, fixture.golden) { t.Errorf("file %q contents want:\n%s\ngot:\n%s", fixture.filename, fixture.golden, res) } } } func TestRunDiff(t *testing.T) { path, err := renderFixtures("") if err != nil { t.Errorf("unexpected error: %s", err) } defer os.RemoveAll(path) var expectedOut bytes.Buffer for _, fixture := range fixtures { if len(fixture.diff) > 0 { expectedOut.WriteString( regexp.QuoteMeta( fmt.Sprintf("diff a/%s/%s b/%s/%s\n", path, fixture.filename, path, fixture.filename), ), ) // Need to use regex to ignore datetimes in diff. 
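// A unified diff header looks roughly like "--- /tmp/file123<TAB>2018-08-30 ...",
// so the lazy ".+?" patterns below match the temp file names plus their
// timestamps.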
expectedOut.WriteString(`--- .+?\n`) expectedOut.WriteString(`\+\+\+ .+?\n`) expectedOut.WriteString(regexp.QuoteMeta(string(fixture.diff))) } } expectedOutString := testhelper.Unix2dos(expectedOut.String()) _, stdout := mockIO() err = Run( []string{path}, fixtureExtensions, nil, stdout, Options{ Diff: true, }, ) if err != nil { t.Errorf("unexpected error: %s", err) } if !regexp.MustCompile(expectedOutString).Match(stdout.Bytes()) { t.Errorf("stdout want match:\n%s\ngot:\n%q", expectedOutString, stdout) } } func mockIO() (stdin, stdout *bytes.Buffer) { return new(bytes.Buffer), new(bytes.Buffer) } type fixture struct { filename string input, golden, diff []byte } type ByFilename []fixture func (s ByFilename) Len() int { return len(s) } func (s ByFilename) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s ByFilename) Less(i, j int) bool { return len(s[i].filename) > len(s[j].filename) } var fixtures = []fixture{ { "noop.hcl", []byte(`resource "aws_security_group" "firewall" { count = 5 } `), []byte(`resource "aws_security_group" "firewall" { count = 5 } `), []byte(``), }, { "align_equals.hcl", []byte(`variable "foo" { default = "bar" description = "bar" } `), []byte(`variable "foo" { default = "bar" description = "bar" } `), []byte(`@@ -1,4 +1,4 @@ variable "foo" { - default = "bar" + default = "bar" description = "bar" } `), }, { "indentation.hcl", []byte(`provider "aws" { access_key = "foo" secret_key = "bar" } `), []byte(`provider "aws" { access_key = "foo" secret_key = "bar" } `), []byte(`@@ -1,4 +1,4 @@ provider "aws" { - access_key = "foo" - secret_key = "bar" + access_key = "foo" + secret_key = "bar" } `), }, } // parent can be an empty string, in which case the system's default // temporary directory will be used. func renderFixtures(parent string) (path string, err error) { path, err = ioutil.TempDir(parent, "") if err != nil { return "", err } for _, fixture := range fixtures { err = ioutil.WriteFile(filepath.Join(path, fixture.filename), []byte(fixture.input), 0644) if err != nil { os.RemoveAll(path) return "", err } } return path, nil } hcl-1.0.0/hcl/fmtcmd/test-fixtures/000077500000000000000000000000001334037463000171275ustar00rootroot00000000000000hcl-1.0.0/hcl/fmtcmd/test-fixtures/.hidden.ignore000066400000000000000000000000101334037463000216340ustar00rootroot00000000000000invalid hcl-1.0.0/hcl/fmtcmd/test-fixtures/dir.ignore000066400000000000000000000000001334037463000211000ustar00rootroot00000000000000hcl-1.0.0/hcl/fmtcmd/test-fixtures/file.ignore000066400000000000000000000000101334037463000212420ustar00rootroot00000000000000invalid hcl-1.0.0/hcl/fmtcmd/test-fixtures/good.hcl000066400000000000000000000000001334037463000205350ustar00rootroot00000000000000hcl-1.0.0/hcl/parser/000077500000000000000000000000001334037463000143235ustar00rootroot00000000000000hcl-1.0.0/hcl/parser/error.go000066400000000000000000000004121334037463000160000ustar00rootroot00000000000000package parser import ( "fmt" "github.com/hashicorp/hcl/hcl/token" ) // PosError is a parse error that contains a position. 
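// For example (illustrative values):
//
//	e := &PosError{Pos: token.Pos{Line: 2, Column: 5}, Err: errors.New("unexpected token")}
//	_ = e.Error() // "At <position>: unexpected token", with <position> rendered by token.Pos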
type PosError struct { Pos token.Pos Err error } func (e *PosError) Error() string { return fmt.Sprintf("At %s: %s", e.Pos, e.Err) } hcl-1.0.0/hcl/parser/error_test.go000066400000000000000000000001551334037463000170430ustar00rootroot00000000000000package parser import ( "testing" ) func TestPosError_impl(t *testing.T) { var _ error = new(PosError) } hcl-1.0.0/hcl/parser/parser.go000066400000000000000000000311131334037463000161450ustar00rootroot00000000000000// Package parser implements a parser for HCL (HashiCorp Configuration // Language) package parser import ( "bytes" "errors" "fmt" "strings" "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/hcl/hcl/scanner" "github.com/hashicorp/hcl/hcl/token" ) type Parser struct { sc *scanner.Scanner // Last read token tok token.Token commaPrev token.Token comments []*ast.CommentGroup leadComment *ast.CommentGroup // last lead comment lineComment *ast.CommentGroup // last line comment enableTrace bool indent int n int // buffer size (max = 1) } func newParser(src []byte) *Parser { return &Parser{ sc: scanner.New(src), } } // Parse parses the given source and returns the abstract syntax tree. func Parse(src []byte) (*ast.File, error) { // normalize all line endings // since the scanner and output only work with "\n" line endings, we may // end up with dangling "\r" characters in the parsed data. src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) p := newParser(src) return p.Parse() } var errEofToken = errors.New("EOF token found") // Parse parses the source and returns the abstract syntax tree. func (p *Parser) Parse() (*ast.File, error) { f := &ast.File{} var err, scerr error p.sc.Error = func(pos token.Pos, msg string) { scerr = &PosError{Pos: pos, Err: errors.New(msg)} } f.Node, err = p.objectList(false) if scerr != nil { return nil, scerr } if err != nil { return nil, err } f.Comments = p.comments return f, nil } // objectList parses a list of items within an object (generally k/v pairs). // The parameter "obj" tells whether we are within an object (braces: // '{', '}') or just at the top level. If we're within an object, we end // at an RBRACE. func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { defer un(trace(p, "ParseObjectList")) node := &ast.ObjectList{} for { if obj { tok := p.scan() p.unscan() if tok.Type == token.RBRACE { break } } n, err := p.objectItem() if err == errEofToken { break // we are finished } // we don't return a nil node, because we might want to use already // collected items. if err != nil { return node, err } node.Add(n) // object lists can be optionally comma-delimited e.g.
when a list of maps // is being expressed, so a comma is allowed here - it's simply consumed tok := p.scan() if tok.Type != token.COMMA { p.unscan() } } return node, nil } func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { endline = p.tok.Pos.Line // count the endline if it's multiline comment, ie starting with /* if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { // don't use range here - no need to decode Unicode code points for i := 0; i < len(p.tok.Text); i++ { if p.tok.Text[i] == '\n' { endline++ } } } comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} p.tok = p.sc.Scan() return } func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { var list []*ast.Comment endline = p.tok.Pos.Line for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { var comment *ast.Comment comment, endline = p.consumeComment() list = append(list, comment) } // add comment group to the comments list comments = &ast.CommentGroup{List: list} p.comments = append(p.comments, comments) return } // objectItem parses a single object item func (p *Parser) objectItem() (*ast.ObjectItem, error) { defer un(trace(p, "ParseObjectItem")) keys, err := p.objectKey() if len(keys) > 0 && err == errEofToken { // We ignore eof token here since it is an error if we didn't // receive a value (but we did receive a key) for the item. err = nil } if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { // This is a strange boolean statement, but what it means is: // We have keys with no value, and we're likely in an object // (since RBrace ends an object). For this, we set err to nil so // we continue and get the error below of having the wrong value // type. err = nil // Reset the token type so we don't think it completed fine. See // objectType which uses p.tok.Type to check if we're done with // the object. p.tok.Type = token.EOF } if err != nil { return nil, err } o := &ast.ObjectItem{ Keys: keys, } if p.leadComment != nil { o.LeadComment = p.leadComment p.leadComment = nil } switch p.tok.Type { case token.ASSIGN: o.Assign = p.tok.Pos o.Val, err = p.object() if err != nil { return nil, err } case token.LBRACE: o.Val, err = p.objectType() if err != nil { return nil, err } default: keyStr := make([]string, 0, len(keys)) for _, k := range keys { keyStr = append(keyStr, k.Token.Text) } return nil, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf( "key '%s' expected start of object ('{') or assignment ('=')", strings.Join(keyStr, " ")), } } // key=#comment // val if p.lineComment != nil { o.LineComment, p.lineComment = p.lineComment, nil } // do a look-ahead for line comment p.scan() if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { o.LineComment = p.lineComment p.lineComment = nil } p.unscan() return o, nil } // objectKey parses an object key and returns a ObjectKey AST func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { keyCount := 0 keys := make([]*ast.ObjectKey, 0) for { tok := p.scan() switch tok.Type { case token.EOF: // It is very important to also return the keys here as well as // the error. This is because we need to be able to tell if we // did parse keys prior to finding the EOF, or if we just found // a bare EOF. return keys, errEofToken case token.ASSIGN: // assignment or object only, but not nested objects. 
this is not // allowed: `foo bar = {}` if keyCount > 1 { return nil, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), } } if keyCount == 0 { return nil, &PosError{ Pos: p.tok.Pos, Err: errors.New("no object keys found!"), } } return keys, nil case token.LBRACE: var err error // If we have no keys, then it is a syntax error. i.e. {{}} is not // allowed. if len(keys) == 0 { err = &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), } } // object return keys, err case token.IDENT, token.STRING: keyCount++ keys = append(keys, &ast.ObjectKey{Token: p.tok}) case token.ILLEGAL: return keys, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("illegal character"), } default: return keys, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), } } } } // object parses any type of object, such as number, bool, string, object or // list. func (p *Parser) object() (ast.Node, error) { defer un(trace(p, "ParseType")) tok := p.scan() switch tok.Type { case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: return p.literalType() case token.LBRACE: return p.objectType() case token.LBRACK: return p.listType() case token.COMMENT: // implement comment case token.EOF: return nil, errEofToken } return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf("Unknown token: %+v", tok), } } // objectType parses an object type and returns a ObjectType AST func (p *Parser) objectType() (*ast.ObjectType, error) { defer un(trace(p, "ParseObjectType")) // we assume that the currently scanned token is a LBRACE o := &ast.ObjectType{ Lbrace: p.tok.Pos, } l, err := p.objectList(true) // if we hit RBRACE, we are good to go (means we parsed all Items), if it's // not a RBRACE, it's an syntax error and we just return it. 
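// Put differently: an RBRACE in p.tok means objectList simply stopped at the
// end of this block, which is the normal case; any other token combined with
// a non-nil err is a genuine parse failure inside the block.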
if err != nil && p.tok.Type != token.RBRACE { return nil, err } // No error, scan and expect the ending to be a brace if tok := p.scan(); tok.Type != token.RBRACE { return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type), } } o.List = l o.Rbrace = p.tok.Pos // advanced via parseObjectList return o, nil } // listType parses a list type and returns a ListType AST func (p *Parser) listType() (*ast.ListType, error) { defer un(trace(p, "ParseListType")) // we assume that the currently scanned token is a LBRACK l := &ast.ListType{ Lbrack: p.tok.Pos, } needComma := false for { tok := p.scan() if needComma { switch tok.Type { case token.COMMA, token.RBRACK: default: return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf( "error parsing list, expected comma or list end, got: %s", tok.Type), } } } switch tok.Type { case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: node, err := p.literalType() if err != nil { return nil, err } // If there is a lead comment, apply it if p.leadComment != nil { node.LeadComment = p.leadComment p.leadComment = nil } l.Add(node) needComma = true case token.COMMA: // get next list item or we are at the end // do a look-ahead for line comment p.scan() if p.lineComment != nil && len(l.List) > 0 { lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) if ok { lit.LineComment = p.lineComment l.List[len(l.List)-1] = lit p.lineComment = nil } } p.unscan() needComma = false continue case token.LBRACE: // Looks like a nested object, so parse it out node, err := p.objectType() if err != nil { return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf( "error while trying to parse object within list: %s", err), } } l.Add(node) needComma = true case token.LBRACK: node, err := p.listType() if err != nil { return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf( "error while trying to parse list within list: %s", err), } } l.Add(node) case token.RBRACK: // finished l.Rbrack = p.tok.Pos return l, nil default: return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), } } } } // literalType parses a literal type and returns a LiteralType AST func (p *Parser) literalType() (*ast.LiteralType, error) { defer un(trace(p, "ParseLiteral")) return &ast.LiteralType{ Token: p.tok, }, nil } // scan returns the next token from the underlying scanner. If a token has // been unscanned then read that instead. In the process, it collects any // comment groups encountered, and remembers the last lead and line comments. func (p *Parser) scan() token.Token { // If we have a token on the buffer, then return it. if p.n != 0 { p.n = 0 return p.tok } // Otherwise read the next token from the scanner and Save it to the buffer // in case we unscan later. prev := p.tok p.tok = p.sc.Scan() if p.tok.Type == token.COMMENT { var comment *ast.CommentGroup var endline int // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", // p.tok.Pos.Line, prev.Pos.Line, endline) if p.tok.Pos.Line == prev.Pos.Line { // The comment is on same line as the previous token; it // cannot be a lead comment but may be a line comment. comment, endline = p.consumeCommentGroup(0) if p.tok.Pos.Line != endline { // The next token is on a different line, thus // the last comment group is a line comment. 
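// For example (illustrative HCL):
//
//	foo = "bar" // trailing note
//	baz = 1
//
// "trailing note" shares a line with the previous token while the next token
// starts a new line, so the group is recorded as a line comment.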
p.lineComment = comment } } // consume successor comments, if any endline = -1 for p.tok.Type == token.COMMENT { comment, endline = p.consumeCommentGroup(1) } if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { switch p.tok.Type { case token.RBRACE, token.RBRACK: // Do not count for these cases default: // The next token is following on the line immediately after the // comment group, thus the last comment group is a lead comment. p.leadComment = comment } } } return p.tok } // unscan pushes the previously read token back onto the buffer. func (p *Parser) unscan() { p.n = 1 } // ---------------------------------------------------------------------------- // Parsing support func (p *Parser) printTrace(a ...interface{}) { if !p.enableTrace { return } const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " const n = len(dots) fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) i := 2 * p.indent for i > n { fmt.Print(dots) i -= n } // i <= n fmt.Print(dots[0:i]) fmt.Println(a...) } func trace(p *Parser, msg string) *Parser { p.printTrace(msg, "(") p.indent++ return p } // Usage pattern: defer un(trace(p, "...")) func un(p *Parser) { p.indent-- p.printTrace(")") } hcl-1.0.0/hcl/parser/parser_test.go000066400000000000000000000242061334037463000172110ustar00rootroot00000000000000package parser import ( "fmt" "io/ioutil" "path/filepath" "reflect" "runtime" "strings" "testing" "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/hcl/hcl/token" ) func TestType(t *testing.T) { var literals = []struct { typ token.Type src string }{ {token.STRING, `foo = "foo"`}, {token.NUMBER, `foo = 123`}, {token.NUMBER, `foo = -29`}, {token.FLOAT, `foo = 123.12`}, {token.FLOAT, `foo = -123.12`}, {token.BOOL, `foo = true`}, {token.HEREDOC, "foo = <= 0 { result = p.heredocIndent(result) } } return result } // objectItem returns the printable HCL form of an object item. An object type // starts with one/multiple keys and has a value. The value might be of any // type. func (p *printer) objectItem(o *ast.ObjectItem) []byte { defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text))) var buf bytes.Buffer if o.LeadComment != nil { for _, comment := range o.LeadComment.List { buf.WriteString(comment.Text) buf.WriteByte(newline) } } // If key and val are on different lines, treat line comments like lead comments. if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line { for _, comment := range o.LineComment.List { buf.WriteString(comment.Text) buf.WriteByte(newline) } } for i, k := range o.Keys { buf.WriteString(k.Token.Text) buf.WriteByte(blank) // reach end of key if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 { buf.WriteString("=") buf.WriteByte(blank) } } buf.Write(p.output(o.Val)) if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line { buf.WriteByte(blank) for _, comment := range o.LineComment.List { buf.WriteString(comment.Text) } } return buf.Bytes() } // objectType returns the printable HCL form of an object type. An object type // begins with a brace and ends with a brace. func (p *printer) objectType(o *ast.ObjectType) []byte { defer un(trace(p, "ObjectType")) var buf bytes.Buffer buf.WriteString("{") var index int var nextItem token.Pos var commented, newlinePrinted bool for { // Determine the location of the next actual non-comment // item. 
If we're at the end, the next item is the closing brace if index != len(o.List.Items) { nextItem = o.List.Items[index].Pos() } else { nextItem = o.Rbrace } // Go through the standalone comments in the file and print out // the comments that we should be for this object item. for _, c := range p.standaloneComments { printed := false var lastCommentPos token.Pos for _, comment := range c.List { // We only care about comments after the previous item // we've printed so that comments are printed in the // correct locations (between two objects for example). // And before the next item. if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { // If there are standalone comments and the initial newline has not // been printed yet, do it now. if !newlinePrinted { newlinePrinted = true buf.WriteByte(newline) } // add newline if it's between other printed nodes if index > 0 { commented = true buf.WriteByte(newline) } // Store this position lastCommentPos = comment.Pos() // output the comment itself buf.Write(p.indent(p.heredocIndent([]byte(comment.Text)))) // Set printed to true to note that we printed something printed = true /* if index != len(o.List.Items) { buf.WriteByte(newline) // do not print on the end } */ } } // Stuff to do if we had comments if printed { // Always write a newline buf.WriteByte(newline) // If there is another item in the object and our comment // didn't hug it directly, then make sure there is a blank // line separating them. if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 { buf.WriteByte(newline) } } } if index == len(o.List.Items) { p.prev = o.Rbrace break } // At this point we are sure that it's not a totally empty block: print // the initial newline if it hasn't been printed yet by the previous // block about standalone comments. if !newlinePrinted { buf.WriteByte(newline) newlinePrinted = true } // check if we have adjacent one liner items. If yes we'll going to align // the comments. var aligned []*ast.ObjectItem for _, item := range o.List.Items[index:] { // we don't group one line lists if len(o.List.Items) == 1 { break } // one means a oneliner with out any lead comment // two means a oneliner with lead comment // anything else might be something else cur := lines(string(p.objectItem(item))) if cur > 2 { break } curPos := item.Pos() nextPos := token.Pos{} if index != len(o.List.Items)-1 { nextPos = o.List.Items[index+1].Pos() } prevPos := token.Pos{} if index != 0 { prevPos = o.List.Items[index-1].Pos() } // fmt.Println("DEBUG ----------------") // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos) // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos) // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos) if curPos.Line+1 == nextPos.Line { aligned = append(aligned, item) index++ continue } if curPos.Line-1 == prevPos.Line { aligned = append(aligned, item) index++ // finish if we have a new line or comment next. This happens // if the next item is not adjacent if curPos.Line+1 != nextPos.Line { break } continue } break } // put newlines if the items are between other non aligned items. 
// newlines are also added if there is a standalone comment already, so // check it too if !commented && index != len(aligned) { buf.WriteByte(newline) } if len(aligned) >= 1 { p.prev = aligned[len(aligned)-1].Pos() items := p.alignedItems(aligned) buf.Write(p.indent(items)) } else { p.prev = o.List.Items[index].Pos() buf.Write(p.indent(p.objectItem(o.List.Items[index]))) index++ } buf.WriteByte(newline) } buf.WriteString("}") return buf.Bytes() } func (p *printer) alignedItems(items []*ast.ObjectItem) []byte { var buf bytes.Buffer // find the longest key and value length, needed for alignment var longestKeyLen int // longest key length var longestValLen int // longest value length for _, item := range items { key := len(item.Keys[0].Token.Text) val := len(p.output(item.Val)) if key > longestKeyLen { longestKeyLen = key } if val > longestValLen { longestValLen = val } } for i, item := range items { if item.LeadComment != nil { for _, comment := range item.LeadComment.List { buf.WriteString(comment.Text) buf.WriteByte(newline) } } for i, k := range item.Keys { keyLen := len(k.Token.Text) buf.WriteString(k.Token.Text) for i := 0; i < longestKeyLen-keyLen+1; i++ { buf.WriteByte(blank) } // reach end of key if i == len(item.Keys)-1 && len(item.Keys) == 1 { buf.WriteString("=") buf.WriteByte(blank) } } val := p.output(item.Val) valLen := len(val) buf.Write(val) if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil { for i := 0; i < longestValLen-valLen+1; i++ { buf.WriteByte(blank) } for _, comment := range item.LineComment.List { buf.WriteString(comment.Text) } } // do not print for the last item if i != len(items)-1 { buf.WriteByte(newline) } } return buf.Bytes() } // list returns the printable HCL form of an list type. func (p *printer) list(l *ast.ListType) []byte { if p.isSingleLineList(l) { return p.singleLineList(l) } var buf bytes.Buffer buf.WriteString("[") buf.WriteByte(newline) var longestLine int for _, item := range l.List { // for now we assume that the list only contains literal types if lit, ok := item.(*ast.LiteralType); ok { lineLen := len(lit.Token.Text) if lineLen > longestLine { longestLine = lineLen } } } haveEmptyLine := false for i, item := range l.List { // If we have a lead comment, then we want to write that first leadComment := false if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil { leadComment = true // Ensure an empty line before every element with a // lead comment (except the first item in a list). if !haveEmptyLine && i != 0 { buf.WriteByte(newline) } for _, comment := range lit.LeadComment.List { buf.Write(p.indent([]byte(comment.Text))) buf.WriteByte(newline) } } // also indent each line val := p.output(item) curLen := len(val) buf.Write(p.indent(val)) // if this item is a heredoc, then we output the comma on // the next line. This is the only case this happens. comma := []byte{','} if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { buf.WriteByte(newline) comma = p.indent(comma) } buf.Write(comma) if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { // if the next item doesn't have any comments, do not align buf.WriteByte(blank) // align one space for i := 0; i < longestLine-curLen; i++ { buf.WriteByte(blank) } for _, comment := range lit.LineComment.List { buf.WriteString(comment.Text) } } buf.WriteByte(newline) // Ensure an empty line after every element with a // lead comment (except the first item in a list). 
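// For instance (illustrative), a list whose second element carries a lead
// comment formats as:
//
//	[
//	  "a",
//
//	  # note on b
//	  "b",
//	]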
haveEmptyLine = leadComment && i != len(l.List)-1 if haveEmptyLine { buf.WriteByte(newline) } } buf.WriteString("]") return buf.Bytes() } // isSingleLineList returns true if: // * they were previously formatted entirely on one line // * they consist entirely of literals // * there are either no heredoc strings or the list has exactly one element // * there are no line comments func (printer) isSingleLineList(l *ast.ListType) bool { for _, item := range l.List { if item.Pos().Line != l.Lbrack.Line { return false } lit, ok := item.(*ast.LiteralType) if !ok { return false } if lit.Token.Type == token.HEREDOC && len(l.List) != 1 { return false } if lit.LineComment != nil { return false } } return true } // singleLineList prints a simple single line list. // For a definition of "simple", see isSingleLineList above. func (p *printer) singleLineList(l *ast.ListType) []byte { buf := &bytes.Buffer{} buf.WriteString("[") for i, item := range l.List { if i != 0 { buf.WriteString(", ") } // Output the item itself buf.Write(p.output(item)) // The heredoc marker needs to be at the end of line. if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { buf.WriteByte(newline) } } buf.WriteString("]") return buf.Bytes() } // indent indents the lines of the given buffer for each non-empty line func (p *printer) indent(buf []byte) []byte { var prefix []byte if p.cfg.SpacesWidth != 0 { for i := 0; i < p.cfg.SpacesWidth; i++ { prefix = append(prefix, blank) } } else { prefix = []byte{tab} } var res []byte bol := true for _, c := range buf { if bol && c != '\n' { res = append(res, prefix...) } res = append(res, c) bol = c == '\n' } return res } // unindent removes all the indentation from the tombstoned lines func (p *printer) unindent(buf []byte) []byte { var res []byte for i := 0; i < len(buf); i++ { skip := len(buf)-i <= len(unindent) if !skip { skip = !bytes.Equal(unindent, buf[i:i+len(unindent)]) } if skip { res = append(res, buf[i]) continue } // We have a marker. we have to backtrace here and clean out // any whitespace ahead of our tombstone up to a \n for j := len(res) - 1; j >= 0; j-- { if res[j] == '\n' { break } res = res[:j] } // Skip the entire unindent marker i += len(unindent) - 1 } return res } // heredocIndent marks all the 2nd and further lines as unindentable func (p *printer) heredocIndent(buf []byte) []byte { var res []byte bol := false for _, c := range buf { if bol && c != '\n' { res = append(res, unindent...) } res = append(res, c) bol = c == '\n' } return res } // isSingleLineObject tells whether the given object item is a single // line object such as "obj {}". // // A single line object: // // * has no lead comments (hence multi-line) // * has no assignment // * has no values in the stanza (within {}) // func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool { // If there is a lead comment, can't be one line if val.LeadComment != nil { return false } // If there is assignment, we always break by line if val.Assign.IsValid() { return false } // If it isn't an object type, then its not a single line object ot, ok := val.Val.(*ast.ObjectType) if !ok { return false } // If the object has no items, it is single line! return len(ot.List.Items) == 0 } func lines(txt string) int { endline := 1 for i := 0; i < len(txt); i++ { if txt[i] == '\n' { endline++ } } return endline } // ---------------------------------------------------------------------------- // Tracing support func (p *printer) printTrace(a ...interface{}) { if !p.enableTrace { return } const dots = ". 
. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " const n = len(dots) i := 2 * p.indentTrace for i > n { fmt.Print(dots) i -= n } // i <= n fmt.Print(dots[0:i]) fmt.Println(a...) } func trace(p *printer, msg string) *printer { p.printTrace(msg, "(") p.indentTrace++ return p } // Usage pattern: defer un(trace(p, "...")) func un(p *printer) { p.indentTrace-- p.printTrace(")") } hcl-1.0.0/hcl/printer/printer.go000066400000000000000000000026401334037463000165260ustar00rootroot00000000000000// Package printer implements printing of AST nodes to HCL format. package printer import ( "bytes" "io" "text/tabwriter" "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/hcl/hcl/parser" ) var DefaultConfig = Config{ SpacesWidth: 2, } // A Config node controls the output of Fprint. type Config struct { SpacesWidth int // if set, it will use spaces instead of tabs for alignment } func (c *Config) Fprint(output io.Writer, node ast.Node) error { p := &printer{ cfg: *c, comments: make([]*ast.CommentGroup, 0), standaloneComments: make([]*ast.CommentGroup, 0), // enableTrace: true, } p.collectComments(node) if _, err := output.Write(p.unindent(p.output(node))); err != nil { return err } // flush tabwriter, if any var err error if tw, _ := output.(*tabwriter.Writer); tw != nil { err = tw.Flush() } return err } // Fprint "pretty-prints" an HCL node to output // It calls Config.Fprint with default settings. func Fprint(output io.Writer, node ast.Node) error { return DefaultConfig.Fprint(output, node) } // Format formats src HCL and returns the result. func Format(src []byte) ([]byte, error) { node, err := parser.Parse(src) if err != nil { return nil, err } var buf bytes.Buffer if err := DefaultConfig.Fprint(&buf, node); err != nil { return nil, err } // Add trailing newline to result buf.WriteString("\n") return buf.Bytes(), nil } hcl-1.0.0/hcl/printer/printer_test.go000066400000000000000000000102051334037463000175610ustar00rootroot00000000000000package printer import ( "bytes" "errors" "flag" "fmt" "io/ioutil" "path/filepath" "testing" "github.com/hashicorp/hcl/hcl/parser" ) var update = flag.Bool("update", false, "update golden files") const ( dataDir = "testdata" ) type entry struct { source, golden string } // Use go test -update to create/update the respective golden files. 
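// A typical workflow (from this package directory) would be:
//
//	go test -run TestFiles -update
//
// followed by reviewing the changes to the testdata/*.golden files before
// committing them.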
var data = []entry{ {"complexhcl.input", "complexhcl.golden"}, {"list.input", "list.golden"}, {"list_comment.input", "list_comment.golden"}, {"comment.input", "comment.golden"}, {"comment_crlf.input", "comment.golden"}, {"comment_aligned.input", "comment_aligned.golden"}, {"comment_array.input", "comment_array.golden"}, {"comment_end_file.input", "comment_end_file.golden"}, {"comment_multiline_indent.input", "comment_multiline_indent.golden"}, {"comment_multiline_no_stanza.input", "comment_multiline_no_stanza.golden"}, {"comment_multiline_stanza.input", "comment_multiline_stanza.golden"}, {"comment_newline.input", "comment_newline.golden"}, {"comment_object_multi.input", "comment_object_multi.golden"}, {"comment_standalone.input", "comment_standalone.golden"}, {"empty_block.input", "empty_block.golden"}, {"list_of_objects.input", "list_of_objects.golden"}, {"multiline_string.input", "multiline_string.golden"}, {"object_singleline.input", "object_singleline.golden"}, {"object_with_heredoc.input", "object_with_heredoc.golden"}, } func TestFiles(t *testing.T) { for _, e := range data { source := filepath.Join(dataDir, e.source) golden := filepath.Join(dataDir, e.golden) t.Run(e.source, func(t *testing.T) { check(t, source, golden) }) } } func check(t *testing.T, source, golden string) { src, err := ioutil.ReadFile(source) if err != nil { t.Error(err) return } res, err := format(src) if err != nil { t.Error(err) return } // update golden files if necessary if *update { if err := ioutil.WriteFile(golden, res, 0644); err != nil { t.Error(err) } return } // get golden gld, err := ioutil.ReadFile(golden) if err != nil { t.Error(err) return } // formatted source and golden must be the same if err := diff(source, golden, res, gld); err != nil { t.Error(err) return } } // diff compares a and b. func diff(aname, bname string, a, b []byte) error { var buf bytes.Buffer // holding long error message // compare lengths if len(a) != len(b) { fmt.Fprintf(&buf, "\nlength changed: len(%s) = %d, len(%s) = %d", aname, len(a), bname, len(b)) } // compare contents line := 1 offs := 1 for i := 0; i < len(a) && i < len(b); i++ { ch := a[i] if ch != b[i] { fmt.Fprintf(&buf, "\n%s:%d:%d: %q", aname, line, i-offs+1, lineAt(a, offs)) fmt.Fprintf(&buf, "\n%s:%d:%d: %q", bname, line, i-offs+1, lineAt(b, offs)) fmt.Fprintf(&buf, "\n\n") break } if ch == '\n' { line++ offs = i + 1 } } if buf.Len() > 0 { return errors.New(buf.String()) } return nil } // format parses src, prints the corresponding AST, verifies the resulting // src is syntactically correct, and returns the resulting src or an error // if any. func format(src []byte) ([]byte, error) { formatted, err := Format(src) if err != nil { return nil, err } // make sure formatted output is syntactically correct if _, err := parser.Parse(formatted); err != nil { return nil, fmt.Errorf("parse: %s\n%s", err, formatted) } return formatted, nil } // lineAt returns the line in text starting at offset offs. func lineAt(text []byte, offs int) []byte { i := offs for i < len(text) && text[i] != '\n' { i++ } return text[offs:i] } // TestFormatParsable ensures that the output of Format() is can be parsed again. func TestFormatValidOutput(t *testing.T) { cases := []string{ "#\x00", "#\ue123t", "x=//\n0y=<<_\n_\n", "y=[1,//\n]", "Y=<<4\n4/\n\n\n/4/@=4/\n\n\n/4000000004\r\r\n00004\n", "x=<<_\n_\r\r\n_\n", "X=<<-\n\r\r\n", } for _, c := range cases { f, err := Format([]byte(c)) if err != nil { // ignore these failures, not all inputs are valid HCL. 
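// (the cases above are fuzzer-style inputs: the property under test is only
// that whatever Format accepts must parse again, not that Format accepts
// every input)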
t.Logf("Format(%q) = %v", c, err) continue } if _, err := parser.Parse(f); err != nil { t.Errorf("Format(%q) = %q; Parse(%q) = %v", c, f, f, err) continue } } } hcl-1.0.0/hcl/printer/testdata/000077500000000000000000000000001334037463000163235ustar00rootroot00000000000000hcl-1.0.0/hcl/printer/testdata/comment.golden000066400000000000000000000013001334037463000211510ustar00rootroot00000000000000// A standalone comment is a comment which is not attached to any kind of node // This comes from Terraform, as a test variable "foo" { # Standalone comment should be still here default = "bar" description = "bar" # yooo } /* This is a multi line standalone comment*/ // fatih arslan /* This is a developer test account and a multine comment */ developer = ["fatih", "arslan"] // fatih arslan # One line here numbers = [1, 2] // another line here # Another comment variable = { description = "bar" # another yooo foo { # Nested standalone bar = "fatih" } } // lead comment foo { bar = "fatih" // line comment 2 } // line comment 3 // comment multiline = "assignment" hcl-1.0.0/hcl/printer/testdata/comment.input000066400000000000000000000014041334037463000210450ustar00rootroot00000000000000// A standalone comment is a comment which is not attached to any kind of node // This comes from Terraform, as a test variable "foo" { # Standalone comment should be still here default = "bar" description = "bar" # yooo } /* This is a multi line standalone comment*/ // fatih arslan /* This is a developer test account and a multine comment */ developer = [ "fatih", "arslan"] // fatih arslan # One line here numbers = [1,2] // another line here # Another comment variable = { description = "bar" # another yooo foo { # Nested standalone bar = "fatih" } } // lead comment foo { bar = "fatih" // line comment 2 } // line comment 3 multiline = // comment "assignment" hcl-1.0.0/hcl/printer/testdata/comment_aligned.golden000066400000000000000000000012641334037463000226450ustar00rootroot00000000000000aligned { # We have some aligned items below foo = "fatih" # yoo1 default = "bar" # yoo2 bar = "bar and foo" # yoo3 default = { bar = "example" } #deneme arslan fatih = ["fatih"] # yoo4 #fatih arslan fatiharslan = ["arslan"] // yoo5 default = { bar = "example" } security_groups = [ "foo", # kenya 1 "${aws_security_group.firewall.foo}", # kenya 2 ] security_groups2 = [ "foo", # kenya 1 "bar", # kenya 1.5 "${aws_security_group.firewall.foo}", # kenya 2 "foobar", # kenya 3 ] } hcl-1.0.0/hcl/printer/testdata/comment_aligned.input000066400000000000000000000010301334037463000225230ustar00rootroot00000000000000aligned { # We have some aligned items below foo = "fatih" # yoo1 default = "bar" # yoo2 bar = "bar and foo" # yoo3 default = { bar = "example" } #deneme arslan fatih = ["fatih"] # yoo4 #fatih arslan fatiharslan = ["arslan"] // yoo5 default = { bar = "example" } security_groups = [ "foo", # kenya 1 "${aws_security_group.firewall.foo}", # kenya 2 ] security_groups2 = [ "foo", # kenya 1 "bar", # kenya 1.5 "${aws_security_group.firewall.foo}", # kenya 2 "foobar", # kenya 3 ] } hcl-1.0.0/hcl/printer/testdata/comment_array.golden000066400000000000000000000002211334037463000223500ustar00rootroot00000000000000banana = [ # I really want to comment this item in the array. "a", # This as well "b", "c", # And C "d", # And another "e", ] hcl-1.0.0/hcl/printer/testdata/comment_array.input000066400000000000000000000002211334037463000222370ustar00rootroot00000000000000banana = [ # I really want to comment this item in the array. 
"a", # This as well "b", "c", # And C "d", # And another "e", ] hcl-1.0.0/hcl/printer/testdata/comment_crlf.input000066400000000000000000000014521334037463000220560ustar00rootroot00000000000000// A standalone comment is a comment which is not attached to any kind of node // This comes from Terraform, as a test variable "foo" { # Standalone comment should be still here default = "bar" description = "bar" # yooo } /* This is a multi line standalone comment*/ // fatih arslan /* This is a developer test account and a multine comment */ developer = [ "fatih", "arslan"] // fatih arslan # One line here numbers = [1,2] // another line here # Another comment variable = { description = "bar" # another yooo foo { # Nested standalone bar = "fatih" } } // lead comment foo { bar = "fatih" // line comment 2 } // line comment 3 multiline = // comment "assignment" hcl-1.0.0/hcl/printer/testdata/comment_end_file.golden000066400000000000000000000000451334037463000230030ustar00rootroot00000000000000resource "blah" "blah" {} // // // hcl-1.0.0/hcl/printer/testdata/comment_end_file.input000066400000000000000000000000441334037463000226710ustar00rootroot00000000000000resource "blah" "blah" {} // // // hcl-1.0.0/hcl/printer/testdata/comment_multiline_indent.golden000066400000000000000000000002261334037463000246020ustar00rootroot00000000000000resource "provider" "resource" { /* SPACE_SENSITIVE_CODE = < 0 { // common case: last character was not a '\n' s.tokPos.Line = s.srcPos.Line s.tokPos.Column = s.srcPos.Column } else { // last character was a '\n' // (we cannot be at the beginning of the source // since we have called next() at least once) s.tokPos.Line = s.srcPos.Line - 1 s.tokPos.Column = s.lastLineLen } switch { case isLetter(ch): tok = token.IDENT lit := s.scanIdentifier() if lit == "true" || lit == "false" { tok = token.BOOL } case isDecimal(ch): tok = s.scanNumber(ch) default: switch ch { case eof: tok = token.EOF case '"': tok = token.STRING s.scanString() case '#', '/': tok = token.COMMENT s.scanComment(ch) case '.': tok = token.PERIOD ch = s.peek() if isDecimal(ch) { tok = token.FLOAT ch = s.scanMantissa(ch) ch = s.scanExponent(ch) } case '<': tok = token.HEREDOC s.scanHeredoc() case '[': tok = token.LBRACK case ']': tok = token.RBRACK case '{': tok = token.LBRACE case '}': tok = token.RBRACE case ',': tok = token.COMMA case '=': tok = token.ASSIGN case '+': tok = token.ADD case '-': if isDecimal(s.peek()) { ch := s.next() tok = s.scanNumber(ch) } else { tok = token.SUB } default: s.err("illegal char") } } // finish token ending s.tokEnd = s.srcPos.Offset // create token literal var tokenText string if s.tokStart >= 0 { tokenText = string(s.src[s.tokStart:s.tokEnd]) } s.tokStart = s.tokEnd // ensure idempotency of tokenText() call return token.Token{ Type: tok, Pos: s.tokPos, Text: tokenText, } } func (s *Scanner) scanComment(ch rune) { // single line comments if ch == '#' || (ch == '/' && s.peek() != '*') { if ch == '/' && s.peek() != '/' { s.err("expected '/' for comment") return } ch = s.next() for ch != '\n' && ch >= 0 && ch != eof { ch = s.next() } if ch != eof && ch >= 0 { s.unread() } return } // be sure we get the character after /* This allows us to find comment's // that are not erminated if ch == '/' { s.next() ch = s.next() // read character after "/*" } // look for /* - style comments for { if ch < 0 || ch == eof { s.err("comment not terminated") break } ch0 := ch ch = s.next() if ch0 == '*' && ch == '/' { break } } } // scanNumber scans a HCL number definition starting with the given rune 
func (s *Scanner) scanNumber(ch rune) token.Type { if ch == '0' { // check for hexadecimal, octal or float ch = s.next() if ch == 'x' || ch == 'X' { // hexadecimal ch = s.next() found := false for isHexadecimal(ch) { ch = s.next() found = true } if !found { s.err("illegal hexadecimal number") } if ch != eof { s.unread() } return token.NUMBER } // now it's either something like: 0421(octal) or 0.1231(float) illegalOctal := false for isDecimal(ch) { ch = s.next() if ch == '8' || ch == '9' { // this is just a possibility. For example 0159 is illegal, but // 0159.23 is valid. So we mark a possible illegal octal. If // the next character is not a period, we'll print the error. illegalOctal = true } } if ch == 'e' || ch == 'E' { ch = s.scanExponent(ch) return token.FLOAT } if ch == '.' { ch = s.scanFraction(ch) if ch == 'e' || ch == 'E' { ch = s.next() ch = s.scanExponent(ch) } return token.FLOAT } if illegalOctal { s.err("illegal octal number") } if ch != eof { s.unread() } return token.NUMBER } s.scanMantissa(ch) ch = s.next() // seek forward if ch == 'e' || ch == 'E' { ch = s.scanExponent(ch) return token.FLOAT } if ch == '.' { ch = s.scanFraction(ch) if ch == 'e' || ch == 'E' { ch = s.next() ch = s.scanExponent(ch) } return token.FLOAT } if ch != eof { s.unread() } return token.NUMBER } // scanMantissa scans the mantissa beginning from the rune. It returns the next // non-decimal rune. It's used to determine whether it's a fraction or exponent. func (s *Scanner) scanMantissa(ch rune) rune { scanned := false for isDecimal(ch) { ch = s.next() scanned = true } if scanned && ch != eof { s.unread() } return ch } // scanFraction scans the fraction after the '.' rune func (s *Scanner) scanFraction(ch rune) rune { if ch == '.' { ch = s.peek() // we peek just to see if we can move forward ch = s.scanMantissa(ch) } return ch } // scanExponent scans the remaining parts of an exponent after the 'e' or 'E' // rune.
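// Illustrative note (not part of the original source): for an input such as
// "12.34e-5" the helpers compose as follows — scanNumber consumes the
// leading "12" via scanMantissa, scanFraction consumes ".34", and
// scanExponent consumes "e-5" — so a single FLOAT token covers the whole
// literal:
//
//	tok := New([]byte("12.34e-5")).Scan()
//	// tok.Type == token.FLOAT, tok.Text == "12.34e-5"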
func (s *Scanner) scanExponent(ch rune) rune { if ch == 'e' || ch == 'E' { ch = s.next() if ch == '-' || ch == '+' { ch = s.next() } ch = s.scanMantissa(ch) } return ch } // scanHeredoc scans a heredoc string func (s *Scanner) scanHeredoc() { // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { break } // Not an anchor match, record the start of a new line lineStart = s.srcPos.Offset } if ch == eof { s.err("heredoc not terminated") return } } return } // scanString scans a quoted string func (s *Scanner) scanString() { braces := 0 for { // '"' opening already consumed // read character after quote ch := s.next() if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { s.err("literal not terminated") return } if ch == '"' && braces == 0 { break } // If we're going into a ${} then we can ignore quotes for awhile if braces == 0 && ch == '$' && s.peek() == '{' { braces++ s.next() } else if braces > 0 && ch == '{' { braces++ } if braces > 0 && ch == '}' { braces-- } if ch == '\\' { s.scanEscape() } } return } // scanEscape scans an escape sequence func (s *Scanner) scanEscape() rune { // http://en.cppreference.com/w/cpp/language/escape ch := s.next() // read character after '\' switch ch { case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': // nothing to do case '0', '1', '2', '3', '4', '5', '6', '7': // octal notation ch = s.scanDigits(ch, 8, 3) case 'x': // hexadecimal notation ch = s.scanDigits(s.next(), 16, 2) case 'u': // universal character name ch = s.scanDigits(s.next(), 16, 4) case 'U': // universal character name ch = s.scanDigits(s.next(), 16, 8) default: s.err("illegal char escape") } return ch } // scanDigits scans a rune with the given base for n times. For example, an // octal notation such as \184 would result in a call to scanDigits(ch, 8, 3) func (s *Scanner) scanDigits(ch rune, base, n int) rune { start := n for n > 0 && digitVal(ch) < base { ch = s.next() if ch == eof { // If we see an EOF, we halt any more scanning of digits // immediately. break } n-- } if n > 0 { s.err("illegal char escape") } if n != start && ch != eof { // we scanned all digits, put the last non digit char back, // only if we read anything at all s.unread() } return ch } // scanIdentifier scans an identifier and returns the literal string func (s *Scanner) scanIdentifier() string { offs := s.srcPos.Offset - s.lastCharLen ch := s.next() for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { ch = s.next() } if ch != eof { s.unread() // we got identifier, put back latest char } return string(s.src[offs:s.srcPos.Offset]) } // recentPosition returns the position of the character immediately after the // character or token returned by the last call to Scan. func (s *Scanner) recentPosition() (pos token.Pos) { pos.Offset = s.srcPos.Offset - s.lastCharLen switch { case s.srcPos.Column > 0: // common case: last character was not a '\n' pos.Line = s.srcPos.Line pos.Column = s.srcPos.Column case s.lastLineLen > 0: // last character was a '\n' // (we cannot be at the beginning of the source // since we have called next() at least once) pos.Line = s.srcPos.Line - 1 pos.Column = s.lastLineLen default: // at the beginning of the source pos.Line = 1 pos.Column = 1 } return } // err prints the error of any scanning to s.Error function.
If the function is // not defined, by default it prints them to os.Stderr func (s *Scanner) err(msg string) { s.ErrorCount++ pos := s.recentPosition() if s.Error != nil { s.Error(pos, msg) return } fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) } // isLetter returns true if the given rune is a letter func isLetter(ch rune) bool { return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) } // isDigit returns true if the given rune is a decimal digit func isDigit(ch rune) bool { return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) } // isDecimal returns true if the given rune is a decimal number func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' } // isHexadecimal returns true if the given rune is a hexadecimal number func isHexadecimal(ch rune) bool { return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' } // isWhitespace returns true if the rune is a space, tab, newline or carriage return func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' } // digitVal returns the integer value of a given octal, decimal or hexadecimal rune func digitVal(ch rune) int { switch { case '0' <= ch && ch <= '9': return int(ch - '0') case 'a' <= ch && ch <= 'f': return int(ch - 'a' + 10) case 'A' <= ch && ch <= 'F': return int(ch - 'A' + 10) } return 16 // larger than any legal digit val } hcl-1.0.0/hcl/scanner/scanner_test.go000066400000000000000000000373111334037463000175040ustar00rootroot00000000000000package scanner import ( "bytes" "fmt" "testing" "strings" "github.com/hashicorp/hcl/hcl/token" ) var f100 = strings.Repeat("f", 100) type tokenPair struct { tok token.Type text string } var tokenLists = map[string][]tokenPair{ "comment": []tokenPair{ {token.COMMENT, "//"}, {token.COMMENT, "////"}, {token.COMMENT, "// comment"}, {token.COMMENT, "// /* comment */"}, {token.COMMENT, "// // comment //"}, {token.COMMENT, "//" + f100}, {token.COMMENT, "#"}, {token.COMMENT, "##"}, {token.COMMENT, "# comment"}, {token.COMMENT, "# /* comment */"}, {token.COMMENT, "# # comment #"}, {token.COMMENT, "#" + f100}, {token.COMMENT, "/**/"}, {token.COMMENT, "/***/"}, {token.COMMENT, "/* comment */"}, {token.COMMENT, "/* // comment */"}, {token.COMMENT, "/* /* comment */"}, {token.COMMENT, "/*\n comment\n*/"}, {token.COMMENT, "/*" + f100 + "*/"}, }, "operator": []tokenPair{ {token.LBRACK, "["}, {token.LBRACE, "{"}, {token.COMMA, ","}, {token.PERIOD, "."}, {token.RBRACK, "]"}, {token.RBRACE, "}"}, {token.ASSIGN, "="}, {token.ADD, "+"}, {token.SUB, "-"}, }, "bool": []tokenPair{ {token.BOOL, "true"}, {token.BOOL, "false"}, }, "ident": []tokenPair{ {token.IDENT, "a"}, {token.IDENT, "a0"}, {token.IDENT, "foobar"}, {token.IDENT, "foo-bar"}, {token.IDENT, "abc123"}, {token.IDENT, "LGTM"}, {token.IDENT, "_"}, {token.IDENT, "_abc123"}, {token.IDENT, "abc123_"}, {token.IDENT, "_abc_123_"}, {token.IDENT, "_äöü"}, {token.IDENT, "_本"}, {token.IDENT, "äöü"}, {token.IDENT, "本"}, {token.IDENT, "a۰۱۸"}, {token.IDENT, "foo६४"}, {token.IDENT, "bar9876"}, }, "heredoc": []tokenPair{ {token.HEREDOC, "< 0 for %q", s.ErrorCount, src) } } func testTokenList(t *testing.T, tokenList []tokenPair) { // create artificial source code buf := new(bytes.Buffer) for _, ident := range tokenList { fmt.Fprintf(buf, "%s\n", ident.text) } s := New(buf.Bytes()) for _, ident := range tokenList { tok := s.Scan() if tok.Type != ident.tok { t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text) } if tok.Text != ident.text
{ t.Errorf("text = %q want %q", tok.String(), ident.text) } } } func countNewlines(s string) int { n := 0 for _, ch := range s { if ch == '\n' { n++ } } return n } func TestScanDigitsUnread(t *testing.T) { cases := []string{ "M=0\"\\00", "M=\"\\00", "\"\\00", "M=[\"\\00", "U{\"\\00", "\"\n{}#\n\"\\00", "M=[[\"\\00", "U{d=0\"\\U00", "#\n\"\\x00", "m=[[[\"\\00", } for _, c := range cases { s := New([]byte(c)) for { tok := s.Scan() if tok.Type == token.EOF { break } t.Logf("s.Scan() = %s", tok) } } } func TestScanHeredocRegexpCompile(t *testing.T) { cases := []string{ "0\xe1\n<<ȸ\nhello\nworld\nȸ", } for _, c := range cases { s := New([]byte(c)) for { tok := s.Scan() if tok.Type == token.EOF { break } t.Logf("s.Scan() = %s", tok) } } } hcl-1.0.0/hcl/strconv/000077500000000000000000000000001334037463000145255ustar00rootroot00000000000000hcl-1.0.0/hcl/strconv/quote.go000066400000000000000000000110761334037463000162160ustar00rootroot00000000000000package strconv import ( "errors" "unicode/utf8" ) // ErrSyntax indicates that a value does not have the right syntax for the target type. var ErrSyntax = errors.New("invalid syntax") // Unquote interprets s as a double-quoted HCL string literal, returning the // string value that s quotes. Interpolation sequences inside ${ } are copied // through verbatim, and single- or back-quoted input is rejected with // ErrSyntax. func Unquote(s string) (t string, err error) { n := len(s) if n < 2 { return "", ErrSyntax } quote := s[0] if quote != s[n-1] { return "", ErrSyntax } s = s[1 : n-1] if quote != '"' { return "", ErrSyntax } if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { return "", ErrSyntax } // Is it trivial? Avoid allocation. if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { switch quote { case '"': return s, nil case '\'': r, size := utf8.DecodeRuneInString(s) if size == len(s) && (r != utf8.RuneError || size != 1) { return s, nil } } } var runeTmp [utf8.UTFMax]byte buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. for len(s) > 0 { // If we're starting a '${}' then let it through un-unquoted. // Specifically: we don't unquote any characters within the `${}` // section. if s[0] == '$' && len(s) > 1 && s[1] == '{' { buf = append(buf, '$', '{') s = s[2:] // Continue reading until we find the closing brace, copying as-is braces := 1 for len(s) > 0 && braces > 0 { r, size := utf8.DecodeRuneInString(s) if r == utf8.RuneError { return "", ErrSyntax } s = s[size:] n := utf8.EncodeRune(runeTmp[:], r) buf = append(buf, runeTmp[:n]...) switch r { case '{': braces++ case '}': braces-- } } if braces != 0 { return "", ErrSyntax } if len(s) == 0 { // If there's no string left, we're done! break } else { // If there's more left, we need to pop back up to the top of the loop // in case there's another interpolation in this string. continue } } if s[0] == '\n' { return "", ErrSyntax } c, multibyte, ss, err := unquoteChar(s, quote) if err != nil { return "", err } s = ss if c < utf8.RuneSelf || !multibyte { buf = append(buf, byte(c)) } else { n := utf8.EncodeRune(runeTmp[:], c) buf = append(buf, runeTmp[:n]...) } if quote == '\'' && len(s) != 0 { // single-quoted must be single character return "", ErrSyntax } } return string(buf), nil } // contains reports whether the string contains the byte c.
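// Illustrative sketch (not part of the original source) of Unquote's
// contract above, derived from the implementation and from the cases in
// quote_test.go:
//
//	s, err := Unquote(`"a\nb"`)          // s == "a\nb" with a real newline, err == nil
//	s, err = Unquote(`"${file("foo")}"`) // s keeps the ${ ... } text verbatim
//	_, err = Unquote(`"${unterminated"`) // err == ErrSyntax: braces never close
//
// Everything inside a ${ ... } interpolation is copied through untouched,
// while ordinary escape sequences outside of it are decoded.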
func contains(s string, c byte) bool { for i := 0; i < len(s); i++ { if s[i] == c { return true } } return false } func unhex(b byte) (v rune, ok bool) { c := rune(b) switch { case '0' <= c && c <= '9': return c - '0', true case 'a' <= c && c <= 'f': return c - 'a' + 10, true case 'A' <= c && c <= 'F': return c - 'A' + 10, true } return } func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { // easy cases switch c := s[0]; { case c == quote && (quote == '\'' || quote == '"'): err = ErrSyntax return case c >= utf8.RuneSelf: r, size := utf8.DecodeRuneInString(s) return r, true, s[size:], nil case c != '\\': return rune(s[0]), false, s[1:], nil } // hard case: c is backslash if len(s) <= 1 { err = ErrSyntax return } c := s[1] s = s[2:] switch c { case 'a': value = '\a' case 'b': value = '\b' case 'f': value = '\f' case 'n': value = '\n' case 'r': value = '\r' case 't': value = '\t' case 'v': value = '\v' case 'x', 'u', 'U': n := 0 switch c { case 'x': n = 2 case 'u': n = 4 case 'U': n = 8 } var v rune if len(s) < n { err = ErrSyntax return } for j := 0; j < n; j++ { x, ok := unhex(s[j]) if !ok { err = ErrSyntax return } v = v<<4 | x } s = s[n:] if c == 'x' { // single-byte string, possibly not UTF-8 value = v break } if v > utf8.MaxRune { err = ErrSyntax return } value = v multibyte = true case '0', '1', '2', '3', '4', '5', '6', '7': v := rune(c) - '0' if len(s) < 2 { err = ErrSyntax return } for j := 0; j < 2; j++ { // one digit already; two more x := rune(s[j]) - '0' if x < 0 || x > 7 { err = ErrSyntax return } v = (v << 3) | x } s = s[2:] if v > 255 { err = ErrSyntax return } value = v case '\\': value = '\\' case '\'', '"': if c != quote { err = ErrSyntax return } value = rune(c) default: err = ErrSyntax return } tail = s return } hcl-1.0.0/hcl/strconv/quote_test.go000066400000000000000000000036301334037463000172520ustar00rootroot00000000000000package strconv import "testing" type quoteTest struct { in string out string ascii string } var quotetests = []quoteTest{ {"\a\b\f\r\n\t\v", `"\a\b\f\r\n\t\v"`, `"\a\b\f\r\n\t\v"`}, {"\\", `"\\"`, `"\\"`}, {"abc\xffdef", `"abc\xffdef"`, `"abc\xffdef"`}, {"\u263a", `"☺"`, `"\u263a"`}, {"\U0010ffff", `"\U0010ffff"`, `"\U0010ffff"`}, {"\x04", `"\x04"`, `"\x04"`}, } type unQuoteTest struct { in string out string } var unquotetests = []unQuoteTest{ {`""`, ""}, {`"a"`, "a"}, {`"abc"`, "abc"}, {`"☺"`, "☺"}, {`"hello world"`, "hello world"}, {`"\xFF"`, "\xFF"}, {`"\377"`, "\377"}, {`"\u1234"`, "\u1234"}, {`"\U00010111"`, "\U00010111"}, {`"\U0001011111"`, "\U0001011111"}, {`"\a\b\f\n\r\t\v\\\""`, "\a\b\f\n\r\t\v\\\""}, {`"'"`, "'"}, {`"${file("foo")}"`, `${file("foo")}`}, {`"${file("\"foo\"")}"`, `${file("\"foo\"")}`}, {`"echo ${var.region}${element(split(",",var.zones),0)}"`, `echo ${var.region}${element(split(",",var.zones),0)}`}, {`"${HH\\:mm\\:ss}"`, `${HH\\:mm\\:ss}`}, {`"${\n}"`, `${\n}`}, } var misquoted = []string{ ``, `"`, `"a`, `"'`, `b"`, `"\"`, `"\9"`, `"\19"`, `"\129"`, `'\'`, `'\9'`, `'\19'`, `'\129'`, `'ab'`, `"\x1!"`, `"\U12345678"`, `"\z"`, "`", "`xxx", "`\"", `"\'"`, `'\"'`, "\"\n\"", "\"\\n\n\"", "'\n'", `"${"`, `"${foo{}"`, "\"${foo}\n\"", } func TestUnquote(t *testing.T) { for _, tt := range unquotetests { if out, err := Unquote(tt.in); err != nil || out != tt.out { t.Errorf("Unquote(%#q) = %q, %v want %q, nil", tt.in, out, err, tt.out) } } // run the quote tests too, backward for _, tt := range quotetests { if in, err := Unquote(tt.out); in != tt.in { t.Errorf("Unquote(%#q) = %q, %v, want %q, 
nil", tt.out, in, err, tt.in) } } for _, s := range misquoted { if out, err := Unquote(s); out != "" || err != ErrSyntax { t.Errorf("Unquote(%#q) = %q, %v want %q, %v", s, out, err, "", ErrSyntax) } } } hcl-1.0.0/hcl/test-fixtures/000077500000000000000000000000001334037463000156555ustar00rootroot00000000000000hcl-1.0.0/hcl/test-fixtures/array_comment.hcl000066400000000000000000000000461334037463000212050ustar00rootroot00000000000000foo = [ "1", "2", # comment ] hcl-1.0.0/hcl/test-fixtures/assign_colon.hcl000066400000000000000000000001031334037463000210150ustar00rootroot00000000000000resource = [{ "foo": { "bar": {}, "baz": [1, 2, "foo"], } }] hcl-1.0.0/hcl/test-fixtures/comment.hcl000066400000000000000000000001331334037463000200040ustar00rootroot00000000000000// Foo /* Bar */ /* /* Baz */ # Another # Multiple # Lines foo = "bar" hcl-1.0.0/hcl/test-fixtures/comment_single.hcl000066400000000000000000000000101334037463000213370ustar00rootroot00000000000000# Hello hcl-1.0.0/hcl/test-fixtures/complex.hcl000066400000000000000000000013351334037463000200160ustar00rootroot00000000000000// This comes from Terraform, as a test variable "foo" { default = "bar" description = "bar" } provider "aws" { access_key = "foo" secret_key = "bar" } provider "do" { api_key = "${var.foo}" } resource "aws_security_group" "firewall" { count = 5 } resource aws_instance "web" { ami = "${var.foo}" security_groups = [ "foo", "${aws_security_group.firewall.foo}" ] network_interface { device_index = 0 description = "Main network interface" } } resource "aws_instance" "db" { security_groups = "${aws_security_group.firewall.*.id}" VPC = "foo" depends_on = ["aws_instance.web"] } output "web_ip" { value = "${aws_instance.web.private_ip}" } hcl-1.0.0/hcl/test-fixtures/complex_key.hcl000066400000000000000000000000201334037463000206540ustar00rootroot00000000000000foo.bar = "baz" hcl-1.0.0/hcl/test-fixtures/empty.hcl000066400000000000000000000000001334037463000174710ustar00rootroot00000000000000hcl-1.0.0/hcl/test-fixtures/list.hcl000066400000000000000000000000251334037463000173150ustar00rootroot00000000000000foo = [1, 2, "foo"] hcl-1.0.0/hcl/test-fixtures/list_comma.hcl000066400000000000000000000000251334037463000204710ustar00rootroot00000000000000foo = [1, 2, "foo",] hcl-1.0.0/hcl/test-fixtures/multiple.hcl000066400000000000000000000000261334037463000201760ustar00rootroot00000000000000foo = "bar" key = 7 hcl-1.0.0/hcl/test-fixtures/old.hcl000066400000000000000000000000571334037463000171250ustar00rootroot00000000000000default = { "eu-west-1": "ami-b1cf19c6", } hcl-1.0.0/hcl/test-fixtures/structure.hcl000066400000000000000000000001221334037463000204000ustar00rootroot00000000000000// This is a test structure for the lexer foo bar "baz" { key = 7 foo = "bar" } hcl-1.0.0/hcl/test-fixtures/structure_basic.hcl000066400000000000000000000000731334037463000215460ustar00rootroot00000000000000foo { value = 7 "value" = 8 "complex::value" = 9 } hcl-1.0.0/hcl/test-fixtures/structure_empty.hcl000066400000000000000000000000311334037463000216150ustar00rootroot00000000000000resource "foo" "bar" {} hcl-1.0.0/hcl/test-fixtures/types.hcl000066400000000000000000000001301334037463000175030ustar00rootroot00000000000000foo = "bar" bar = 7 baz = [1,2,3] foo = -12 bar = 3.14159 foo = true bar = false hcl-1.0.0/hcl/token/000077500000000000000000000000001334037463000141475ustar00rootroot00000000000000hcl-1.0.0/hcl/token/position.go000066400000000000000000000023121334037463000163400ustar00rootroot00000000000000package token import "fmt" // Pos describes an 
arbitrary source position // including the file, line, and column location. // A Position is valid if the line number is > 0. type Pos struct { Filename string // filename, if any Offset int // offset, starting at 0 Line int // line number, starting at 1 Column int // column number, starting at 1 (character count) } // IsValid returns true if the position is valid. func (p *Pos) IsValid() bool { return p.Line > 0 } // String returns a string in one of several forms: // // file:line:column valid position with file name // line:column valid position without file name // file invalid position with file name // - invalid position without file name func (p Pos) String() string { s := p.Filename if p.IsValid() { if s != "" { s += ":" } s += fmt.Sprintf("%d:%d", p.Line, p.Column) } if s == "" { s = "-" } return s } // Before reports whether the position p is before u. func (p Pos) Before(u Pos) bool { return u.Offset > p.Offset || u.Line > p.Line } // After reports whether the position p is after u. func (p Pos) After(u Pos) bool { return u.Offset < p.Offset || u.Line < p.Line } hcl-1.0.0/hcl/token/token.go000066400000000000000000000116641334037463000156260ustar00rootroot00000000000000// Package token defines constants representing the lexical tokens for HCL // (HashiCorp Configuration Language) package token import ( "fmt" "strconv" "strings" hclstrconv "github.com/hashicorp/hcl/hcl/strconv" ) // Token defines a single HCL token which can be obtained via the Scanner type Token struct { Type Type Pos Pos Text string JSON bool } // Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) type Type int const ( // Special tokens ILLEGAL Type = iota EOF COMMENT identifier_beg IDENT // literals literal_beg NUMBER // 12345 FLOAT // 123.45 BOOL // true,false STRING // "abc" HEREDOC // < 0 { // Pop the current item n := len(frontier) item := frontier[n-1] frontier = frontier[:n-1] switch v := item.Val.(type) { case *ast.ObjectType: items, frontier = flattenObjectType(v, item, items, frontier) case *ast.ListType: items, frontier = flattenListType(v, item, items, frontier) default: items = append(items, item) } } // Reverse the list since the frontier model runs things backwards for i := len(items)/2 - 1; i >= 0; i-- { opp := len(items) - 1 - i items[i], items[opp] = items[opp], items[i] } // Done! Set the original items list.Items = items return n, true }) } func flattenListType( ot *ast.ListType, item *ast.ObjectItem, items []*ast.ObjectItem, frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { // If the list is empty, keep the original list if len(ot.List) == 0 { items = append(items, item) return items, frontier } // All the elements of this object must also be objects! for _, subitem := range ot.List { if _, ok := subitem.(*ast.ObjectType); !ok { items = append(items, item) return items, frontier } } // Great! 
We have a match; go through all the items and flatten for _, elem := range ot.List { // Add it to the frontier so that we can recurse frontier = append(frontier, &ast.ObjectItem{ Keys: item.Keys, Assign: item.Assign, Val: elem, LeadComment: item.LeadComment, LineComment: item.LineComment, }) } return items, frontier } func flattenObjectType( ot *ast.ObjectType, item *ast.ObjectItem, items []*ast.ObjectItem, frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { // If the list has no items we do not have to flatten anything if ot.List.Items == nil { items = append(items, item) return items, frontier } // All the elements of this object must also be objects! for _, subitem := range ot.List.Items { if _, ok := subitem.Val.(*ast.ObjectType); !ok { items = append(items, item) return items, frontier } } // Great! We have a match; go through all the items and flatten for _, subitem := range ot.List.Items { // Copy the new key keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) copy(keys, item.Keys) copy(keys[len(item.Keys):], subitem.Keys) // Add it to the frontier so that we can recurse frontier = append(frontier, &ast.ObjectItem{ Keys: keys, Assign: item.Assign, Val: subitem.Val, LeadComment: item.LeadComment, LineComment: item.LineComment, }) } return items, frontier } hcl-1.0.0/json/parser/parser.go000066400000000000000000000152431334037463000163560ustar00rootroot00000000000000package parser import ( "errors" "fmt" "github.com/hashicorp/hcl/hcl/ast" hcltoken "github.com/hashicorp/hcl/hcl/token" "github.com/hashicorp/hcl/json/scanner" "github.com/hashicorp/hcl/json/token" ) type Parser struct { sc *scanner.Scanner // Last read token tok token.Token commaPrev token.Token enableTrace bool indent int n int // buffer size (max = 1) } func newParser(src []byte) *Parser { return &Parser{ sc: scanner.New(src), } } // Parse parses the given source and returns the abstract syntax tree. func Parse(src []byte) (*ast.File, error) { p := newParser(src) return p.Parse() } var errEofToken = errors.New("EOF token found") // Parse parses the given source and returns the abstract syntax tree. func (p *Parser) Parse() (*ast.File, error) { f := &ast.File{} var err, scerr error p.sc.Error = func(pos token.Pos, msg string) { scerr = fmt.Errorf("%s: %s", pos, msg) } // The root must be an object in JSON object, err := p.object() if scerr != nil { return nil, scerr } if err != nil { return nil, err } // We make our final node an object list so it is more HCL compatible f.Node = object.List // Flatten it, which finds patterns and turns them into more HCL-like // AST trees. flattenObjects(f.Node) return f, nil } func (p *Parser) objectList() (*ast.ObjectList, error) { defer un(trace(p, "ParseObjectList")) node := &ast.ObjectList{} for { n, err := p.objectItem() if err == errEofToken { break // we are finished } // we don't return a nil node, because we might want to use the already // collected items. if err != nil { return node, err } node.Add(n) // Check for a followup comma.
If it isn't a comma, then we're done if tok := p.scan(); tok.Type != token.COMMA { break } } return node, nil } // objectItem parses a single object item func (p *Parser) objectItem() (*ast.ObjectItem, error) { defer un(trace(p, "ParseObjectItem")) keys, err := p.objectKey() if err != nil { return nil, err } o := &ast.ObjectItem{ Keys: keys, } switch p.tok.Type { case token.COLON: pos := p.tok.Pos o.Assign = hcltoken.Pos{ Filename: pos.Filename, Offset: pos.Offset, Line: pos.Line, Column: pos.Column, } o.Val, err = p.objectValue() if err != nil { return nil, err } } return o, nil } // objectKey parses an object key and returns an ObjectKey AST func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { keyCount := 0 keys := make([]*ast.ObjectKey, 0) for { tok := p.scan() switch tok.Type { case token.EOF: return nil, errEofToken case token.STRING: keyCount++ keys = append(keys, &ast.ObjectKey{ Token: p.tok.HCLToken(), }) case token.COLON: // If we have a zero keycount it means that we never got // an object key, i.e. `{ :`. This is a syntax error. if keyCount == 0 { return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) } // Done return keys, nil case token.ILLEGAL: return nil, errors.New("illegal") default: return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) } } } // objectValue parses any type of value, such as number, bool, string, object // or list. func (p *Parser) objectValue() (ast.Node, error) { defer un(trace(p, "ParseObjectValue")) tok := p.scan() switch tok.Type { case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: return p.literalType() case token.LBRACE: return p.objectType() case token.LBRACK: return p.listType() case token.EOF: return nil, errEofToken } return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) } // object parses the document root, which in JSON must be an object. func (p *Parser) object() (*ast.ObjectType, error) { defer un(trace(p, "ParseType")) tok := p.scan() switch tok.Type { case token.LBRACE: return p.objectType() case token.EOF: return nil, errEofToken } return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) } // objectType parses an object type and returns an ObjectType AST func (p *Parser) objectType() (*ast.ObjectType, error) { defer un(trace(p, "ParseObjectType")) // we assume that the currently scanned token is a LBRACE o := &ast.ObjectType{} l, err := p.objectList() // if we hit RBRACE, we are good to go (means we parsed all Items), if it's // not a RBRACE, it's a syntax error and we just return it. if err != nil && p.tok.Type != token.RBRACE { return nil, err } o.List = l return o, nil } // listType parses a list type and returns a ListType AST func (p *Parser) listType() (*ast.ListType, error) { defer un(trace(p, "ParseListType")) // we assume that the currently scanned token is a LBRACK l := &ast.ListType{} for { tok := p.scan() switch tok.Type { case token.NUMBER, token.FLOAT, token.STRING: node, err := p.literalType() if err != nil { return nil, err } l.Add(node) case token.COMMA: continue case token.LBRACE: node, err := p.objectType() if err != nil { return nil, err } l.Add(node) case token.BOOL: // TODO(arslan) should we support? not supported by HCL yet case token.LBRACK: // TODO(arslan) should we support nested lists?
Even though it's // written in README of HCL, it's not a part of the grammar // (not defined in parse.y) case token.RBRACK: // finished return l, nil default: return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) } } } // literalType parses a literal type and returns a LiteralType AST func (p *Parser) literalType() (*ast.LiteralType, error) { defer un(trace(p, "ParseLiteral")) return &ast.LiteralType{ Token: p.tok.HCLToken(), }, nil } // scan returns the next token from the underlying scanner. If a token has // been unscanned then read that instead. func (p *Parser) scan() token.Token { // If we have a token on the buffer, then return it. if p.n != 0 { p.n = 0 return p.tok } p.tok = p.sc.Scan() return p.tok } // unscan pushes the previously read token back onto the buffer. func (p *Parser) unscan() { p.n = 1 } // ---------------------------------------------------------------------------- // Parsing support func (p *Parser) printTrace(a ...interface{}) { if !p.enableTrace { return } const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " const n = len(dots) fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) i := 2 * p.indent for i > n { fmt.Print(dots) i -= n } // i <= n fmt.Print(dots[0:i]) fmt.Println(a...) } func trace(p *Parser, msg string) *Parser { p.printTrace(msg, "(") p.indent++ return p } // Usage pattern: defer un(trace(p, "...")) func un(p *Parser) { p.indent-- p.printTrace(")") } hcl-1.0.0/json/parser/parser_test.go000066400000000000000000000145541334037463000174210ustar00rootroot00000000000000package parser import ( "fmt" "io/ioutil" "path/filepath" "reflect" "runtime" "testing" "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/hcl/hcl/token" ) func TestType(t *testing.T) { var literals = []struct { typ token.Type src string }{ {token.STRING, `"foo": "bar"`}, {token.NUMBER, `"foo": 123`}, {token.FLOAT, `"foo": 123.12`}, {token.FLOAT, `"foo": -123.12`}, {token.BOOL, `"foo": true`}, {token.STRING, `"foo": null`}, } for _, l := range literals { t.Logf("Testing: %s", l.src) p := newParser([]byte(l.src)) item, err := p.objectItem() if err != nil { t.Error(err) } lit, ok := item.Val.(*ast.LiteralType) if !ok { t.Errorf("node should be of type LiteralType, got: %T", item.Val) } if lit.Token.Type != l.typ { t.Errorf("want: %s, got: %s", l.typ, lit.Token.Type) } } } func TestListType(t *testing.T) { var literals = []struct { src string tokens []token.Type }{ { `"foo": ["123", 123]`, []token.Type{token.STRING, token.NUMBER}, }, { `"foo": [123, "123",]`, []token.Type{token.NUMBER, token.STRING}, }, { `"foo": []`, []token.Type{}, }, { `"foo": ["123", 123]`, []token.Type{token.STRING, token.NUMBER}, }, { `"foo": ["123", {}]`, []token.Type{token.STRING, token.LBRACE}, }, } for _, l := range literals { t.Logf("Testing: %s", l.src) p := newParser([]byte(l.src)) item, err := p.objectItem() if err != nil { t.Error(err) } list, ok := item.Val.(*ast.ListType) if !ok { t.Errorf("node should be of type LiteralType, got: %T", item.Val) } tokens := []token.Type{} for _, li := range list.List { switch v := li.(type) { case *ast.LiteralType: tokens = append(tokens, v.Token.Type) case *ast.ObjectType: tokens = append(tokens, token.LBRACE) } } equals(t, l.tokens, tokens) } } func TestObjectType(t *testing.T) { var literals = []struct { src string nodeType []ast.Node itemLen int }{ { `"foo": {}`, nil, 0, }, { `"foo": { "bar": "fatih" }`, []ast.Node{&ast.LiteralType{}}, 1, }, { `"foo": { "bar": "fatih", "baz": ["arslan"] }`, []ast.Node{ 
&ast.LiteralType{}, &ast.ListType{}, }, 2, }, { `"foo": { "bar": {} }`, []ast.Node{ &ast.ObjectType{}, }, 1, }, { `"foo": { "bar": {}, "foo": true }`, []ast.Node{ &ast.ObjectType{}, &ast.LiteralType{}, }, 2, }, } for _, l := range literals { t.Logf("Testing:\n%s\n", l.src) p := newParser([]byte(l.src)) // p.enableTrace = true item, err := p.objectItem() if err != nil { t.Error(err) } // we know that the ObjectKey name is foo for all cases, what matters // is the object obj, ok := item.Val.(*ast.ObjectType) if !ok { t.Errorf("node should be of type LiteralType, got: %T", item.Val) } // check if the total length of items are correct equals(t, l.itemLen, len(obj.List.Items)) // check if the types are correct for i, item := range obj.List.Items { equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val)) } } } func TestFlattenObjects(t *testing.T) { var literals = []struct { src string nodeType []ast.Node itemLen int }{ { `{ "foo": [ { "foo": "svh", "bar": "fatih" } ] }`, []ast.Node{ &ast.ObjectType{}, &ast.LiteralType{}, &ast.LiteralType{}, }, 3, }, { `{ "variable": { "foo": {} } }`, []ast.Node{ &ast.ObjectType{}, }, 1, }, { `{ "empty": [] }`, []ast.Node{ &ast.ListType{}, }, 1, }, { `{ "basic": [1, 2, 3] }`, []ast.Node{ &ast.ListType{}, }, 1, }, } for _, l := range literals { t.Logf("Testing:\n%s\n", l.src) f, err := Parse([]byte(l.src)) if err != nil { t.Error(err) } // the first object is always an ObjectList so just assert that one // so we can use it as such obj, ok := f.Node.(*ast.ObjectList) if !ok { t.Errorf("node should be *ast.ObjectList, got: %T", f.Node) } // check if the types are correct var i int for _, item := range obj.Items { equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val)) i++ if obj, ok := item.Val.(*ast.ObjectType); ok { for _, item := range obj.List.Items { equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val)) i++ } } } // check if the number of items is correct equals(t, l.itemLen, i) } } func TestObjectKey(t *testing.T) { keys := []struct { exp []token.Type src string }{ {[]token.Type{token.STRING}, `"foo": {}`}, } for _, k := range keys { p := newParser([]byte(k.src)) keys, err := p.objectKey() if err != nil { t.Fatal(err) } tokens := []token.Type{} for _, o := range keys { tokens = append(tokens, o.Token.Type) } equals(t, k.exp, tokens) } errKeys := []struct { src string }{ {`foo 12 {}`}, {`foo bar = {}`}, {`foo []`}, {`12 {}`}, } for _, k := range errKeys { p := newParser([]byte(k.src)) _, err := p.objectKey() if err == nil { t.Errorf("case '%s' should give an error", k.src) } } } // Official HCL tests func TestParse(t *testing.T) { cases := []struct { Name string Err bool }{ { "array.json", false, }, { "basic.json", false, }, { "object.json", false, }, { "types.json", false, }, { "bad_input_128.json", true, }, { "bad_input_tf_8110.json", true, }, { "good_input_tf_8110.json", false, }, } const fixtureDir = "./test-fixtures" for _, tc := range cases { d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name)) if err != nil { t.Fatalf("err: %s", err) } _, err = Parse(d) if (err != nil) != tc.Err { t.Fatalf("Input: %s\n\nError: %s", tc.Name, err) } } } func TestParse_inline(t *testing.T) { cases := []struct { Value string Err bool }{ {"{:{", true}, } for _, tc := range cases { _, err := Parse([]byte(tc.Value)) if (err != nil) != tc.Err { t.Fatalf("Input: %q\n\nError: %s", tc.Value, err) } } } // equals fails the test if exp is not equal to act. 
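// Illustrative sketch (not part of the original source): the flattening
// behavior exercised above can be reproduced directly with this package's
// Parse:
//
//	f, err := Parse([]byte(`{"variable": {"foo": {}}}`))
//	if err != nil {
//		// handle the parse error
//	}
//	list := f.Node.(*ast.ObjectList) // the JSON root is exposed as an ObjectList
//	// After flattenObjects, the nested objects collapse into a single item
//	// with the two keys "variable" and "foo", as asserted in
//	// TestFlattenObjects.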
func equals(tb testing.TB, exp, act interface{}) { if !reflect.DeepEqual(exp, act) { _, file, line, _ := runtime.Caller(1) fmt.Printf("\033[31m%s:%d:\n\n\texp: %s\n\n\tgot: %s\033[39m\n\n", filepath.Base(file), line, exp, act) tb.FailNow() } } hcl-1.0.0/json/parser/test-fixtures/000077500000000000000000000000001334037463000173545ustar00rootroot00000000000000hcl-1.0.0/json/parser/test-fixtures/array.json000066400000000000000000000000551334037463000213650ustar00rootroot00000000000000{ "foo": [1, 2, "bar"], "bar": "baz" } hcl-1.0.0/json/parser/test-fixtures/bad_input_128.json000066400000000000000000000000041334037463000226000ustar00rootroot00000000000000{:{ hcl-1.0.0/json/parser/test-fixtures/bad_input_tf_8110.json000066400000000000000000000001421334037463000233530ustar00rootroot00000000000000{ "variable": { "poc": { "default": "${replace("europe-west", "-", " ")}" } } } hcl-1.0.0/json/parser/test-fixtures/basic.json000066400000000000000000000000251334037463000213250ustar00rootroot00000000000000{ "foo": "bar" } hcl-1.0.0/json/parser/test-fixtures/good_input_tf_8110.json000066400000000000000000000001501334037463000235540ustar00rootroot00000000000000{ "variable": { "poc": { "default": "${replace(\"europe-west\", \"-\", \" \")}" } } } hcl-1.0.0/json/parser/test-fixtures/object.json000066400000000000000000000000451334037463000215140ustar00rootroot00000000000000{ "foo": { "bar": [1,2] } } hcl-1.0.0/json/parser/test-fixtures/types.json000066400000000000000000000002121334037463000214060ustar00rootroot00000000000000{ "foo": "bar", "bar": 7, "baz": [1,2,3], "foo": -12, "bar": 3.14159, "foo": true, "bar": false, "foo": null } hcl-1.0.0/json/scanner/000077500000000000000000000000001334037463000146635ustar00rootroot00000000000000hcl-1.0.0/json/scanner/scanner.go000066400000000000000000000245621334037463000166540ustar00rootroot00000000000000package scanner import ( "bytes" "fmt" "os" "unicode" "unicode/utf8" "github.com/hashicorp/hcl/json/token" ) // eof represents a marker rune for the end of the reader. const eof = rune(0) // Scanner defines a lexical scanner type Scanner struct { buf *bytes.Buffer // Source buffer for advancing and scanning src []byte // Source buffer for immutable access // Source Position srcPos token.Pos // current position prevPos token.Pos // previous position, used for peek() method lastCharLen int // length of last character in bytes lastLineLen int // length of last line in characters (for correct column reporting) tokStart int // token text start position tokEnd int // token text end position // Error is called for each error encountered. If no Error // function is set, the error is reported to os.Stderr. Error func(pos token.Pos, msg string) // ErrorCount is incremented by one for each error encountered. ErrorCount int // tokPos is the start position of most recently scanned token; set by // Scan. The Filename field is always left untouched by the Scanner. If // an error is reported (via Error) and Position is invalid, the scanner is // not inside a token. tokPos token.Pos } // New creates and initializes a new instance of Scanner using src as // its source content. func New(src []byte) *Scanner { // even though we accept a src, we read from a io.Reader compatible type // (*bytes.Buffer). So in the future we might easily change it to streaming // read. b := bytes.NewBuffer(src) s := &Scanner{ buf: b, src: src, } // srcPosition always starts with 1 s.srcPos.Line = 1 return s } // next reads the next rune from the bufferred reader. 
Returns the rune(0) if // an error occurs (or io.EOF is returned). func (s *Scanner) next() rune { ch, size, err := s.buf.ReadRune() if err != nil { // advance for error reporting s.srcPos.Column++ s.srcPos.Offset += size s.lastCharLen = size return eof } if ch == utf8.RuneError && size == 1 { s.srcPos.Column++ s.srcPos.Offset += size s.lastCharLen = size s.err("illegal UTF-8 encoding") return ch } // remember last position s.prevPos = s.srcPos s.srcPos.Column++ s.lastCharLen = size s.srcPos.Offset += size if ch == '\n' { s.srcPos.Line++ s.lastLineLen = s.srcPos.Column s.srcPos.Column = 0 } // debug // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) return ch } // unread unreads the previous read Rune and updates the source position func (s *Scanner) unread() { if err := s.buf.UnreadRune(); err != nil { panic(err) // this is user fault, we should catch it } s.srcPos = s.prevPos // put back last position } // peek returns the next rune without advancing the reader. func (s *Scanner) peek() rune { peek, _, err := s.buf.ReadRune() if err != nil { return eof } s.buf.UnreadRune() return peek } // Scan scans the next token and returns the token. func (s *Scanner) Scan() token.Token { ch := s.next() // skip white space for isWhitespace(ch) { ch = s.next() } var tok token.Type // token text markings s.tokStart = s.srcPos.Offset - s.lastCharLen // token position, initial next() is moving the offset by one(size of rune // actually), though we are interested with the starting point s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen if s.srcPos.Column > 0 { // common case: last character was not a '\n' s.tokPos.Line = s.srcPos.Line s.tokPos.Column = s.srcPos.Column } else { // last character was a '\n' // (we cannot be at the beginning of the source // since we have called next() at least once) s.tokPos.Line = s.srcPos.Line - 1 s.tokPos.Column = s.lastLineLen } switch { case isLetter(ch): lit := s.scanIdentifier() if lit == "true" || lit == "false" { tok = token.BOOL } else if lit == "null" { tok = token.NULL } else { s.err("illegal char") } case isDecimal(ch): tok = s.scanNumber(ch) default: switch ch { case eof: tok = token.EOF case '"': tok = token.STRING s.scanString() case '.': tok = token.PERIOD ch = s.peek() if isDecimal(ch) { tok = token.FLOAT ch = s.scanMantissa(ch) ch = s.scanExponent(ch) } case '[': tok = token.LBRACK case ']': tok = token.RBRACK case '{': tok = token.LBRACE case '}': tok = token.RBRACE case ',': tok = token.COMMA case ':': tok = token.COLON case '-': if isDecimal(s.peek()) { ch := s.next() tok = s.scanNumber(ch) } else { s.err("illegal char") } default: s.err("illegal char: " + string(ch)) } } // finish token ending s.tokEnd = s.srcPos.Offset // create token literal var tokenText string if s.tokStart >= 0 { tokenText = string(s.src[s.tokStart:s.tokEnd]) } s.tokStart = s.tokEnd // ensure idempotency of tokenText() call return token.Token{ Type: tok, Pos: s.tokPos, Text: tokenText, } } // scanNumber scans a HCL number definition starting with the given rune func (s *Scanner) scanNumber(ch rune) token.Type { zero := ch == '0' pos := s.srcPos s.scanMantissa(ch) ch = s.next() // seek forward if ch == 'e' || ch == 'E' { ch = s.scanExponent(ch) return token.FLOAT } if ch == '.' 
{ ch = s.scanFraction(ch) if ch == 'e' || ch == 'E' { ch = s.next() ch = s.scanExponent(ch) } return token.FLOAT } if ch != eof { s.unread() } // If we have a larger number and this is zero, error if zero && pos != s.srcPos { s.err("numbers cannot start with 0") } return token.NUMBER } // scanMantissa scans the mantissa beginning from the rune. It returns the next // non-decimal rune. It's used to determine whether it's a fraction or exponent. func (s *Scanner) scanMantissa(ch rune) rune { scanned := false for isDecimal(ch) { ch = s.next() scanned = true } if scanned && ch != eof { s.unread() } return ch } // scanFraction scans the fraction after the '.' rune func (s *Scanner) scanFraction(ch rune) rune { if ch == '.' { ch = s.peek() // we peek just to see if we can move forward ch = s.scanMantissa(ch) } return ch } // scanExponent scans the remaining parts of an exponent after the 'e' or 'E' // rune. func (s *Scanner) scanExponent(ch rune) rune { if ch == 'e' || ch == 'E' { ch = s.next() if ch == '-' || ch == '+' { ch = s.next() } ch = s.scanMantissa(ch) } return ch } // scanString scans a quoted string func (s *Scanner) scanString() { braces := 0 for { // '"' opening already consumed // read character after quote ch := s.next() if ch == '\n' || ch < 0 || ch == eof { s.err("literal not terminated") return } if ch == '"' { break } // If we're going into a ${} then we can ignore quotes for awhile if braces == 0 && ch == '$' && s.peek() == '{' { braces++ s.next() } else if braces > 0 && ch == '{' { braces++ } if braces > 0 && ch == '}' { braces-- } if ch == '\\' { s.scanEscape() } } return } // scanEscape scans an escape sequence func (s *Scanner) scanEscape() rune { // http://en.cppreference.com/w/cpp/language/escape ch := s.next() // read character after '\' switch ch { case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': // nothing to do case '0', '1', '2', '3', '4', '5', '6', '7': // octal notation ch = s.scanDigits(ch, 8, 3) case 'x': // hexadecimal notation ch = s.scanDigits(s.next(), 16, 2) case 'u': // universal character name ch = s.scanDigits(s.next(), 16, 4) case 'U': // universal character name ch = s.scanDigits(s.next(), 16, 8) default: s.err("illegal char escape") } return ch } // scanDigits scans a rune with the given base for n times. For example, an // octal notation such as \184 would result in a call to scanDigits(ch, 8, 3) func (s *Scanner) scanDigits(ch rune, base, n int) rune { for n > 0 && digitVal(ch) < base { ch = s.next() n-- } if n > 0 { s.err("illegal char escape") } // we scanned all digits, put the last non digit char back s.unread() return ch } // scanIdentifier scans an identifier and returns the literal string func (s *Scanner) scanIdentifier() string { offs := s.srcPos.Offset - s.lastCharLen ch := s.next() for isLetter(ch) || isDigit(ch) || ch == '-' { ch = s.next() } if ch != eof { s.unread() // we got identifier, put back latest char } return string(s.src[offs:s.srcPos.Offset]) } // recentPosition returns the position of the character immediately after the // character or token returned by the last call to Scan.
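// Editor's note (illustrative, not part of the original source): unlike the
// HCL scanner, the '"' check in scanString earlier in this file is not
// guarded by the brace counter, so double quotes inside a ${ ... }
// interpolation must still be escaped in JSON input. The fixtures in
// json/parser/test-fixtures make the same point:
//
//	"${replace("europe-west", "-", " ")}"       // bad_input_tf_8110.json, rejected
//	"${replace(\"europe-west\", \"-\", \" \")}" // good_input_tf_8110.json, accepted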
func (s *Scanner) recentPosition() (pos token.Pos) { pos.Offset = s.srcPos.Offset - s.lastCharLen switch { case s.srcPos.Column > 0: // common case: last character was not a '\n' pos.Line = s.srcPos.Line pos.Column = s.srcPos.Column case s.lastLineLen > 0: // last character was a '\n' // (we cannot be at the beginning of the source // since we have called next() at least once) pos.Line = s.srcPos.Line - 1 pos.Column = s.lastLineLen default: // at the beginning of the source pos.Line = 1 pos.Column = 1 } return } // err prints the error of any scanning to s.Error function. If the function is // not defined, by default it prints them to os.Stderr func (s *Scanner) err(msg string) { s.ErrorCount++ pos := s.recentPosition() if s.Error != nil { s.Error(pos, msg) return } fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) } // isLetter returns true if the given rune is a letter func isLetter(ch rune) bool { return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) } // isDigit returns true if the given rune is a decimal digit func isDigit(ch rune) bool { return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) } // isDecimal returns true if the given rune is a decimal number func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' } // isHexadecimal returns true if the given rune is a hexadecimal number func isHexadecimal(ch rune) bool { return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' } // isWhitespace returns true if the rune is a space, tab, newline or carriage return func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' } // digitVal returns the integer value of a given octal, decimal or hexadecimal rune func digitVal(ch rune) int { switch { case '0' <= ch && ch <= '9': return int(ch - '0') case 'a' <= ch && ch <= 'f': return int(ch - 'a' + 10) case 'A' <= ch && ch <= 'F': return int(ch - 'A' + 10) } return 16 // larger than any legal digit val } hcl-1.0.0/json/scanner/scanner_test.go000066400000000000000000000211561334037463000177070ustar00rootroot00000000000000package scanner import ( "bytes" "fmt" "testing" "github.com/hashicorp/hcl/json/token" ) var f100 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" type tokenPair struct { tok token.Type text string } var tokenLists = map[string][]tokenPair{ "operator": []tokenPair{ {token.LBRACK, "["}, {token.LBRACE, "{"}, {token.COMMA, ","}, {token.PERIOD, "."}, {token.RBRACK, "]"}, {token.RBRACE, "}"}, }, "bool": []tokenPair{ {token.BOOL, "true"}, {token.BOOL, "false"}, }, "string": []tokenPair{ {token.STRING, `" "`}, {token.STRING, `"a"`}, {token.STRING, `"本"`}, {token.STRING, `"${file(\"foo\")}"`}, {token.STRING, `"\a"`}, {token.STRING, `"\b"`}, {token.STRING, `"\f"`}, {token.STRING, `"\n"`}, {token.STRING, `"\r"`}, {token.STRING, `"\t"`}, {token.STRING, `"\v"`}, {token.STRING, `"\""`}, {token.STRING, `"\000"`}, {token.STRING, `"\777"`}, {token.STRING, `"\x00"`}, {token.STRING, `"\xff"`}, {token.STRING, `"\u0000"`}, {token.STRING, `"\ufA16"`}, {token.STRING, `"\U00000000"`}, {token.STRING, `"\U0000ffAB"`}, {token.STRING, `"` + f100 + `"`}, }, "number": []tokenPair{ {token.NUMBER, "0"}, {token.NUMBER, "1"}, {token.NUMBER, "9"}, {token.NUMBER, "42"}, {token.NUMBER, "1234567890"}, {token.NUMBER, "-0"}, {token.NUMBER, "-1"}, {token.NUMBER, "-9"}, {token.NUMBER, "-42"}, {token.NUMBER, "-1234567890"}, }, "float": []tokenPair{ {token.FLOAT, "0."},
{token.FLOAT, "1."}, {token.FLOAT, "42."}, {token.FLOAT, "01234567890."}, {token.FLOAT, ".0"}, {token.FLOAT, ".1"}, {token.FLOAT, ".42"}, {token.FLOAT, ".0123456789"}, {token.FLOAT, "0.0"}, {token.FLOAT, "1.0"}, {token.FLOAT, "42.0"}, {token.FLOAT, "01234567890.0"}, {token.FLOAT, "0e0"}, {token.FLOAT, "1e0"}, {token.FLOAT, "42e0"}, {token.FLOAT, "01234567890e0"}, {token.FLOAT, "0E0"}, {token.FLOAT, "1E0"}, {token.FLOAT, "42E0"}, {token.FLOAT, "01234567890E0"}, {token.FLOAT, "0e+10"}, {token.FLOAT, "1e-10"}, {token.FLOAT, "42e+10"}, {token.FLOAT, "01234567890e-10"}, {token.FLOAT, "0E+10"}, {token.FLOAT, "1E-10"}, {token.FLOAT, "42E+10"}, {token.FLOAT, "01234567890E-10"}, {token.FLOAT, "01.8e0"}, {token.FLOAT, "1.4e0"}, {token.FLOAT, "42.2e0"}, {token.FLOAT, "01234567890.12e0"}, {token.FLOAT, "0.E0"}, {token.FLOAT, "1.12E0"}, {token.FLOAT, "42.123E0"}, {token.FLOAT, "01234567890.213E0"}, {token.FLOAT, "0.2e+10"}, {token.FLOAT, "1.2e-10"}, {token.FLOAT, "42.54e+10"}, {token.FLOAT, "01234567890.98e-10"}, {token.FLOAT, "0.1E+10"}, {token.FLOAT, "1.1E-10"}, {token.FLOAT, "42.1E+10"}, {token.FLOAT, "01234567890.1E-10"}, {token.FLOAT, "-0.0"}, {token.FLOAT, "-1.0"}, {token.FLOAT, "-42.0"}, {token.FLOAT, "-01234567890.0"}, {token.FLOAT, "-0e0"}, {token.FLOAT, "-1e0"}, {token.FLOAT, "-42e0"}, {token.FLOAT, "-01234567890e0"}, {token.FLOAT, "-0E0"}, {token.FLOAT, "-1E0"}, {token.FLOAT, "-42E0"}, {token.FLOAT, "-01234567890E0"}, {token.FLOAT, "-0e+10"}, {token.FLOAT, "-1e-10"}, {token.FLOAT, "-42e+10"}, {token.FLOAT, "-01234567890e-10"}, {token.FLOAT, "-0E+10"}, {token.FLOAT, "-1E-10"}, {token.FLOAT, "-42E+10"}, {token.FLOAT, "-01234567890E-10"}, {token.FLOAT, "-01.8e0"}, {token.FLOAT, "-1.4e0"}, {token.FLOAT, "-42.2e0"}, {token.FLOAT, "-01234567890.12e0"}, {token.FLOAT, "-0.E0"}, {token.FLOAT, "-1.12E0"}, {token.FLOAT, "-42.123E0"}, {token.FLOAT, "-01234567890.213E0"}, {token.FLOAT, "-0.2e+10"}, {token.FLOAT, "-1.2e-10"}, {token.FLOAT, "-42.54e+10"}, {token.FLOAT, "-01234567890.98e-10"}, {token.FLOAT, "-0.1E+10"}, {token.FLOAT, "-1.1E-10"}, {token.FLOAT, "-42.1E+10"}, {token.FLOAT, "-01234567890.1E-10"}, }, } var orderedTokenLists = []string{ "comment", "operator", "bool", "string", "number", "float", } func TestPosition(t *testing.T) { // create artifical source code buf := new(bytes.Buffer) for _, listName := range orderedTokenLists { for _, ident := range tokenLists[listName] { fmt.Fprintf(buf, "\t\t\t\t%s\n", ident.text) } } s := New(buf.Bytes()) pos := token.Pos{"", 4, 1, 5} s.Scan() for _, listName := range orderedTokenLists { for _, k := range tokenLists[listName] { curPos := s.tokPos // fmt.Printf("[%q] s = %+v:%+v\n", k.text, curPos.Offset, curPos.Column) if curPos.Offset != pos.Offset { t.Fatalf("offset = %d, want %d for %q", curPos.Offset, pos.Offset, k.text) } if curPos.Line != pos.Line { t.Fatalf("line = %d, want %d for %q", curPos.Line, pos.Line, k.text) } if curPos.Column != pos.Column { t.Fatalf("column = %d, want %d for %q", curPos.Column, pos.Column, k.text) } pos.Offset += 4 + len(k.text) + 1 // 4 tabs + token bytes + newline pos.Line += countNewlines(k.text) + 1 // each token is on a new line s.Error = func(pos token.Pos, msg string) { t.Errorf("error %q for %q", msg, k.text) } s.Scan() } } // make sure there were no token-internal errors reported by scanner if s.ErrorCount != 0 { t.Errorf("%d errors", s.ErrorCount) } } func TestComment(t *testing.T) { testTokenList(t, tokenLists["comment"]) } func TestOperator(t *testing.T) { testTokenList(t, tokenLists["operator"]) } func 
TestBool(t *testing.T) { testTokenList(t, tokenLists["bool"]) } func TestIdent(t *testing.T) { testTokenList(t, tokenLists["ident"]) } func TestString(t *testing.T) { testTokenList(t, tokenLists["string"]) } func TestNumber(t *testing.T) { testTokenList(t, tokenLists["number"]) } func TestFloat(t *testing.T) { testTokenList(t, tokenLists["float"]) } func TestRealExample(t *testing.T) { complexReal := ` { "variable": { "foo": { "default": "bar", "description": "bar", "depends_on": ["something"] } } }` literals := []struct { tokenType token.Type literal string }{ {token.LBRACE, `{`}, {token.STRING, `"variable"`}, {token.COLON, `:`}, {token.LBRACE, `{`}, {token.STRING, `"foo"`}, {token.COLON, `:`}, {token.LBRACE, `{`}, {token.STRING, `"default"`}, {token.COLON, `:`}, {token.STRING, `"bar"`}, {token.COMMA, `,`}, {token.STRING, `"description"`}, {token.COLON, `:`}, {token.STRING, `"bar"`}, {token.COMMA, `,`}, {token.STRING, `"depends_on"`}, {token.COLON, `:`}, {token.LBRACK, `[`}, {token.STRING, `"something"`}, {token.RBRACK, `]`}, {token.RBRACE, `}`}, {token.RBRACE, `}`}, {token.RBRACE, `}`}, {token.EOF, ``}, } s := New([]byte(complexReal)) for _, l := range literals { tok := s.Scan() if l.tokenType != tok.Type { t.Errorf("got: %s want %s for %s\n", tok, l.tokenType, tok.String()) } if l.literal != tok.Text { t.Errorf("got: %s want %s\n", tok, l.literal) } } } func TestError(t *testing.T) { testError(t, "\x80", "1:1", "illegal UTF-8 encoding", token.ILLEGAL) testError(t, "\xff", "1:1", "illegal UTF-8 encoding", token.ILLEGAL) testError(t, `"ab`+"\x80", "1:4", "illegal UTF-8 encoding", token.STRING) testError(t, `"abc`+"\xff", "1:5", "illegal UTF-8 encoding", token.STRING) testError(t, `01238`, "1:7", "numbers cannot start with 0", token.NUMBER) testError(t, `01238123`, "1:10", "numbers cannot start with 0", token.NUMBER) testError(t, `'aa'`, "1:1", "illegal char: '", token.ILLEGAL) testError(t, `"`, "1:2", "literal not terminated", token.STRING) testError(t, `"abc`, "1:5", "literal not terminated", token.STRING) testError(t, `"abc`+"\n", "1:5", "literal not terminated", token.STRING) } func testError(t *testing.T, src, pos, msg string, tok token.Type) { s := New([]byte(src)) errorCalled := false s.Error = func(p token.Pos, m string) { if !errorCalled { if pos != p.String() { t.Errorf("pos = %q, want %q for %q", p, pos, src) } if m != msg { t.Errorf("msg = %q, want %q for %q", m, msg, src) } errorCalled = true } } tk := s.Scan() if tk.Type != tok { t.Errorf("tok = %s, want %s for %q", tk, tok, src) } if !errorCalled { t.Errorf("error handler not called for %q", src) } if s.ErrorCount == 0 { t.Errorf("count = %d, want > 0 for %q", s.ErrorCount, src) } } func testTokenList(t *testing.T, tokenList []tokenPair) { // create artifical source code buf := new(bytes.Buffer) for _, ident := range tokenList { fmt.Fprintf(buf, "%s\n", ident.text) } s := New(buf.Bytes()) for _, ident := range tokenList { tok := s.Scan() if tok.Type != ident.tok { t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text) } if tok.Text != ident.text { t.Errorf("text = %q want %q", tok.String(), ident.text) } } } func countNewlines(s string) int { n := 0 for _, ch := range s { if ch == '\n' { n++ } } return n } hcl-1.0.0/json/test-fixtures/000077500000000000000000000000001334037463000160605ustar00rootroot00000000000000hcl-1.0.0/json/test-fixtures/array.json000066400000000000000000000000551334037463000200710ustar00rootroot00000000000000{ "foo": [1, 2, "bar"], "bar": "baz" } 
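// Editor's sketch (hypothetical, not part of the archive): fixtures such as
// array.json above can be exercised end to end through the library's
// top-level API. hcl.Decode is part of this repository's public surface;
// the struct shape below is an assumption chosen for illustration.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

func main() {
	var out struct {
		Foo []interface{} `hcl:"foo"`
		Bar string        `hcl:"bar"`
	}
	// Same content as json/test-fixtures/array.json.
	input := `{ "foo": [1, 2, "bar"], "bar": "baz" }`
	if err := hcl.Decode(&out, input); err != nil {
		panic(err)
	}
	fmt.Println(out.Foo, out.Bar) // [1 2 bar] baz
}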
hcl-1.0.0/json/test-fixtures/basic.json000066400000000000000000000000251334037463000200310ustar00rootroot00000000000000{ "foo": "bar" } hcl-1.0.0/json/test-fixtures/object.json000066400000000000000000000000451334037463000202200ustar00rootroot00000000000000{ "foo": { "bar": [1,2] } } hcl-1.0.0/json/test-fixtures/types.json000066400000000000000000000002121334037463000201120ustar00rootroot00000000000000{ "foo": "bar", "bar": 7, "baz": [1,2,3], "foo": -12, "bar": 3.14159, "foo": true, "bar": false, "foo": null } hcl-1.0.0/json/token/000077500000000000000000000000001334037463000143525ustar00rootroot00000000000000hcl-1.0.0/json/token/position.go000066400000000000000000000023121334037463000165430ustar00rootroot00000000000000package token import "fmt" // Pos describes an arbitrary source position // including the file, line, and column location. // A Position is valid if the line number is > 0. type Pos struct { Filename string // filename, if any Offset int // offset, starting at 0 Line int // line number, starting at 1 Column int // column number, starting at 1 (character count) } // IsValid returns true if the position is valid. func (p *Pos) IsValid() bool { return p.Line > 0 } // String returns a string in one of several forms: // // file:line:column valid position with file name // line:column valid position without file name // file invalid position with file name // - invalid position without file name func (p Pos) String() string { s := p.Filename if p.IsValid() { if s != "" { s += ":" } s += fmt.Sprintf("%d:%d", p.Line, p.Column) } if s == "" { s = "-" } return s } // Before reports whether the position p is before u. func (p Pos) Before(u Pos) bool { return u.Offset > p.Offset || u.Line > p.Line } // After reports whether the position p is after u. func (p Pos) After(u Pos) bool { return u.Offset < p.Offset || u.Line < p.Line } hcl-1.0.0/json/token/token.go000066400000000000000000000051311334037463000160210ustar00rootroot00000000000000package token import ( "fmt" "strconv" hcltoken "github.com/hashicorp/hcl/hcl/token" ) // Token defines a single HCL token which can be obtained via the Scanner type Token struct { Type Type Pos Pos Text string } // Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) type Type int const ( // Special tokens ILLEGAL Type = iota EOF identifier_beg literal_beg NUMBER // 12345 FLOAT // 123.45 BOOL // true,false STRING // "abc" NULL // null literal_end identifier_end operator_beg LBRACK // [ LBRACE // { COMMA // , PERIOD // . COLON // : RBRACK // ] RBRACE // } operator_end ) var tokens = [...]string{ ILLEGAL: "ILLEGAL", EOF: "EOF", NUMBER: "NUMBER", FLOAT: "FLOAT", BOOL: "BOOL", STRING: "STRING", NULL: "NULL", LBRACK: "LBRACK", LBRACE: "LBRACE", COMMA: "COMMA", PERIOD: "PERIOD", COLON: "COLON", RBRACK: "RBRACK", RBRACE: "RBRACE", } // String returns the string corresponding to the token tok. func (t Type) String() string { s := "" if 0 <= t && t < Type(len(tokens)) { s = tokens[t] } if s == "" { s = "token(" + strconv.Itoa(int(t)) + ")" } return s } // IsIdentifier returns true for tokens corresponding to identifiers and basic // type literals; it returns false otherwise. func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } // IsLiteral returns true for tokens corresponding to basic type literals; it // returns false otherwise. 
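// Illustrative sketch (not part of the original source), following from the
// ranges defined by the literal_beg/end and operator_beg/end markers above:
//
//	STRING.IsLiteral()  // true
//	COMMA.IsOperator()  // true
//	COMMA.IsLiteral()   // false
//	Type(99).String()   // "token(99)" for values outside the tokens table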
func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } // IsOperator returns true for tokens corresponding to operators and // delimiters; it returns false otherwise. func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } // String returns a human-readable description of the token, combining // its position, type, and literal text. func (t Token) String() string { return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) } // HCLToken converts this token to an HCL token. // // The token type must be a literal type or this will panic. func (t Token) HCLToken() hcltoken.Token { switch t.Type { case BOOL: return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} case FLOAT: return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} case NULL: return hcltoken.Token{Type: hcltoken.STRING, Text: ""} case NUMBER: return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} case STRING: return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} default: panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) } } hcl-1.0.0/json/token/token_test.go000066400000000000000000000010301334037463000170520ustar00rootroot00000000000000package token import ( "testing" ) func TestTypeString(t *testing.T) { var tokens = []struct { tt Type str string }{ {ILLEGAL, "ILLEGAL"}, {EOF, "EOF"}, {NUMBER, "NUMBER"}, {FLOAT, "FLOAT"}, {BOOL, "BOOL"}, {STRING, "STRING"}, {NULL, "NULL"}, {LBRACK, "LBRACK"}, {LBRACE, "LBRACE"}, {COMMA, "COMMA"}, {PERIOD, "PERIOD"}, {RBRACK, "RBRACK"}, {RBRACE, "RBRACE"}, } for _, token := range tokens { if token.tt.String() != token.str { t.Errorf("want: %q got: %q\n", token.str, token.tt) } } } hcl-1.0.0/lex.go000066400000000000000000000007561334037463000134070ustar00rootroot00000000000000package hcl import ( "unicode" "unicode/utf8" ) type lexModeValue byte const ( lexModeUnknown lexModeValue = iota lexModeHcl lexModeJson ) // lexMode returns whether we're going to be parsing in JSON // mode or HCL mode. func lexMode(v []byte) lexModeValue { var ( r rune w int offset int ) for { r, w = utf8.DecodeRune(v[offset:]) offset += w if unicode.IsSpace(r) { continue } if r == '{' { return lexModeJson } break } return lexModeHcl } hcl-1.0.0/lex_test.go000066400000000000000000000006241334037463000144410ustar00rootroot00000000000000package hcl import ( "testing" ) func TestLexMode(t *testing.T) { cases := []struct { Input string Mode lexModeValue }{ { "", lexModeHcl, }, { "foo", lexModeHcl, }, { "{}", lexModeJson, }, { " {}", lexModeJson, }, } for i, tc := range cases { actual := lexMode([]byte(tc.Input)) if actual != tc.Mode { t.Fatalf("%d: %#v", i, actual) } } } hcl-1.0.0/parse.go000066400000000000000000000015741334037463000137300ustar00rootroot00000000000000package hcl import ( "fmt" "github.com/hashicorp/hcl/hcl/ast" hclParser "github.com/hashicorp/hcl/hcl/parser" jsonParser "github.com/hashicorp/hcl/json/parser" ) // ParseBytes accepts a byte slice as input and returns an AST tree. // // The input can be either JSON or HCL. func ParseBytes(in []byte) (*ast.File, error) { return parse(in) } // ParseString accepts a string as input and returns an AST tree.
func ParseString(input string) (*ast.File, error) { return parse([]byte(input)) } func parse(in []byte) (*ast.File, error) { switch lexMode(in) { case lexModeHcl: return hclParser.Parse(in) case lexModeJson: return jsonParser.Parse(in) } return nil, fmt.Errorf("unknown config format") } // Parse parses the given input and returns the root object. // // The input format can be either HCL or JSON. func Parse(input string) (*ast.File, error) { return parse([]byte(input)) } hcl-1.0.0/test-fixtures/000077500000000000000000000000001334037463000151075ustar00rootroot00000000000000hcl-1.0.0/test-fixtures/assign_deep.hcl000066400000000000000000000000571334037463000200620ustar00rootroot00000000000000resource = [{ foo = [{ bar = {} }] }] hcl-1.0.0/test-fixtures/basic.hcl000066400000000000000000000000571334037463000166620ustar00rootroot00000000000000foo = "bar" bar = "${file("bing/bong.txt")}" hcl-1.0.0/test-fixtures/basic.json000066400000000000000000000000771334037463000170670ustar00rootroot00000000000000{ "foo": "bar", "bar": "${file(\"bing/bong.txt\")}" } hcl-1.0.0/test-fixtures/basic_int_string.hcl000066400000000000000000000000141334037463000211130ustar00rootroot00000000000000count = "3" hcl-1.0.0/test-fixtures/basic_squish.hcl000066400000000000000000000000671334037463000202570ustar00rootroot00000000000000foo="bar" bar="${file("bing/bong.txt")}" foo-bar="baz" hcl-1.0.0/test-fixtures/block_assign.hcl000066400000000000000000000000301334037463000202260ustar00rootroot00000000000000environment = "aws" { } hcl-1.0.0/test-fixtures/decode_policy.hcl000066400000000000000000000002131334037463000203750ustar00rootroot00000000000000key "" { policy = "read" } key "foo/" { policy = "write" } key "foo/bar/" { policy = "read" } key "foo/bar/baz" { policy = "deny" } hcl-1.0.0/test-fixtures/decode_policy.json000066400000000000000000000004111334037463000206000ustar00rootroot00000000000000{ "key": { "": { "policy": "read" }, "foo/": { "policy": "write" }, "foo/bar/": { "policy": "read" }, "foo/bar/baz": { "policy": "deny" } } } hcl-1.0.0/test-fixtures/decode_tf_variable.hcl000066400000000000000000000001771334037463000213650ustar00rootroot00000000000000variable "foo" { default = "bar" description = "bar" } variable "amis" { default = { east = "foo" } } hcl-1.0.0/test-fixtures/decode_tf_variable.json000066400000000000000000000003311334037463000215600ustar00rootroot00000000000000{ "variable": { "foo": { "default": "bar", "description": "bar" }, "amis": { "default": { "east": "foo" } } } } hcl-1.0.0/test-fixtures/empty.hcl000066400000000000000000000000221334037463000167270ustar00rootroot00000000000000resource "foo" {} hcl-1.0.0/test-fixtures/escape.hcl000066400000000000000000000002331334037463000170350ustar00rootroot00000000000000foo = "bar\"baz\\n" bar = "new\nline" qux = "back\\slash" qax = "slash\\:colon" nested = "${HH\\:mm\\:ss}" nestedquotes = "${"\"stringwrappedinquotes\""}" hcl-1.0.0/test-fixtures/escape_backslash.hcl000066400000000000000000000002471334037463000210550ustar00rootroot00000000000000output { one = "${replace(var.sub_domain, ".", "\\.")}" two = "${replace(var.sub_domain, ".", "\\\\.")}" many = "${replace(var.sub_domain, ".", "\\\\\\\\.")}" } hcl-1.0.0/test-fixtures/flat.hcl000066400000000000000000000000261334037463000165230ustar00rootroot00000000000000foo = "bar" Key = 7 hcl-1.0.0/test-fixtures/float.hcl000066400000000000000000000000171334037463000167020ustar00rootroot00000000000000a = 1.02 b = 2 
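Editor's addition (not part of the original archive): parse.go above routes input by lexMode — a first non-space '{' selects the JSON parser, anything else the HCL parser. A hedged sketch of calling the public entry point follows, assuming the top-level import path github.com/hashicorp/hcl implied by this archive's layout.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

func main() {
	// Same content as the flat.hcl fixture above; it does not start with
	// '{', so lexMode sends it to the HCL parser rather than the JSON one.
	file, err := hcl.Parse("foo = \"bar\"\nKey = 7")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("root node type: %T\n", file.Node)
}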
hcl-1.0.0/test-fixtures/float.json000066400000000000000000000000301334037463000171000ustar00rootroot00000000000000{ "a": 1.02, "b": 2 } hcl-1.0.0/test-fixtures/git_crypt.hcl000066400000000000000000000000121334037463000175740ustar00rootroot00000000000000GITCRYPT hcl-1.0.0/test-fixtures/interpolate.json000066400000000000000000000000751334037463000203320ustar00rootroot00000000000000{ "default": "${replace(\"europe-west\", \"-\", \" \")}" } hcl-1.0.0/test-fixtures/list_of_lists.hcl000066400000000000000000000000321334037463000204470ustar00rootroot00000000000000foo = [["foo"], ["bar"]] hcl-1.0.0/test-fixtures/list_of_maps.hcl000066400000000000000000000001371334037463000202570ustar00rootroot00000000000000foo = [ {somekey1 = "someval1"}, {somekey2 = "someval2", someextrakey = "someextraval"}, ] hcl-1.0.0/test-fixtures/multiline.hcl000066400000000000000000000000301334037463000175720ustar00rootroot00000000000000foo = <