copystructure-1.2.0/.github/workflows/test.yml

on: [push, pull_request]
name: Test
jobs:
  test:
    strategy:
      matrix:
        go-version: [1.15.x]
        os: [ubuntu-latest]
    runs-on: ${{ matrix.os }}
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Test
        run: go test ./...

copystructure-1.2.0/LICENSE

The MIT License (MIT)

Copyright (c) 2014 Mitchell Hashimoto

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

copystructure-1.2.0/README.md

# copystructure

copystructure is a Go library for deep copying values. This allows you to
copy values that may contain reference types such as maps, slices, or
pointers, and copy their data as well instead of just their references.

## Installation

Standard `go get`:

```
$ go get github.com/mitchellh/copystructure
```

## Usage & Example

For usage and examples see the
[Godoc](http://godoc.org/github.com/mitchellh/copystructure). The `Copy`
function has examples associated with it there.
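A minimal sketch of typical use, mirroring the package's `ExampleCopy`:

```go
input := map[string]interface{}{
	"bob": map[string]interface{}{
		"emails": []string{"a", "b"},
	},
}

// Copy returns a deep copy: the nested map and slice are duplicated,
// not shared with the original.
dup, err := copystructure.Copy(input)
if err != nil {
	panic(err)
}
```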
copystructure-1.2.0/copier_time.go

package copystructure

import (
	"reflect"
	"time"
)

func init() {
	Copiers[reflect.TypeOf(time.Time{})] = timeCopier
}

func timeCopier(v interface{}) (interface{}, error) {
	// Just... copy it.
	return v.(time.Time), nil
}

copystructure-1.2.0/copier_time_test.go

package copystructure

import (
	"testing"
	"time"
)

func TestTimeCopier(t *testing.T) {
	v := time.Now().UTC()
	result, err := timeCopier(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if result.(time.Time) != v {
		t.Fatalf("bad: %#v\n\n%#v", v, result)
	}
}

copystructure-1.2.0/copystructure.go

package copystructure

import (
	"errors"
	"reflect"
	"sync"

	"github.com/mitchellh/reflectwalk"
)

const tagKey = "copy"

// Copy returns a deep copy of v.
//
// Copy is unable to copy unexported fields in a struct (lowercase field names).
// Unexported fields can't be reflected by the Go runtime and therefore
// copystructure can't perform any data copies.
//
// For structs, copy behavior can be controlled with struct tags. For example:
//
//   struct {
//     Name string
//     Data *bytes.Buffer `copy:"shallow"`
//   }
//
// The available tag values are:
//
//   * "ignore" - The field will be ignored, effectively resulting in it being
//     assigned the zero value in the copy.
//
//   * "shallow" - The field will be shallow copied. This means that reference
//     values such as pointers, maps, slices, etc. will be directly assigned
//     versus deep copied.
//
func Copy(v interface{}) (interface{}, error) {
	return Config{}.Copy(v)
}

// CopierFunc is a function that knows how to deep copy a specific type.
// Register these globally with the Copiers variable.
type CopierFunc func(interface{}) (interface{}, error)

// Copiers is a map of types that behave specially when they are copied.
// If a type is found in this map while deep copying, this function
// will be called to copy it instead of attempting to copy all fields.
//
// The key should be the type, obtained using: reflect.TypeOf(value with type).
//
// It is unsafe to write to this map after Copies have started. If you
// are writing to this map while also copying, wrap all modifications to
// this map as well as to Copy in a mutex.
var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc)

// ShallowCopiers is a map of pointer types that behave specially
// when they are copied. If a type is found in this map while deep
// copying, the pointer value will be shallow copied and not walked
// into.
//
// The key should be the type, obtained using: reflect.TypeOf(value
// with type).
//
// It is unsafe to write to this map after Copies have started. If you
// are writing to this map while also copying, wrap all modifications to
// this map as well as to Copy in a mutex.
var ShallowCopiers map[reflect.Type]struct{} = make(map[reflect.Type]struct{})
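// An illustrative registration sketch (the type Money here is hypothetical;
// the time.Time copier in copier_time.go is the registration this package
// actually ships with):
//
//	func init() {
//		// Deep-copy hook: called instead of walking Money's fields.
//		Copiers[reflect.TypeOf(Money{})] = func(v interface{}) (interface{}, error) {
//			return v.(Money), nil // a plain value copy suffices here
//		}
//
//		// Shallow hook: Money pointers are assigned as-is, not walked into
//		// (keyed the same way as in TestCopy_customCopierShallowValue).
//		ShallowCopiers[reflect.TypeOf(Money{})] = struct{}{}
//	}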
// Must is a helper that wraps a call to a function returning
// (interface{}, error) and panics if the error is non-nil. It is intended
// for use in variable initializations and should only be used when a copy
// error should be a crashing case.
func Must(v interface{}, err error) interface{} {
	if err != nil {
		panic("copy error: " + err.Error())
	}

	return v
}

var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true")

type Config struct {
	// Lock any types that are a sync.Locker and are not a mutex while copying.
	// If there is an RLocker method, use that to get the sync.Locker.
	Lock bool

	// Copiers is a map of types associated with a CopierFunc. Use the global
	// Copiers map if this is nil.
	Copiers map[reflect.Type]CopierFunc

	// ShallowCopiers is a map of pointer types that are shallow copied
	// no matter where they are encountered. Use the global ShallowCopiers
	// if this is nil.
	ShallowCopiers map[reflect.Type]struct{}
}

func (c Config) Copy(v interface{}) (interface{}, error) {
	if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr {
		return nil, errPointerRequired
	}

	w := new(walker)
	if c.Lock {
		w.useLocks = true
	}

	if c.Copiers == nil {
		c.Copiers = Copiers
	}
	w.copiers = c.Copiers

	if c.ShallowCopiers == nil {
		c.ShallowCopiers = ShallowCopiers
	}
	w.shallowCopiers = c.ShallowCopiers

	err := reflectwalk.Walk(v, w)
	if err != nil {
		return nil, err
	}

	// Get the result. If the result is nil, then we want to turn it
	// into a typed nil if we can.
	result := w.Result
	if result == nil {
		val := reflect.ValueOf(v)
		result = reflect.Indirect(reflect.New(val.Type())).Interface()
	}

	return result, nil
}

// Return the key used to index interface types we've seen. Store the number
// of pointers in the upper 32 bits, and the depth in the lower 32 bits. This
// is easy to calculate, easy to match a key with our current depth, and we
// don't need to deal with initializing and cleaning up nested maps or slices.
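// For example, ifaceKey(2, 3) yields 0x0000000200000003, i.e. two pointers
// seen so far at walk depth three.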
func ifaceKey(pointers, depth int) uint64 {
	return uint64(pointers)<<32 | uint64(depth)
}

type walker struct {
	Result interface{}

	copiers        map[reflect.Type]CopierFunc
	shallowCopiers map[reflect.Type]struct{}
	depth          int
	ignoreDepth    int
	vals           []reflect.Value
	cs             []reflect.Value

	// This stores the number of pointers we've walked over, indexed by depth.
	ps []int

	// If an interface is indirected by a pointer, we need to know the type of
	// interface to create when creating the new value. Store the interface
	// types here, indexed by both the walk depth and the number of pointers
	// already seen at that depth. Use ifaceKey to calculate the proper uint64
	// value.
	ifaceTypes map[uint64]reflect.Type

	// any locks we've taken, indexed by depth
	locks []sync.Locker
	// take locks while walking the structure
	useLocks bool
}

func (w *walker) Enter(l reflectwalk.Location) error {
	w.depth++

	// ensure we have enough elements to index via w.depth
	for w.depth >= len(w.locks) {
		w.locks = append(w.locks, nil)
	}

	for len(w.ps) < w.depth+1 {
		w.ps = append(w.ps, 0)
	}

	return nil
}

func (w *walker) Exit(l reflectwalk.Location) error {
	locker := w.locks[w.depth]
	w.locks[w.depth] = nil
	if locker != nil {
		defer locker.Unlock()
	}

	// clear out pointers and interfaces as we exit the stack
	w.ps[w.depth] = 0

	for k := range w.ifaceTypes {
		mask := uint64(^uint32(0))
		if k&mask == uint64(w.depth) {
			delete(w.ifaceTypes, k)
		}
	}

	w.depth--
	if w.ignoreDepth > w.depth {
		w.ignoreDepth = 0
	}

	if w.ignoring() {
		return nil
	}

	switch l {
	case reflectwalk.Array:
		fallthrough
	case reflectwalk.Map:
		fallthrough
	case reflectwalk.Slice:
		w.replacePointerMaybe()

		// Pop map off our container
		w.cs = w.cs[:len(w.cs)-1]
	case reflectwalk.MapValue:
		// Pop off the key and value
		mv := w.valPop()
		mk := w.valPop()
		m := w.cs[len(w.cs)-1]

		// If mv is the zero value, SetMapIndex deletes the key from the map,
		// or in this case never adds it. We need to create a properly typed
		// zero value so that this key can be set.
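		// For example, copying map[int]*int{0: nil} (see TestCopy_mapWithNil)
		// walks a nil *int for key 0 and pops an invalid reflect.Value here;
		// without a typed zero the key would be dropped from the copy.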
		if !mv.IsValid() {
			mv = reflect.Zero(m.Elem().Type().Elem())
		}
		m.Elem().SetMapIndex(mk, mv)
	case reflectwalk.ArrayElem:
		// Pop off the value and the index and set it on the array
		v := w.valPop()
		i := w.valPop().Interface().(int)
		if v.IsValid() {
			a := w.cs[len(w.cs)-1]
			ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call
			if ae.CanSet() {
				ae.Set(v)
			}
		}
	case reflectwalk.SliceElem:
		// Pop off the value and the index and set it on the slice
		v := w.valPop()
		i := w.valPop().Interface().(int)
		if v.IsValid() {
			s := w.cs[len(w.cs)-1]
			se := s.Elem().Index(i)
			if se.CanSet() {
				se.Set(v)
			}
		}
	case reflectwalk.Struct:
		w.replacePointerMaybe()

		// Remove the struct from the container stack
		w.cs = w.cs[:len(w.cs)-1]
	case reflectwalk.StructField:
		// Pop off the value and the field
		v := w.valPop()
		f := w.valPop().Interface().(reflect.StructField)
		if v.IsValid() {
			s := w.cs[len(w.cs)-1]
			sf := reflect.Indirect(s).FieldByName(f.Name)

			if sf.CanSet() {
				sf.Set(v)
			}
		}
	case reflectwalk.WalkLoc:
		// Clear out the slices for GC
		w.cs = nil
		w.vals = nil
	}

	return nil
}

func (w *walker) Map(m reflect.Value) error {
	if w.ignoring() {
		return nil
	}
	w.lock(m)

	// Create the map. If the map itself is nil, then just make a nil map
	var newMap reflect.Value
	if m.IsNil() {
		newMap = reflect.New(m.Type())
	} else {
		newMap = wrapPtr(reflect.MakeMap(m.Type()))
	}

	w.cs = append(w.cs, newMap)
	w.valPush(newMap)
	return nil
}

func (w *walker) MapElem(m, k, v reflect.Value) error {
	return nil
}

func (w *walker) PointerEnter(v bool) error {
	if v {
		w.ps[w.depth]++
	}
	return nil
}

func (w *walker) PointerExit(v bool) error {
	if v {
		w.ps[w.depth]--
	}
	return nil
}

func (w *walker) Pointer(v reflect.Value) error {
	if _, ok := w.shallowCopiers[v.Type()]; ok {
		// Shallow copy this value. Use the same logic as primitive, then
		// return skip.
		if err := w.Primitive(v); err != nil {
			return err
		}
		return reflectwalk.SkipEntry
	}
	return nil
}

func (w *walker) Interface(v reflect.Value) error {
	if !v.IsValid() {
		return nil
	}
	if w.ifaceTypes == nil {
		w.ifaceTypes = make(map[uint64]reflect.Type)
	}

	w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type()
	return nil
}

func (w *walker) Primitive(v reflect.Value) error {
	if w.ignoring() {
		return nil
	}
	w.lock(v)

	// IsValid verifies the v is non-zero and CanInterface verifies
	// that we're allowed to read this value (unexported fields).
	var newV reflect.Value
	if v.IsValid() && v.CanInterface() {
		newV = reflect.New(v.Type())
		newV.Elem().Set(v)
	}

	w.valPush(newV)
	w.replacePointerMaybe()
	return nil
}

func (w *walker) Slice(s reflect.Value) error {
	if w.ignoring() {
		return nil
	}
	w.lock(s)

	var newS reflect.Value
	if s.IsNil() {
		newS = reflect.New(s.Type())
	} else {
		newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap()))
	}

	w.cs = append(w.cs, newS)
	w.valPush(newS)
	return nil
}

func (w *walker) SliceElem(i int, elem reflect.Value) error {
	if w.ignoring() {
		return nil
	}

	// We don't write the slice here because elem might still be
	// arbitrarily complex. Just record the index and continue on.
	w.valPush(reflect.ValueOf(i))

	return nil
}

func (w *walker) Array(a reflect.Value) error {
	if w.ignoring() {
		return nil
	}
	w.lock(a)

	newA := reflect.New(a.Type())

	w.cs = append(w.cs, newA)
	w.valPush(newA)
	return nil
}

func (w *walker) ArrayElem(i int, elem reflect.Value) error {
	if w.ignoring() {
		return nil
	}

	// We don't write the array here because elem might still be
	// arbitrarily complex. Just record the index and continue on.
	w.valPush(reflect.ValueOf(i))

	return nil
}

func (w *walker) Struct(s reflect.Value) error {
	if w.ignoring() {
		return nil
	}
	w.lock(s)

	var v reflect.Value
	if c, ok := w.copiers[s.Type()]; ok {
		// We have a Copier for this struct, so we use that copier to
		// get the copy, and we ignore anything deeper than this.
		w.ignoreDepth = w.depth

		dup, err := c(s.Interface())
		if err != nil {
			return err
		}

		// We need to put a pointer to the value on the value stack,
		// so allocate a new pointer and set it.
		v = reflect.New(s.Type())
		reflect.Indirect(v).Set(reflect.ValueOf(dup))
	} else {
		// No copier, we copy ourselves and allow reflectwalk to guide
		// us deeper into the structure for copying.
		v = reflect.New(s.Type())
	}

	// Push the value onto the value stack for setting the struct field,
	// and add the struct itself to the containers stack in case we walk
	// deeper so that its own fields can be modified.
	w.valPush(v)
	w.cs = append(w.cs, v)

	return nil
}

func (w *walker) StructField(f reflect.StructField, v reflect.Value) error {
	if w.ignoring() {
		return nil
	}

	// If PkgPath is non-empty, this is a private (unexported) field.
	// We do not set this unexported since the Go runtime doesn't allow us.
	if f.PkgPath != "" {
		return reflectwalk.SkipEntry
	}

	switch f.Tag.Get(tagKey) {
	case "shallow":
		// If we're shallow copying then assign the value directly to the
		// struct and skip the entry.
		if v.IsValid() {
			s := w.cs[len(w.cs)-1]
			sf := reflect.Indirect(s).FieldByName(f.Name)
			if sf.CanSet() {
				sf.Set(v)
			}
		}

		return reflectwalk.SkipEntry

	case "ignore":
		// Do nothing
		return reflectwalk.SkipEntry
	}

	// Push the field onto the stack, we'll handle it when we exit
	// the struct field in Exit...
	w.valPush(reflect.ValueOf(f))

	return nil
}

// ignore causes the walker to ignore any more values until we exit this one
func (w *walker) ignore() {
	w.ignoreDepth = w.depth
}

func (w *walker) ignoring() bool {
	return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth
}

func (w *walker) pointerPeek() bool {
	return w.ps[w.depth] > 0
}

func (w *walker) valPop() reflect.Value {
	result := w.vals[len(w.vals)-1]
	w.vals = w.vals[:len(w.vals)-1]

	// If we're out of values, that means we popped everything off. In
	// this case, we reset the result so the next pushed value becomes
	// the result.
	if len(w.vals) == 0 {
		w.Result = nil
	}

	return result
}

func (w *walker) valPush(v reflect.Value) {
	w.vals = append(w.vals, v)

	// If we haven't set the result yet, then this is the result since
	// it is the first (outermost) value we're seeing.
	if w.Result == nil && v.IsValid() {
		w.Result = v.Interface()
	}
}

func (w *walker) replacePointerMaybe() {
	// Determine the last pointer value. If it is NOT a pointer, then
	// we need to push that onto the stack.
	if !w.pointerPeek() {
		w.valPush(reflect.Indirect(w.valPop()))
		return
	}

	v := w.valPop()

	// If the expected type is a pointer to an interface of any depth,
	// such as *interface{}, **interface{}, etc., then we need to convert
	// the value "v" from *CONCRETE to *interface{} so types match for
	// Set.
	//
	// Example if v is type *Foo where Foo is a struct, v would become
	// *interface{} instead. This only happens if we have an interface expectation
	// at this depth.
	//
	// For more info, see GH-16
	if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface {
		y := reflect.New(iType)           // Create *interface{}
		y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced)
		v = y                             // v is now typed *interface{} (where *v = Foo)
	}

	for i := 1; i < w.ps[w.depth]; i++ {
		if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok {
			iface := reflect.New(iType).Elem()
			iface.Set(v)
			v = iface
		}

		p := reflect.New(v.Type())
		p.Elem().Set(v)
		v = p
	}

	w.valPush(v)
}

// if this value is a Locker, lock it and add it to the locks slice
func (w *walker) lock(v reflect.Value) {
	if !w.useLocks {
		return
	}

	if !v.IsValid() || !v.CanInterface() {
		return
	}

	type rlocker interface {
		RLocker() sync.Locker
	}

	var locker sync.Locker

	// We can't call Interface() on a value directly, since that requires
	// a copy. This is OK, since the pointer to a value which is a sync.Locker
	// is also a sync.Locker.
	if v.Kind() == reflect.Ptr {
		switch l := v.Interface().(type) {
		case rlocker:
			// don't lock a mutex directly
			if _, ok := l.(*sync.RWMutex); !ok {
				locker = l.RLocker()
			}
		case sync.Locker:
			locker = l
		}
	} else if v.CanAddr() {
		switch l := v.Addr().Interface().(type) {
		case rlocker:
			// don't lock a mutex directly
			if _, ok := l.(*sync.RWMutex); !ok {
				locker = l.RLocker()
			}
		case sync.Locker:
			locker = l
		}
	}

	// still no callable locker
	if locker == nil {
		return
	}

	// don't lock a mutex directly
	switch locker.(type) {
	case *sync.Mutex, *sync.RWMutex:
		return
	}

	locker.Lock()
	w.locks[w.depth] = locker
}
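// A caller-side sketch of the locking mode (see TestCopy_embeddedLocker in
// copystructure_test.go): Config{Lock: true}.Copy(v) acquires v's Locker
// (preferring RLocker when one is available) for the duration of the walk,
// so a copy taken while another goroutine holds the lock blocks until it
// is released:
//
//	v := &EmbeddedLocker{Map: map[int]int{42: 111}}
//	v.Lock()
//	// in another goroutine:
//	dup, err := Config{Lock: true}.Copy(v) // blocks until v.Unlock()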
// wrapPtr is a helper that takes v and always makes it *v. copystructure
// stores things internally as pointers until the last moment before unwrapping
func wrapPtr(v reflect.Value) reflect.Value {
	if !v.IsValid() {
		return v
	}
	vPtr := reflect.New(v.Type())
	vPtr.Elem().Set(v)
	return vPtr
}

copystructure-1.2.0/copystructure_examples_test.go

package copystructure

import (
	"fmt"
)

func ExampleCopy() {
	input := map[string]interface{}{
		"bob": map[string]interface{}{
			"emails": []string{"a", "b"},
		},
	}

	dup, err := Copy(input)
	if err != nil {
		panic(err)
	}

	fmt.Printf("%#v", dup)
	// Output:
	// map[string]interface {}{"bob":map[string]interface {}{"emails":[]string{"a", "b"}}}
}
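// ExampleMust is a minimal sketch (it is not one of the package's original
// examples) of using Must to panic on copy errors during initialization.
func ExampleMust() {
	base := map[string]string{"region": "us-east-1"}

	// Must panics if Copy returns an error; handy for package-level vars.
	dup := Must(Copy(base)).(map[string]string)

	fmt.Printf("%#v", dup)
	// Output:
	// map[string]string{"region":"us-east-1"}
}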
copystructure-1.2.0/copystructure_test.go

package copystructure

import (
	"fmt"
	"reflect"
	"sync"
	"testing"
	"time"
	"unsafe"
)

func TestCopy_complex(t *testing.T) {
	v := map[string]interface{}{
		"foo": []string{"a", "b"},
		"bar": "baz",
	}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

func TestCopy_interfacePointer(t *testing.T) {
	type Nested struct {
		Field string
	}

	type Test struct {
		Value *interface{}
	}

	ifacePtr := func(v interface{}) *interface{} {
		return &v
	}

	v := Test{
		Value: ifacePtr(Nested{Field: "111"}),
	}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

func TestCopy_primitive(t *testing.T) {
	cases := []interface{}{
		42,
		"foo",
		1.2,
	}

	for _, tc := range cases {
		result, err := Copy(tc)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		if result != tc {
			t.Fatalf("bad: %#v", result)
		}
	}
}

func TestCopy_primitivePtr(t *testing.T) {
	i := 42
	s := "foo"
	f := 1.2
	cases := []interface{}{
		&i, &s, &f,
	}

	for i, tc := range cases {
		result, err := Copy(tc)
		if err != nil {
			t.Fatalf("err: %s", err)
		}

		if !reflect.DeepEqual(result, tc) {
			t.Fatalf("%d expected: %#v\nbad: %#v", i, tc, result)
		}
	}
}

func TestCopy_map(t *testing.T) {
	v := map[string]interface{}{
		"bar": "baz",
	}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

func TestCopy_array(t *testing.T) {
	v := [2]string{"bar", "baz"}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

func TestCopy_pointerToArray(t *testing.T) {
	v := &[2]string{"bar", "baz"}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

func TestCopy_slice(t *testing.T) {
	v := []string{"bar", "baz"}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

func TestCopy_pointerToSlice(t *testing.T) {
	v := &[]string{"bar", "baz"}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

func TestCopy_pointerToMap(t *testing.T) {
	v := &map[string]string{"bar": "baz"}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

func TestCopy_struct(t *testing.T) {
	type test struct {
		Value string
	}

	v := test{Value: "foo"}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

func TestCopy_structPtr(t *testing.T) {
	type test struct {
		Value string
	}

	v := &test{Value: "foo"}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

func TestCopy_structNil(t *testing.T) {
	type test struct {
		Value string
	}

	var v *test

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if v, ok := result.(*test); !ok {
		t.Fatalf("bad: %#v", result)
	} else if v != nil {
		t.Fatalf("bad: %#v", v)
	}
}

func TestCopy_structShallow(t *testing.T) {
	type test struct {
		Value  string
		Value2 *string `copy:"shallow"`
	}

	value2 := "bar"
	value2ptr := &value2

	v := test{Value: "foo", Value2: value2ptr}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}

	vcopy := result.(test)
	if vcopy.Value2 != v.Value2 {
		t.Fatal("should shallow copy the pointer")
	}
}

func TestCopy_structShallowWithUnsafe(t *testing.T) {
	type nested struct {
		v unsafe.Pointer
	}

	type test struct {
		Value  string
		Value2 *nested `copy:"shallow"`
	}

	value2 := &nested{}

	v := test{Value: "foo", Value2: value2}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}

	vcopy := result.(test)
	if vcopy.Value2 != v.Value2 {
		t.Fatal("should shallow copy the pointer")
	}
}

func TestCopy_structIgnore(t *testing.T) {
	type test struct {
		Value  string
		Value2 *string `copy:"ignore"`
	}

	value2 := "bar"
	value2ptr := &value2

	v := test{Value: "foo", Value2: value2ptr}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	vcopy := result.(test)
	if vcopy.Value2 != nil {
		t.Fatal("should be nil")
	}
}
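// ExampleCopy_structTags is an illustrative sketch (not part of the original
// test file) of the `copy:"shallow"` and `copy:"ignore"` struct tags
// documented on Copy; it mirrors TestCopy_structShallow and
// TestCopy_structIgnore above.
func ExampleCopy_structTags() {
	type T struct {
		Name   string
		Shared *[]string `copy:"shallow"`
		Skip   *int      `copy:"ignore"`
	}

	shared := []string{"a"}
	n := 42
	v := T{Name: "x", Shared: &shared, Skip: &n}

	dup := Must(Copy(v)).(T)
	fmt.Println(dup.Shared == v.Shared) // shallow: the pointer is reused
	fmt.Println(dup.Skip == nil)        // ignore: left as the zero value
	// Output:
	// true
	// true
}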
func TestCopy_structNested(t *testing.T) {
	type TestInner struct{}

	type Test struct {
		Test *TestInner
	}

	v := Test{}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

func TestCopy_structWithNestedArray(t *testing.T) {
	type TestInner struct {
		Value string
	}

	type Test struct {
		Value [2]TestInner
	}

	v := Test{
		Value: [2]TestInner{
			{Value: "bar"},
			{Value: "baz"},
		},
	}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

func TestCopy_structWithPointerToSliceField(t *testing.T) {
	type Test struct {
		Value *[]string
	}

	v := Test{
		Value: &[]string{"bar", "baz"},
	}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

func TestCopy_structWithPointerToArrayField(t *testing.T) {
	type Test struct {
		Value *[2]string
	}

	v := Test{
		Value: &[2]string{"bar", "baz"},
	}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

func TestCopy_structWithPointerToMapField(t *testing.T) {
	type Test struct {
		Value *map[string]string
	}

	v := Test{
		Value: &map[string]string{"bar": "baz"},
	}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

func TestCopy_structUnexported(t *testing.T) {
	type test struct {
		Value string

		private string
	}

	v := test{Value: "foo"}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

func TestCopy_structUnexportedMap(t *testing.T) {
	type Sub struct {
		Foo map[string]interface{}
	}

	type test struct {
		Value string

		private Sub
	}

	v := test{
		Value: "foo",
		private: Sub{
			Foo: map[string]interface{}{
				"yo": 42,
			},
		},
	}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// private should not be copied
	v.private = Sub{}
	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad:\n\n%#v\n\n%#v", result, v)
	}
}

func TestCopy_structUnexportedArray(t *testing.T) {
	type Sub struct {
		Foo [2]string
	}

	type test struct {
		Value string

		private Sub
	}

	v := test{
		Value: "foo",
		private: Sub{
			Foo: [2]string{"bar", "baz"},
		},
	}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// private should not be copied
	v.private = Sub{}
	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad:\n\n%#v\n\n%#v", result, v)
	}
}

// This is testing an unexported field containing a slice of pointers, which
// was a crashing case found in Terraform.
func TestCopy_structUnexportedPtrMap(t *testing.T) {
	type Foo interface{}

	type Sub struct {
		List []Foo
	}

	type test struct {
		Value string

		private *Sub
	}

	v := test{
		Value: "foo",
		private: &Sub{
			List: []Foo{&Sub{}},
		},
	}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// private should not be copied
	v.private = nil
	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad:\n\n%#v\n\n%#v", result, v)
	}
}

func TestCopy_nestedStructUnexported(t *testing.T) {
	type subTest struct {
		mine string
	}

	type test struct {
		Value string

		private subTest
	}

	v := test{Value: "foo"}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

func TestCopy_time(t *testing.T) {
	type test struct {
		Value time.Time
	}

	v := test{Value: time.Now().UTC()}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

func TestCopy_aliased(t *testing.T) {
	type (
		Int   int
		Str   string
		Map   map[Int]interface{}
		Slice []Str
	)

	v := Map{
		1: Map{10: 20},
		2: Map(nil),
		3: Slice{"a", "b"},
	}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

type EmbeddedLocker struct {
	sync.Mutex
	Map map[int]int
}

func TestCopy_embeddedLocker(t *testing.T) {
	v := &EmbeddedLocker{
		Map: map[int]int{42: 111},
	}

	// start locked to prevent copying
	v.Lock()

	var result interface{}
	var err error

	copied := make(chan bool)

	go func() {
		result, err = Config{Lock: true}.Copy(v)
		close(copied)
	}()

	// pause slightly to make sure copying is blocked
	select {
	case <-copied:
		t.Fatal("copy completed while locked!")
	case <-time.After(100 * time.Millisecond):
		v.Unlock()
	}

	<-copied

	// test that the mutex is in the correct state
	result.(*EmbeddedLocker).Lock()
	result.(*EmbeddedLocker).Unlock()

	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

// this will trigger the race detector, and usually panic if the original
// struct isn't properly locked during Copy
func TestCopy_lockRace(t *testing.T) {
	v := &EmbeddedLocker{
		Map: map[int]int{},
	}

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 100; i++ {
				v.Lock()
				v.Map[i] = i
				v.Unlock()
			}
		}()

		wg.Add(1)
		go func() {
			defer wg.Done()
			Config{Lock: true}.Copy(v)
		}()
	}

	wg.Wait()
	result, err := Config{Lock: true}.Copy(v)

	// test that the mutex is in the correct state
	result.(*EmbeddedLocker).Lock()
	result.(*EmbeddedLocker).Unlock()

	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

type LockedField struct {
	String string
	Locker *EmbeddedLocker

	// this should not get locked or have its state copied
	Mutex    sync.Mutex
	nilMutex *sync.Mutex
}

func TestCopy_lockedField(t *testing.T) {
	v := &LockedField{
		String: "orig",
		Locker: &EmbeddedLocker{
			Map: map[int]int{42: 111},
		},
	}

	// start locked to prevent copying
	v.Locker.Lock()
	v.Mutex.Lock()

	var result interface{}
	var err error

	copied := make(chan bool)

	go func() {
		result, err = Config{Lock: true}.Copy(v)
		close(copied)
	}()

	// pause slightly to make sure copying is blocked
	select {
	case <-copied:
		t.Fatal("copy completed while locked!")
	case <-time.After(100 * time.Millisecond):
		v.Locker.Unlock()
	}

	<-copied

	// test that the mutexes are in the correct state
	result.(*LockedField).Locker.Lock()
	result.(*LockedField).Locker.Unlock()
	result.(*LockedField).Mutex.Lock()
	result.(*LockedField).Mutex.Unlock()

	// this wasn't blocking, but should be unlocked for DeepEqual
	v.Mutex.Unlock()

	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if !reflect.DeepEqual(result, v) {
		t.Fatalf("expected:\n%#v\nbad:\n%#v\n", v, result)
	}
}
// test something that doesn't contain a lock internally
type lockedMap map[int]int

var mapLock sync.Mutex

func (m lockedMap) Lock()   { mapLock.Lock() }
func (m lockedMap) Unlock() { mapLock.Unlock() }

func TestCopy_lockedMap(t *testing.T) {
	v := lockedMap{1: 2}
	v.Lock()

	var result interface{}
	var err error

	copied := make(chan bool)

	go func() {
		result, err = Config{Lock: true}.Copy(&v)
		close(copied)
	}()

	// pause slightly to make sure copying is blocked
	select {
	case <-copied:
		t.Fatal("copy completed while locked!")
	case <-time.After(100 * time.Millisecond):
		v.Unlock()
	}

	<-copied

	// test that the mutex is in the correct state
	result.(*lockedMap).Lock()
	result.(*lockedMap).Unlock()

	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if !reflect.DeepEqual(result, &v) {
		t.Fatalf("bad: %#v", result)
	}
}

// Use an RLock if available
type RLocker struct {
	sync.RWMutex
	Map map[int]int
}

func TestCopy_rLocker(t *testing.T) {
	v := &RLocker{
		Map: map[int]int{1: 2},
	}
	v.Lock()

	var result interface{}
	var err error

	copied := make(chan bool)

	go func() {
		result, err = Config{Lock: true}.Copy(v)
		close(copied)
	}()

	// pause slightly to make sure copying is blocked
	select {
	case <-copied:
		t.Fatal("copy completed while locked!")
	case <-time.After(100 * time.Millisecond):
		v.Unlock()
	}

	<-copied

	// test that the mutex is in the correct state
	vCopy := result.(*RLocker)
	vCopy.Lock()
	vCopy.Unlock()
	vCopy.RLock()
	vCopy.RUnlock()

	// now make sure we can copy during an RLock
	v.RLock()
	result, err = Config{Lock: true}.Copy(v)
	if err != nil {
		t.Fatal(err)
	}
	v.RUnlock()

	vCopy = result.(*RLocker)
	vCopy.Lock()
	vCopy.Unlock()
	vCopy.RLock()
	vCopy.RUnlock()

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("bad: %#v", result)
	}
}

// Test that we don't panic when encountering nil Lockers
func TestCopy_missingLockedField(t *testing.T) {
	v := &LockedField{
		String: "orig",
	}

	result, err := Config{Lock: true}.Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("expected:\n%#v\nbad:\n%#v\n", v, result)
	}
}

type PointerLocker struct {
	Mu sync.Mutex
}

func (p *PointerLocker) Lock()   { p.Mu.Lock() }
func (p *PointerLocker) Unlock() { p.Mu.Unlock() }

func TestCopy_pointerLockerNil(t *testing.T) {
	v := struct {
		P *PointerLocker
	}{}

	_, err := Config{Lock: true}.Copy(&v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
}

func TestCopy_sliceWithNil(t *testing.T) {
	v := [](*int){nil}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("expected:\n%#v\ngot:\n%#v", v, result)
	}
}

func TestCopy_mapWithNil(t *testing.T) {
	v := map[int](*int){0: nil}

	result, err := Copy(v)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(result, v) {
		t.Fatalf("expected:\n%#v\ngot:\n%#v", v, result)
	}
}

// While this is safe to lock and copy directly, copystructure requires a
// pointer to reflect the value safely.
func TestCopy_valueWithLockPointer(t *testing.T) {
	v := struct {
		*sync.Mutex
		X int
	}{
		Mutex: &sync.Mutex{},
		X:     3,
	}

	_, err := Config{Lock: true}.Copy(v)
	if err != errPointerRequired {
		t.Fatalf("expected errPointerRequired, got: %v", err)
	}
}

func TestCopy_mapWithPointers(t *testing.T) {
	type T struct {
		S string
	}

	v := map[string]interface{}{
		"a": &T{S: "hello"},
	}

	result, err := Copy(v)
	if err != nil {
		t.Fatal(err)
	}

	if !reflect.DeepEqual(v, result) {
		t.Fatalf("%#v", result)
	}
}

func TestCopy_structWithMapWithPointers(t *testing.T) {
	type T struct {
		S string
		M map[string]interface{}
	}

	v := &T{
		S: "a",
		M: map[string]interface{}{
			"b": &T{
				S: "b",
			},
		},
	}

	result, err := Copy(v)
	if err != nil {
		t.Fatal(err)
	}

	if !reflect.DeepEqual(v, result) {
		t.Fatal(result)
	}
}

type testT struct {
	N   int
	Spp **string
	X   testX
	Xp  *testX
	Xpp **testX
}

type testX struct {
	Tp  *testT
	Tpp **testT
	Ip  *interface{}
	Ep  *error
	S   fmt.Stringer
}

type stringer struct{}

func (s *stringer) String() string {
	return "test string"
}

func TestCopy_structWithPointersAndInterfaces(t *testing.T) {
	// test that we can copy various nested and chained pointers and interfaces
	s := "val"
	sp := &s
	spp := &sp

	i := interface{}(11)

	tp := &testT{
		N: 2,
	}

	xp := &testX{
		Tp:  tp,
		Tpp: &tp,
		Ip:  &i,
		S:   &stringer{},
	}

	v := &testT{
		N:   1,
		Spp: spp,
		X:   testX{},
		Xp:  xp,
		Xpp: &xp,
	}

	result, err := Copy(v)
	if err != nil {
		t.Fatal(err)
	}

	if !reflect.DeepEqual(v, result) {
		t.Fatal(result)
	}
}

func Test_pointerInterfacePointer(t *testing.T) {
	s := "hi"
	si := interface{}(&s)
	sip := &si

	result, err := Copy(sip)
	if err != nil {
		t.Fatal(err)
	}

	if !reflect.DeepEqual(sip, result) {
		t.Fatalf("%#v != %#v\n", sip, result)
	}
}

func Test_pointerInterfacePointer2(t *testing.T) {
	type T struct {
		I *interface{}
		J **fmt.Stringer
	}

	x := 1
	y := &stringer{}

	i := interface{}(&x)
	j := fmt.Stringer(y)
	jp := &j

	v := &T{
		I: &i,
		J: &jp,
	}
	result, err := Copy(v)
	if err != nil {
		t.Fatal(err)
	}

	if !reflect.DeepEqual(v, result) {
		t.Fatalf("%#v != %#v\n", v, result)
	}
}

// This test catches a bug where, when unexported fields came first, the
// subsequent fields wouldn't be copied.
func TestCopy_unexportedFieldFirst(t *testing.T) {
	type P struct {
		mu       sync.Mutex
		Old, New string
	}

	type T struct {
		M map[string]*P
	}

	v := &T{
		M: map[string]*P{
			"a": &P{Old: "", New: "2"},
		},
	}

	result, err := Copy(v)
	if err != nil {
		t.Fatal(err)
	}

	if !reflect.DeepEqual(v, result) {
		t.Fatalf("\n%#v\n\n%#v", v, result)
	}
}

func TestCopy_nilPointerInSlice(t *testing.T) {
	type T struct {
		Ps []*int
	}

	v := &T{
		Ps: []*int{nil},
	}

	result, err := Copy(v)
	if err != nil {
		t.Fatal(err)
	}

	if !reflect.DeepEqual(v, result) {
		t.Fatalf("\n%#v\n\n%#v", v, result)
	}
}

//-------------------------------------------------------------------
// The tests below all test various pointer cases around copying
// a structure that uses a defined Copier. This was originally raised
// around issue #26.

func TestCopy_timePointer(t *testing.T) {
	type T struct {
		Value *time.Time
	}

	now := time.Now()
	v := &T{
		Value: &now,
	}

	result, err := Copy(v)
	if err != nil {
		t.Fatal(err)
	}

	if !reflect.DeepEqual(v, result) {
		t.Fatalf("\n%#v\n\n%#v", v, result)
	}
}

func TestCopy_timeNonPointer(t *testing.T) {
	type T struct {
		Value time.Time
	}

	v := &T{
		Value: time.Now(),
	}

	result, err := Copy(v)
	if err != nil {
		t.Fatal(err)
	}

	if !reflect.DeepEqual(v, result) {
		t.Fatalf("\n%#v\n\n%#v", v, result)
	}
}

func TestCopy_timeDoublePointer(t *testing.T) {
	type T struct {
		Value **time.Time
	}

	now := time.Now()
	nowP := &now
	nowPP := &nowP
	v := &T{
		Value: nowPP,
	}

	result, err := Copy(v)
	if err != nil {
		t.Fatal(err)
	}

	if !reflect.DeepEqual(v, result) {
		t.Fatalf("\n%#v\n\n%#v", v, result)
	}
}

type nestedValue struct {
	v string
}

func TestCopy_customCopierConfig(t *testing.T) {
	type T struct {
		Val *nestedValue
	}

	v := &T{
		Val: &nestedValue{v: "original"},
	}

	cfg := Config{
		Copiers: map[reflect.Type]CopierFunc{
			reflect.TypeOf(nestedValue{}): customCopier,
		},
	}

	result, err := cfg.Copy(v)
	if err != nil {
		t.Fatal(err)
	}

	copiedVal := result.(*T)
	if !reflect.DeepEqual(v.Val.v, copiedVal.Val.v) {
		t.Fatalf("\nexpected: %#v\ngiven: %#v", v.Val.v, copiedVal.Val.v)
	}
}

func customCopier(v interface{}) (interface{}, error) {
	return v.(nestedValue), nil
}
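// ExampleConfig_copiers is a minimal sketch (not one of the original tests)
// of supplying a custom CopierFunc through Config rather than the global
// Copiers map; it mirrors TestCopy_customCopierConfig above.
func ExampleConfig_copiers() {
	cfg := Config{
		Copiers: map[reflect.Type]CopierFunc{
			reflect.TypeOf(nestedValue{}): customCopier,
		},
	}

	// The walker calls customCopier instead of walking nestedValue's
	// fields, so even the unexported field v survives the copy.
	dup, err := cfg.Copy(&nestedValue{v: "original"})
	if err != nil {
		panic(err)
	}

	fmt.Printf("%#v", dup)
	// Output:
	// &copystructure.nestedValue{v:"original"}
}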
func TestCopy_customCopierShallowValue(t *testing.T) {
	type T struct{}

	v := &T{}

	cfg := Config{
		ShallowCopiers: map[reflect.Type]struct{}{
			reflect.TypeOf(T{}): struct{}{},
		},
	}

	result, err := cfg.Copy(v)
	if err != nil {
		t.Fatal(err)
	}

	copiedVal := result.(*T)
	if v != copiedVal {
		t.Fatal("value not shallow copied")
	}
}

copystructure-1.2.0/go.mod

module github.com/mitchellh/copystructure

go 1.15

require github.com/mitchellh/reflectwalk v1.0.2

copystructure-1.2.0/go.sum

github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=