pax_global_header00006660000000000000000000000064146102263550014516gustar00rootroot0000000000000052 comment=0167142bde7ad9f5181a6a92eafe1f3c0c2af9fb golang-github-dennwc-btrfs-0.0~git20240418.0167142/000077500000000000000000000000001461022635500210505ustar00rootroot00000000000000golang-github-dennwc-btrfs-0.0~git20240418.0167142/LICENSE000066400000000000000000000261351461022635500220640ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. golang-github-dennwc-btrfs-0.0~git20240418.0167142/README.md000066400000000000000000000000431461022635500223240ustar00rootroot00000000000000# btrfs Btrfs library in a pure Go golang-github-dennwc-btrfs-0.0~git20240418.0167142/btrfs.go000066400000000000000000000152421461022635500225230ustar00rootroot00000000000000package btrfs import ( "fmt" "io" "os" "path/filepath" "strconv" "syscall" "github.com/dennwc/ioctl" ) const SuperMagic uint32 = 0x9123683E func CloneFile(dst, src *os.File) error { return iocClone(dst, src) } func Open(path string, ro bool) (*FS, error) { if ok, err := IsSubVolume(path); err != nil { return nil, err } else if !ok { return nil, ErrNotBtrfs{Path: path} } var ( dir *os.File err error ) if ro { dir, err = os.OpenFile(path, os.O_RDONLY|syscall.O_NOATIME, 0644) if err != nil { // Try without O_NOATIME as it requires ownership of the file // or other priviliges dir, err = os.OpenFile(path, os.O_RDONLY, 0644) } } else { dir, err = os.Open(path) } if err != nil { return nil, err } else if st, err := dir.Stat(); err != nil { dir.Close() return nil, err } else if !st.IsDir() { dir.Close() return nil, fmt.Errorf("not a directory: %s", path) } return &FS{f: dir}, nil } type FS struct { f *os.File } func (f *FS) Close() error { return f.f.Close() } type Info struct { MaxID uint64 NumDevices uint64 FSID FSID NodeSize uint32 SectorSize uint32 CloneAlignment uint32 } func (f *FS) SubVolumeID() (uint64, 
error) { id, err := getFileRootID(f.f) if err != nil { return 0, err } return uint64(id), nil } func (f *FS) Info() (out Info, err error) { var arg btrfs_ioctl_fs_info_args arg, err = iocFsInfo(f.f) if err == nil { out = Info{ MaxID: arg.max_id, NumDevices: arg.num_devices, FSID: arg.fsid, NodeSize: arg.nodesize, SectorSize: arg.sectorsize, CloneAlignment: arg.clone_alignment, } } return } type DevInfo struct { UUID UUID BytesUsed uint64 TotalBytes uint64 Path string } func (f *FS) GetDevInfo(id uint64) (out DevInfo, err error) { var arg btrfs_ioctl_dev_info_args arg.devid = id if err = ioctl.Do(f.f, _BTRFS_IOC_DEV_INFO, &arg); err != nil { return } out.UUID = arg.uuid out.BytesUsed = arg.bytes_used out.TotalBytes = arg.total_bytes out.Path = stringFromBytes(arg.path[:]) return } type DevStats struct { WriteErrs uint64 ReadErrs uint64 FlushErrs uint64 // Checksum error, bytenr error or contents is illegal: this is an // indication that the block was damaged during read or write, or written to // wrong location or read from wrong location. CorruptionErrs uint64 // An indication that blocks have not been written. 
GenerationErrs uint64 Unknown []uint64 } func (f *FS) GetDevStats(id uint64) (out DevStats, err error) { var arg btrfs_ioctl_get_dev_stats arg.devid = id arg.nr_items = _BTRFS_DEV_STAT_VALUES_MAX arg.flags = 0 if err = ioctl.Do(f.f, _BTRFS_IOC_GET_DEV_STATS, &arg); err != nil { return } i := 0 out.WriteErrs = arg.values[i] i++ out.ReadErrs = arg.values[i] i++ out.FlushErrs = arg.values[i] i++ out.CorruptionErrs = arg.values[i] i++ out.GenerationErrs = arg.values[i] i++ if int(arg.nr_items) > i { out.Unknown = arg.values[i:arg.nr_items] } return } type FSFeatureFlags struct { Compatible FeatureFlags CompatibleRO FeatureFlags Incompatible IncompatFeatures } func (f *FS) GetFeatures() (out FSFeatureFlags, err error) { var arg btrfs_ioctl_feature_flags if err = ioctl.Do(f.f, _BTRFS_IOC_GET_FEATURES, &arg); err != nil { return } out = FSFeatureFlags{ Compatible: arg.compat_flags, CompatibleRO: arg.compat_ro_flags, Incompatible: arg.incompat_flags, } return } func (f *FS) GetSupportedFeatures() (out FSFeatureFlags, err error) { var arg [3]btrfs_ioctl_feature_flags if err = ioctl.Do(f.f, _BTRFS_IOC_GET_SUPPORTED_FEATURES, &arg); err != nil { return } out = FSFeatureFlags{ Compatible: arg[0].compat_flags, CompatibleRO: arg[0].compat_ro_flags, Incompatible: arg[0].incompat_flags, } //for i, a := range arg { // out[i] = FSFeatureFlags{ // Compatible: a.compat_flags, // CompatibleRO: a.compat_ro_flags, // Incompatible: a.incompat_flags, // } //} return } func (f *FS) GetFlags() (SubvolFlags, error) { return iocSubvolGetflags(f.f) } func (f *FS) SetFlags(flags SubvolFlags) error { return iocSubvolSetflags(f.f, flags) } func (f *FS) Sync() (err error) { if err = ioctl.Ioctl(f.f, _BTRFS_IOC_START_SYNC, 0); err != nil { return } return ioctl.Ioctl(f.f, _BTRFS_IOC_WAIT_SYNC, 0) } func (f *FS) CreateSubVolume(name string) error { return CreateSubVolume(filepath.Join(f.f.Name(), name)) } func (f *FS) DeleteSubVolume(name string) error { return 
DeleteSubVolume(filepath.Join(f.f.Name(), name)) } func (f *FS) Snapshot(dst string, ro bool) error { return SnapshotSubVolume(f.f.Name(), filepath.Join(f.f.Name(), dst), ro) } func (f *FS) SnapshotSubVolume(name string, dst string, ro bool) error { return SnapshotSubVolume(filepath.Join(f.f.Name(), name), filepath.Join(f.f.Name(), dst), ro) } func (f *FS) Send(w io.Writer, parent string, subvols ...string) error { if parent != "" { parent = filepath.Join(f.f.Name(), parent) } sub := make([]string, 0, len(subvols)) for _, s := range subvols { sub = append(sub, filepath.Join(f.f.Name(), s)) } return Send(w, parent, sub...) } func (f *FS) Receive(r io.Reader) error { return Receive(r, f.f.Name()) } func (f *FS) ReceiveTo(r io.Reader, mount string) error { return Receive(r, filepath.Join(f.f.Name(), mount)) } func (f *FS) ListSubvolumes(filter func(SubvolInfo) bool) ([]SubvolInfo, error) { m, err := listSubVolumes(f.f, filter) if err != nil { return nil, err } out := make([]SubvolInfo, 0, len(m)) for _, v := range m { out = append(out, v) } return out, nil } func (f *FS) SubvolumeByUUID(uuid UUID) (*SubvolInfo, error) { id, err := lookupUUIDSubvolItem(f.f, uuid) if err != nil { return nil, err } return subvolSearchByRootID(f.f, id, "") } func (f *FS) SubvolumeByReceivedUUID(uuid UUID) (*SubvolInfo, error) { id, err := lookupUUIDReceivedSubvolItem(f.f, uuid) if err != nil { return nil, err } return subvolSearchByRootID(f.f, id, "") } func (f *FS) SubvolumeByPath(path string) (*SubvolInfo, error) { return subvolSearchByPath(f.f, path) } func (f *FS) Usage() (UsageInfo, error) { return spaceUsage(f.f) } func (f *FS) Balance(flags BalanceFlags) (BalanceProgress, error) { args := btrfs_ioctl_balance_args{flags: flags} err := iocBalanceV2(f.f, &args) return args.stat, err } func (f *FS) Resize(size int64) error { amount := strconv.FormatInt(size, 10) args := &btrfs_ioctl_vol_args{} args.SetName(amount) if err := iocResize(f.f, args); err != nil { return fmt.Errorf("resize 
failed: %v", err) } return nil } func (f *FS) ResizeToMax() error { args := &btrfs_ioctl_vol_args{} args.SetName("max") if err := iocResize(f.f, args); err != nil { return fmt.Errorf("resize failed: %v", err) } return nil } golang-github-dennwc-btrfs-0.0~git20240418.0167142/btrfs_h.go000066400000000000000000000034761461022635500230400ustar00rootroot00000000000000package btrfs import "strings" const maxUint64 = 1<<64 - 1 const labelSize = 256 type FeatureFlags uint64 const ( FeatureCompatROFreeSpaceTree = FeatureFlags(1 << 0) ) type IncompatFeatures uint64 func (f IncompatFeatures) String() string { var s []string for i, name := range incompatFeatureNames { if uint64(f)&uint64(i) != 0 { s = append(s, name) } } return strings.Join(s, ",") } var incompatFeatureNames = []string{ "DefaultSubvol", "MixedGroups", "CompressLZO", "CompressLZOv2", "BigMetadata", "ExtendedIRef", "RAID56", "SkinnyMetadata", "NoHoles", } const ( FeatureIncompatMixedBackRef = IncompatFeatures(1 << 0) FeatureIncompatDefaultSubvol = IncompatFeatures(1 << 1) FeatureIncompatMixedGroups = IncompatFeatures(1 << 2) FeatureIncompatCompressLZO = IncompatFeatures(1 << 3) // Some patches floated around with a second compression method // lets save that incompat here for when they do get in. // Note we don't actually support it, we're just reserving the number. FeatureIncompatCompressLZOv2 = IncompatFeatures(1 << 4) // Older kernels tried to do bigger metadata blocks, but the // code was pretty buggy. Lets not let them try anymore. FeatureIncompatBigMetadata = IncompatFeatures(1 << 5) FeatureIncompatExtendedIRef = IncompatFeatures(1 << 6) FeatureIncompatRAID56 = IncompatFeatures(1 << 7) FeatureIncompatSkinnyMetadata = IncompatFeatures(1 << 8) FeatureIncompatNoHoles = IncompatFeatures(1 << 9) ) // Flags definition for balance. type BalanceFlags uint64 // Restriper's general type filter. 
const ( BalanceData = BalanceFlags(1 << 0) BalanceSystem = BalanceFlags(1 << 1) BalanceMetadata = BalanceFlags(1 << 2) BalanceMask = (BalanceData | BalanceSystem | BalanceMetadata) BalanceForce = BalanceFlags(1 << 3) BalanceResume = BalanceFlags(1 << 4) ) golang-github-dennwc-btrfs-0.0~git20240418.0167142/btrfs_list.go000066400000000000000000000006411461022635500235530ustar00rootroot00000000000000package btrfs import "os" func getFileRootID(file *os.File) (objectID, error) { args := btrfs_ioctl_ino_lookup_args{ objectid: firstFreeObjectid, } if err := iocInoLookup(file, &args); err != nil { return 0, err } return args.treeid, nil } func getPathRootID(path string) (objectID, error) { fs, err := Open(path, true) if err != nil { return 0, err } defer fs.Close() return getFileRootID(fs.f) } golang-github-dennwc-btrfs-0.0~git20240418.0167142/btrfs_test.go000066400000000000000000000124171461022635500235630ustar00rootroot00000000000000package btrfs import ( "github.com/dennwc/btrfs/test" "io" "io/ioutil" "os" "path/filepath" "reflect" "sort" "testing" ) const sizeDef = 256 * 1024 * 1024 func TestOpen(t *testing.T) { dir, closer := btrfstest.New(t, sizeDef) defer closer() fs, err := Open(dir, true) if err != nil { t.Fatal(err) } if err = fs.Close(); err != nil { t.Fatal(err) } } func TestIsSubvolume(t *testing.T) { dir, closer := btrfstest.New(t, sizeDef) defer closer() isSubvol := func(path string, expect bool) { ok, err := IsSubVolume(path) if err != nil { t.Errorf("failed to check subvolume %v: %v", path, err) return } else if ok != expect { t.Errorf("unexpected result for %v", path) } } mkdir := func(path string) { path = filepath.Join(dir, path) if err := os.MkdirAll(path, 0755); err != nil { t.Fatalf("cannot create dir %v: %v", path, err) } isSubvol(path, false) } mksub := func(path string) { path = filepath.Join(dir, path) if err := CreateSubVolume(path); err != nil { t.Fatalf("cannot create subvolume %v: %v", path, err) } isSubvol(path, true) } mksub("v1") 
mkdir("v1/d2") mksub("v1/v2") mkdir("v1/d2/d3") mksub("v1/d2/v3") mkdir("v1/v2/d3") mksub("v1/v2/v3") mkdir("d1") mkdir("d1/d2") mksub("d1/v2") mkdir("d1/d2/d3") mksub("d1/d2/v3") mkdir("d1/v2/d3") mksub("d1/v2/v3") } func TestSubvolumes(t *testing.T) { dir, closer := btrfstest.New(t, sizeDef) defer closer() fs, err := Open(dir, false) if err != nil { t.Fatal(err) } defer fs.Close() mksub := func(in string, path string) { if in != "" { path = filepath.Join(dir, in, path) } else { path = filepath.Join(dir, path) } if err := CreateSubVolume(path); err != nil { t.Fatalf("cannot create subvolume %v: %v", path, err) } } delsub := func(path string) { path = filepath.Join(dir, path) if err := DeleteSubVolume(path); err != nil { t.Fatalf("cannot delete subvolume %v: %v", path, err) } } expect := func(exp []string) { subs, err := fs.ListSubvolumes(nil) if err != nil { t.Fatal(err) } var got []string for _, s := range subs { if s.UUID.IsZero() { t.Fatalf("zero uuid in %+v", s) } if s.Path != "" { got = append(got, s.Path) } } sort.Strings(got) sort.Strings(exp) if !reflect.DeepEqual(got, exp) { t.Fatalf("list failed:\ngot: %v\nvs\nexp: %v", got, exp) } } names := []string{"foo", "bar", "baz"} for _, name := range names { mksub("", name) } for _, name := range names { mksub(names[0], name) } expect([]string{ "foo", "bar", "baz", "foo/foo", "foo/bar", "foo/baz", }) delsub("foo/bar") expect([]string{ "foo", "bar", "baz", "foo/foo", "foo/baz", }) path := filepath.Join(names[0], names[2]) mksub(path, "new") path = filepath.Join(path, "new") id, err := getPathRootID(filepath.Join(dir, path)) if err != nil { t.Fatal(err) } info, err := subvolSearchByRootID(fs.f, id, "") if err != nil { t.Fatal(err) } else if info.Path != path { t.Fatalf("wrong path returned: %v vs %v", info.Path, path) } } func TestCompression(t *testing.T) { dir, closer := btrfstest.New(t, sizeDef) defer closer() fs, err := Open(dir, true) if err != nil { t.Fatal(err) } defer fs.Close() if err := 
fs.CreateSubVolume("sub"); err != nil { t.Fatal(err) } path := filepath.Join(dir, "sub") if err := SetCompression(path, LZO); err != nil { t.Fatal(err) } if c, err := GetCompression(path); err != nil { t.Fatal(err) } else if c != LZO { t.Fatalf("unexpected compression returned: %q", string(c)) } } func TestCloneFile(t *testing.T) { dir, closer := btrfstest.New(t, sizeDef) defer closer() f1, err := os.Create(filepath.Join(dir, "1.dat")) if err != nil { t.Fatal(err) } defer f1.Close() const data = "btrfs_test" _, err = f1.WriteString(data) if err != nil { t.Fatal(err) } f2, err := os.Create(filepath.Join(dir, "2.dat")) if err != nil { t.Fatal(err) } defer f2.Close() err = CloneFile(f2, f1) if err != nil { t.Fatal(err) } buf := make([]byte, len(data)) n, err := f2.Read(buf) if err != nil && err != io.EOF { t.Fatal(err) } buf = buf[:n] if string(buf) != data { t.Fatalf("wrong data returned: %q", string(buf)) } } func TestResize(t *testing.T) { dir, err := ioutil.TempDir("", "btrfs_data_") if err != nil { t.Fatal(err) } defer os.RemoveAll(dir) fname := filepath.Join(dir, "data") if err = btrfstest.Mkfs(fname, sizeDef); err != nil { t.Fatal(err) } mnt := filepath.Join(dir, "mnt") if err = os.MkdirAll(mnt, 0755); err != nil { t.Fatal(err) } if err = btrfstest.Mount(mnt, fname); err != nil { t.Fatal(err) } defer btrfstest.Unmount(mnt) fs, err := Open(mnt, false) if err != nil { t.Fatal(err) } st, err := fs.Usage() fs.Close() if err != nil { t.Fatal(err) } if err = btrfstest.Unmount(mnt); err != nil { t.Fatal(err) } var newSize int64 = sizeDef newSize = int64(float64(newSize) * 1.1) if err = os.Truncate(fname, newSize); err != nil { t.Fatal(err) } if err = btrfstest.Mount(mnt, fname); err != nil { t.Fatal(err) } fs, err = Open(mnt, false) if err != nil { t.Fatal(err) } defer fs.Close() if err = fs.ResizeToMax(); err != nil { t.Fatal(err) } st2, err := fs.Usage() if err != nil { t.Fatal(err) } else if st.Total >= st2.Total { t.Fatal("to resized:", st.Total, st2.Total) } } 
golang-github-dennwc-btrfs-0.0~git20240418.0167142/btrfs_tree.go000066400000000000000000000167441461022635500235520ustar00rootroot00000000000000package btrfs import ( "fmt" "time" "unsafe" ) const ( _BTRFS_BLOCK_GROUP_TYPE_MASK = (blockGroupData | blockGroupSystem | blockGroupMetadata) _BTRFS_BLOCK_GROUP_PROFILE_MASK = (blockGroupRaid0 | blockGroupRaid1 | blockGroupRaid5 | blockGroupRaid6 | blockGroupDup | blockGroupRaid10) _BTRFS_BLOCK_GROUP_MASK = _BTRFS_BLOCK_GROUP_TYPE_MASK | _BTRFS_BLOCK_GROUP_PROFILE_MASK ) type rootRef struct { DirID objectID Sequence uint64 Name string } func (rootRef) btrfsSize() int { return 18 } func asUint64(p []byte) uint64 { return *(*uint64)(unsafe.Pointer(&p[0])) } func asUint32(p []byte) uint32 { return *(*uint32)(unsafe.Pointer(&p[0])) } func asUint16(p []byte) uint16 { return *(*uint16)(unsafe.Pointer(&p[0])) } func asRootRef(p []byte) rootRef { const sz = 18 // assuming that it is highly unsafe to have sizeof(struct) > len(data) // (*btrfs_root_ref)(unsafe.Pointer(&p[0])) and sizeof(btrfs_root_ref) == 24 ref := rootRef{ DirID: objectID(asUint64(p[0:])), Sequence: asUint64(p[8:]), } if n := asUint16(p[16:]); n > 0 { ref.Name = string(p[sz : sz+n : sz+n]) } return ref } var treeKeyNames = map[treeKeyType]string{ inodeItemKey: "inodeItem", inodeRefKey: "inodeRef", inodeExtrefKey: "inodeExtref", xattrItemKey: "xattrItemKey", orphanItemKey: "orphanItem", dirLogItemKey: "dirLogItem", dirLogIndexKey: "dirLogIndex", dirItemKey: "dirItem", dirIndexKey: "dirIndex", extentDataKey: "extentData", extentCsumKey: "extentCsum", rootItemKey: "rootItem", rootBackrefKey: "rootBackref", rootRefKey: "rootRef", extentItemKey: "extentItem", metadataItemKey: "metadataItem", treeBlockRefKey: "treeBlockRef", extentDataRefKey: "extentDataRef", extentRefV0Key: "extentRefV0", sharedBlockRefKey: "sharedBlockRef", sharedDataRefKey: "sharedDataRef", blockGroupItemKey: "blockGroupItem", freeSpaceInfoKey: "freeSpaceInfo", freeSpaceExtentKey: "freeSpaceExtent", 
freeSpaceBitmapKey: "freeSpaceBitmap", devExtentKey: "devExtent", devItemKey: "devItem", chunkItemKey: "chunkItem", qgroupStatusKey: "qgroupStatus", qgroupInfoKey: "qgroupInfo", qgroupLimitKey: "qgroupLimit", qgroupRelationKey: "qgroupRelation", temporaryItemKey: "temporaryItem", persistentItemKey: "persistentItem", devReplaceKey: "devReplace", uuidKeySubvol: "uuidKeySubvol", uuidKeyReceivedSubvol: "uuidKeyReceivedSubvol", stringItemKey: "stringItem", } func (t treeKeyType) String() string { if name, ok := treeKeyNames[t]; ok { return name } return fmt.Sprintf("%#x", int(t)) } // btrfs_disk_key_raw is a raw bytes for btrfs_disk_key structure type btrfs_disk_key_raw [17]byte func (p btrfs_disk_key_raw) Decode() diskKey { return diskKey{ ObjectID: asUint64(p[0:]), Type: p[8], Offset: asUint64(p[9:]), } } type diskKey struct { ObjectID uint64 Type byte Offset uint64 } // btrfs_timespec_raw is a raw bytes for btrfs_timespec structure. type btrfs_timespec_raw [12]byte func (t btrfs_timespec_raw) Decode() time.Time { sec, nsec := asUint64(t[0:]), asUint32(t[8:]) return time.Unix(int64(sec), int64(nsec)) } // timeBlock is a raw set of bytes for 4 time fields. // It is used to keep correct alignment when accessing structures from btrfs. 
type timeBlock [4]btrfs_timespec_raw

// btrfs_inode_item_raw mirrors the on-disk btrfs_inode_item, with the four
// trailing timespecs folded into a single timeBlock to keep Go's field
// alignment in step with the packed C layout.
type btrfs_inode_item_raw struct {
	generation  uint64
	transid     uint64
	size        uint64
	nbytes      uint64
	block_group uint64
	nlink       uint32
	uid         uint32
	gid         uint32
	mode        uint32
	rdev        uint64
	flags       uint64
	sequence    uint64
	_           [4]uint64 // reserved
	// atime btrfs_timespec
	// ctime btrfs_timespec
	// mtime btrfs_timespec
	// otime btrfs_timespec
	times timeBlock
}

// Decode copies the raw inode item into its exported, typed form,
// converting the four packed timespecs to time.Time.
func (v btrfs_inode_item_raw) Decode() inodeItem {
	return inodeItem{
		Gen:        v.generation,
		TransID:    v.transid,
		Size:       v.size,
		NBytes:     v.nbytes,
		BlockGroup: v.block_group,
		NLink:      v.nlink,
		UID:        v.uid,
		GID:        v.gid,
		Mode:       v.mode,
		RDev:       v.rdev,
		Flags:      v.flags,
		Sequence:   v.sequence,
		ATime:      v.times[0].Decode(),
		CTime:      v.times[1].Decode(),
		MTime:      v.times[2].Decode(),
		OTime:      v.times[3].Decode(),
	}
}

// inodeItem is the decoded form of btrfs_inode_item.
type inodeItem struct {
	Gen        uint64 // nfs style generation number
	TransID    uint64 // transid that last touched this inode
	Size       uint64
	NBytes     uint64
	BlockGroup uint64
	NLink      uint32
	UID        uint32
	GID        uint32
	Mode       uint32
	RDev       uint64
	Flags      uint64
	Sequence   uint64 // modification sequence number for NFS
	ATime      time.Time
	CTime      time.Time
	MTime      time.Time
	OTime      time.Time
}

// asRootItem reinterprets the leading bytes of p as a raw root item.
// The caller must guarantee len(p) >= len(btrfs_root_item_raw).
func asRootItem(p []byte) *btrfs_root_item_raw {
	return (*btrfs_root_item_raw)(unsafe.Pointer(&p[0]))
}

// btrfs_root_item_raw is the full packed on-disk btrfs_root_item
// (439 bytes).
type btrfs_root_item_raw [439]byte

// Decode unpacks the root item. Because the C structure is packed while Go
// structs are naturally aligned, the item is read in three parts: p1 (fields
// whose alignment happens to match), a hand-sliced 23-byte middle section
// (refs + drop_progress key + two level bytes), and p3 (the generation_v2
// tail, realigned at off3).
func (p btrfs_root_item_raw) Decode() rootItem {
	const (
		off2 = unsafe.Sizeof(btrfs_root_item_raw_p1{})
		// 23 = refs(4) + btrfs_disk_key(17) + drop_level(1) + level(1)
		off3 = off2 + 23
	)
	p1 := (*btrfs_root_item_raw_p1)(unsafe.Pointer(&p[0]))
	p2 := p[off2 : off2+23]
	p2_k := (*btrfs_disk_key_raw)(unsafe.Pointer(&p[off2+4]))
	p2_b := p2[4+17:]
	p3 := (*btrfs_root_item_raw_p3)(unsafe.Pointer(&p[off3]))
	return rootItem{
		Inode:        p1.inode.Decode(),
		Gen:          p1.generation,
		RootDirID:    p1.root_dirid,
		ByteNr:       p1.bytenr,
		ByteLimit:    p1.byte_limit,
		BytesUsed:    p1.bytes_used,
		LastSnapshot: p1.last_snapshot,
		Flags:        p1.flags,
		// from here, Go structure become misaligned with C structure
		Refs:         asUint32(p2[0:]),
		DropProgress: p2_k.Decode(),
		DropLevel:    p2_b[0],
		Level:        p2_b[1],
		// these fields are still misaligned by 1 byte
		// TODO(dennwc): it's a copy of Gen to check structure version; hide it maybe?
		GenV2:        p3.generation_v2,
		UUID:         p3.uuid,
		ParentUUID:   p3.parent_uuid,
		ReceivedUUID: p3.received_uuid,
		CTransID:     p3.ctransid,
		OTransID:     p3.otransid,
		STransID:     p3.stransid,
		RTransID:     p3.rtransid,
		CTime:        p3.times[0].Decode(),
		OTime:        p3.times[1].Decode(),
		STime:        p3.times[2].Decode(),
		RTime:        p3.times[3].Decode(),
	}
}

// rootItem is the decoded form of btrfs_root_item.
type rootItem struct {
	Inode        inodeItem
	Gen          uint64
	RootDirID    uint64
	ByteNr       uint64
	ByteLimit    uint64
	BytesUsed    uint64
	LastSnapshot uint64
	Flags        uint64
	Refs         uint32
	DropProgress diskKey
	DropLevel    uint8
	Level        uint8
	GenV2        uint64
	UUID         UUID
	ParentUUID   UUID
	ReceivedUUID UUID
	CTransID     uint64
	OTransID     uint64
	STransID     uint64
	RTransID     uint64
	CTime        time.Time
	OTime        time.Time
	STime        time.Time
	RTime        time.Time
}

// btrfs_root_item_raw_p1 is the first slice of btrfs_root_item, where Go's
// natural alignment still matches the packed C layout.
type btrfs_root_item_raw_p1 struct {
	inode         btrfs_inode_item_raw
	generation    uint64
	root_dirid    uint64
	bytenr        uint64
	byte_limit    uint64
	bytes_used    uint64
	last_snapshot uint64
	flags         uint64
}

// btrfs_root_item_raw_p2 documents the 23-byte middle section; Decode reads
// it by hand-slicing instead of casting, since this struct would not be
// packed in Go.
type btrfs_root_item_raw_p2 struct {
	refs          uint32
	drop_progress btrfs_disk_key_raw
	drop_level    uint8
	level         uint8
}

// btrfs_root_item_raw_p3 is the tail of btrfs_root_item starting at
// generation_v2, with the four timespecs again folded into a timeBlock.
type btrfs_root_item_raw_p3 struct {
	generation_v2 uint64
	uuid          UUID
	parent_uuid   UUID
	received_uuid UUID
	ctransid      uint64
	otransid      uint64
	stransid      uint64
	rtransid      uint64
	// ctime btrfs_timespec
	// otime btrfs_timespec
	// stime btrfs_timespec
	// rtime btrfs_timespec
	times timeBlock
	_     [8]uint64 // reserved
}
golang-github-dennwc-btrfs-0.0~git20240418.0167142/btrfs_tree_h.go000066400000000000000000000370561461022635500240560ustar00rootroot00000000000000package btrfs

/*
 * This header contains the structure definitions and constants used
 * by file system objects that can be retrieved using
 * the _BTRFS_IOC_SEARCH_TREE ioctl. That means basically anything that
 * is needed to describe a leaf node's key or item contents.
*/ /* holds pointers to all of the tree roots */ /* stores information about which extents are in use, and reference counts */ /* * chunk tree stores translations from logical -> physical block numbering * the super block points to the chunk tree */ /* * stores information about which areas of a given device are in use. * one per device. The tree of tree roots points to the device tree */ /* one per subvolume, storing files and directories */ /* directory objectid inside the root tree */ /* holds checksums of all the data extents */ /* holds quota configuration and tracking */ /* for storing items that use the _BTRFS_UUID_KEY* types */ /* tracks free space in block groups. */ /* device stats in the device tree */ /* for storing balance parameters in the root tree */ /* orhpan objectid for tracking unlinked/truncated files */ /* does write ahead logging to speed up fsyncs */ /* for space balancing */ /* * extent checksums all have this objectid * this allows them to share the logging tree * for fsyncs */ /* For storing free space cache */ /* * The inode number assigned to the special inode for storing * free ino cache */ /* dummy objectid represents multiple objectids */ /* * All files have objectids in this range. */ /* * the device items go into the chunk tree. The key is in the form * [ 1 _BTRFS_DEV_ITEM_KEY device_id ] */ /* * inode items have the data typically returned from stat and store other * info about object characteristics. There is one for every file and dir in * the FS */ /* reserve 2-15 close to the inode for later flexibility */ /* * dir items are the name -> inode pointers in a directory. There is one * for every name in a directory. */ /* * extent data is for file data */ /* * extent csums are stored in a separate tree and hold csums for * an entire extent on disk. */ /* * root items point to tree roots. 
They are typically in the root * tree used by the super block to find all the other trees */ /* * root backrefs tie subvols and snapshots to the directory entries that * reference them */ /* * root refs make a fast index for listing all of the snapshots and * subvolumes referenced by a given root. They point directly to the * directory item in the root that references the subvol */ /* * extent items are in the extent map tree. These record which blocks * are used, and how many references there are to each block */ /* * The same as the _BTRFS_EXTENT_ITEM_KEY, except it's metadata we already know * the length, so we save the level in key->offset instead of the length. */ /* * block groups give us hints into the extent allocation trees. Which * blocks are free etc etc */ /* * Every block group is represented in the free space tree by a free space info * item, which stores some accounting information. It is keyed on * (block_group_start, FREE_SPACE_INFO, block_group_length). */ /* * A free space extent tracks an extent of space that is free in a block group. * It is keyed on (start, FREE_SPACE_EXTENT, length). */ /* * When a block group becomes very fragmented, we convert it to use bitmaps * instead of extents. A free space bitmap is keyed on * (start, FREE_SPACE_BITMAP, length); the corresponding item is a bitmap with * (length / sectorsize) bits. */ /* * Records the overall state of the qgroups. * There's only one instance of this key present, * (0, _BTRFS_QGROUP_STATUS_KEY, 0) */ /* * Records the currently used space of the qgroup. * One key per qgroup, (0, _BTRFS_QGROUP_INFO_KEY, qgroupid). */ /* * Contains the user configured limits for the qgroup. * One key per qgroup, (0, _BTRFS_QGROUP_LIMIT_KEY, qgroupid). */ /* * Records the child-parent relationship of qgroups. For * each relation, 2 keys are present: * (childid, _BTRFS_QGROUP_RELATION_KEY, parentid) * (parentid, _BTRFS_QGROUP_RELATION_KEY, childid) */ /* * Obsolete name, see _BTRFS_TEMPORARY_ITEM_KEY. 
*/ /* * The key type for tree items that are stored persistently, but do not need to * exist for extended period of time. The items can exist in any tree. * * [subtype, _BTRFS_TEMPORARY_ITEM_KEY, data] * * Existing items: * * - balance status item * (_BTRFS_BALANCE_OBJECTID, _BTRFS_TEMPORARY_ITEM_KEY, 0) */ /* * Obsolete name, see _BTRFS_PERSISTENT_ITEM_KEY */ /* * The key type for tree items that are stored persistently and usually exist * for a long period, eg. filesystem lifetime. The item kinds can be status * information, stats or preference values. The item can exist in any tree. * * [subtype, _BTRFS_PERSISTENT_ITEM_KEY, data] * * Existing items: * * - device statistics, store IO stats in the device tree, one key for all * stats * (_BTRFS_DEV_STATS_OBJECTID, _BTRFS_DEV_STATS_KEY, 0) */ /* * Persistantly stores the device replace state in the device tree. * The key is built like this: (0, _BTRFS_DEV_REPLACE_KEY, 0). */ /* * Stores items that allow to quickly map UUIDs to something else. * These items are part of the filesystem UUID tree. * The key is built like this: * (UUID_upper_64_bits, _BTRFS_UUID_KEY*, UUID_lower_64_bits). */ /* for UUIDs assigned to * received subvols */ /* * string items are for debugging. They just store a short string of * data in the FS */ /* 32 bytes in various csum fields */ /* csum types */ /* * flags definitions for directory entry item type * * Used by: * struct btrfs_dir_item.type */ /* * The key defines the order in the tree, and so it also defines (optimal) * block layout. * * objectid corresponds to the inode number. * * type tells us things about the object, and is a kind of stream selector. * so for a given inode, keys with type of 1 might refer to the inode data, * type of 2 may point to file data in the btree and type == 3 may point to * extents. * * offset is the starting byte offset for this key in the stream. * * btrfs_disk_key is in disk byte order. struct btrfs_key is always * in cpu native order. 
 Otherwise they are identical and their sizes
 * should be the same (ie both packed)
 */

// btrfs_disk_key mirrors struct btrfs_disk_key (key as stored on disk).
type btrfs_disk_key struct {
	objectid uint64
	type_    uint8
	offset   uint64
}

// btrfs_key mirrors struct btrfs_key: same fields as btrfs_disk_key, kept
// in CPU-native byte order in memory.
type btrfs_key struct {
	objectid uint64
	type_    uint8
	offset   uint64
}

// btrfs_dev_item mirrors struct btrfs_dev_item: describes one device
// belonging to the filesystem.
type btrfs_dev_item struct {
	devid        uint64
	total_bytes  uint64
	bytes_used   uint64
	io_align     uint32
	io_width     uint32
	sector_size  uint32
	type_        uint64
	generation   uint64
	start_offset uint64
	dev_group    uint32
	seek_speed   uint8
	bandwidth    uint8
	uuid         UUID
	fsid         FSID
}

// btrfs_stripe mirrors struct btrfs_stripe: one stripe of a chunk.
type btrfs_stripe struct {
	devid    uint64
	offset   uint64
	dev_uuid UUID
}

// btrfs_chunk mirrors struct btrfs_chunk; the first stripe is embedded
// inline and additional stripes follow it on disk.
type btrfs_chunk struct {
	length      uint64
	owner       uint64
	stripe_len  uint64
	type_       uint64
	io_align    uint32
	io_width    uint32
	sector_size uint32
	num_stripes uint16
	sub_stripes uint16
	stripe      struct {
		devid    uint64
		offset   uint64
		dev_uuid UUID
	}
}

/* additional stripes go here */

type btrfs_free_space_entry struct {
	offset uint64
	bytes  uint64
	type_  uint8
}

type btrfs_free_space_header struct {
	location struct {
		objectid uint64
		type_    uint8
		offset   uint64
	}
	generation  uint64
	num_entries uint64
	num_bitmaps uint64
}

/* Super block flags */
/* Errors detected */

/*
 * items in the extent btree are used to record the objectid of the
 * owner of the block and the number of references
 */
type btrfs_extent_item struct {
	refs       uint64
	generation uint64
	flags      uint64
}

type btrfs_extent_item_v0 struct {
	refs uint32
}

/* following flags only apply to tree blocks */
/* use full backrefs for extent pointers in the block */
/*
 * this flag is only used internally by scrub and may be changed at any time
 * it is only declared here to avoid collisions
 */
type btrfs_tree_block_info struct {
	key struct {
		objectid uint64
		type_    uint8
		offset   uint64
	}
	level uint8
}

type btrfs_extent_data_ref struct {
	root     uint64
	objectid uint64
	offset   uint64
	count    uint32
}

type btrfs_shared_data_ref struct {
	count uint32
}

type btrfs_extent_inline_ref struct {
	type_  uint8
	offset uint64
}

/* old style backrefs item */
type btrfs_extent_ref_v0 struct {
	root       uint64
	generation uint64
	objectid   uint64
	count      uint32
}

/* dev extents record free space on individual devices. The owner
 * field points back to the chunk allocation mapping tree that allocated
 * the extent. The chunk tree uuid field is a way to double check the owner
 */
type btrfs_dev_extent struct {
	chunk_tree      uint64
	chunk_objectid  uint64
	chunk_offset    uint64
	length          uint64
	chunk_tree_uuid UUID
}

type btrfs_inode_ref struct {
	index    uint64
	name_len uint16
}

/* name goes here */

type btrfs_inode_extref struct {
	parent_objectid uint64
	index           uint64
	name_len        uint16
	//name [0]uint8
}

/* name goes here */

type btrfs_timespec struct {
	sec  uint64
	nsec uint32
}

type btrfs_inode_item struct {
	generation  uint64
	transid     uint64
	size        uint64
	nbytes      uint64
	block_group uint64
	nlink       uint32
	uid         uint32
	gid         uint32
	mode        uint32
	rdev        uint64
	flags       uint64
	sequence    uint64
	reserved    [4]uint64
	atime       struct {
		sec  uint64
		nsec uint32
	}
	ctime struct {
		sec  uint64
		nsec uint32
	}
	mtime struct {
		sec  uint64
		nsec uint32
	}
	otime struct {
		sec  uint64
		nsec uint32
	}
}

type btrfs_dir_log_item struct {
	end uint64
}

type btrfs_dir_item struct {
	location struct {
		objectid uint64
		type_    uint8
		offset   uint64
	}
	transid  uint64
	data_len uint16
	name_len uint16
	type_    uint8
}

/*
 * Internal in-memory flag that a subvolume has been marked for deletion but
 * still visible as a directory
 */
type btrfs_root_item struct {
	inode struct {
		generation  uint64
		transid     uint64
		size        uint64
		nbytes      uint64
		block_group uint64
		nlink       uint32
		uid         uint32
		gid         uint32
		mode        uint32
		rdev        uint64
		flags       uint64
		sequence    uint64
		reserved    [4]uint64
		atime       struct {
			sec  uint64
			nsec uint32
		}
		ctime struct {
			sec  uint64
			nsec uint32
		}
		mtime struct {
			sec  uint64
			nsec uint32
		}
		otime struct {
			sec  uint64
			nsec uint32
		}
	}
	generation    uint64
	root_dirid    uint64
	bytenr        uint64
	byte_limit    uint64
	bytes_used    uint64
	last_snapshot uint64
	flags         uint64
	refs          uint32
	drop_progress struct {
		objectid uint64
		type_    uint8
		offset   uint64
	}
	drop_level    uint8
	level         uint8
	generation_v2 uint64
	uuid          UUID
	parent_uuid   UUID
	received_uuid UUID
	ctransid      uint64
	otransid      uint64
	stransid      uint64
	rtransid      uint64
	ctime         struct {
		sec  uint64
		nsec uint32
	}
	otime struct {
		sec  uint64
		nsec uint32
	}
	stime struct {
		sec  uint64
		nsec uint32
	}
	rtime struct {
		sec  uint64
		nsec uint32
	}
	reserved [8]uint64
}

/*
 * this is used for both forward and backward root refs
 */
type btrfs_root_ref struct {
	dirid    uint64
	sequence uint64
	name_len uint16
}

// NOTE(review): in the C header, usage/usage_min+usage_max and
// limit/limit_min+limit_max are unions; this mirror lists them as separate
// fields, so its in-memory size differs from the packed on-disk struct —
// verify before using it for direct casts.
type btrfs_disk_balance_args struct {
	profiles    uint64
	usage       uint64
	usage_min   uint32
	usage_max   uint32
	devid       uint64
	pstart      uint64
	pend        uint64
	vstart      uint64
	vend        uint64
	target      uint64
	flags       uint64
	limit       uint64
	limit_min   uint32
	limit_max   uint32
	stripes_min uint32
	stripes_max uint32
	unused      [6]uint64
}

/*
 * store balance parameters to disk so that balance can be properly
 * resumed after crash or unmount
 */
type btrfs_balance_item struct {
	flags uint64
	data  struct {
		profiles    uint64
		usage       uint64
		usage_min   uint32
		usage_max   uint32
		devid       uint64
		pstart      uint64
		pend        uint64
		vstart      uint64
		vend        uint64
		target      uint64
		flags       uint64
		limit       uint64
		limit_min   uint32
		limit_max   uint32
		stripes_min uint32
		stripes_max uint32
		unused      [6]uint64
	}
	meta struct {
		profiles    uint64
		usage       uint64
		usage_min   uint32
		usage_max   uint32
		devid       uint64
		pstart      uint64
		pend        uint64
		vstart      uint64
		vend        uint64
		target      uint64
		flags       uint64
		limit       uint64
		limit_min   uint32
		limit_max   uint32
		stripes_min uint32
		stripes_max uint32
		unused      [6]uint64
	}
	sys struct {
		profiles    uint64
		usage       uint64
		usage_min   uint32
		usage_max   uint32
		devid       uint64
		pstart      uint64
		pend        uint64
		vstart      uint64
		vend        uint64
		target      uint64
		flags       uint64
		limit       uint64
		limit_min   uint32
		limit_max   uint32
		stripes_min uint32
		stripes_max uint32
		unused      [6]uint64
	}
	unused [4]uint64
}

type btrfs_file_extent_item struct {
	generation     uint64
	ram_bytes      uint64
	compression    uint8
	encryption     uint8
	other_encoding uint16
	type_          uint8
	disk_bytenr    uint64
	disk_num_bytes uint64
	offset         uint64
	num_bytes      uint64
}

type btrfs_csum_item struct {
	csum uint8
}

type btrfs_dev_stats_item struct {
	values [_BTRFS_DEV_STAT_VALUES_MAX]uint64
}

type btrfs_dev_replace_item struct {
	src_devid                     uint64
	cursor_left                   uint64
	cursor_right                  uint64
	cont_reading_from_srcdev_mode uint64
	replace_state                 uint64
	time_started                  uint64
	time_stopped                  uint64
	num_write_errors              uint64
	num_uncorrectable_read_errors uint64
}

/* different types of block groups (and chunks) */
const (
	_BTRFS_RAID_RAID10 = iota
	_BTRFS_RAID_RAID1
	_BTRFS_RAID_DUP
	_BTRFS_RAID_RAID0
	_BTRFS_RAID_SINGLE
	_BTRFS_RAID_RAID5
	_BTRFS_RAID_RAID6
	_BTRFS_NR_RAID_TYPES
)

/*
 * We need a bit for restriper to be able to tell when chunks of type
 * SINGLE are available. This "extended" profile format is used in
 * fs_info->avail_*_alloc_bits (in-memory) and balance item fields
 * (on-disk). The corresponding on-disk bit in chunk.type is reserved
 * to avoid remappings between two formats in future.
 */
/*
 * A fake block group type that is used to communicate global block reserve
 * size to userspace via the SPACE_INFO ioctl.
 */

// chunk_to_extended converts chunk-type flags to the extended profile
// format by marking profile-less (SINGLE) chunks explicitly.
func chunk_to_extended(flags uint64) uint64 {
	if flags&uint64(_BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 {
		flags |= uint64(availAllocBitSingle)
	}
	return flags
}

// extended_to_chunk strips the extended SINGLE marker, returning plain
// chunk-type flags.
func extended_to_chunk(flags uint64) uint64 {
	return flags &^ uint64(availAllocBitSingle)
}

type btrfs_block_group_item struct {
	used           uint64
	chunk_objectid uint64
	flags          uint64
}

type btrfs_free_space_info struct {
	extent_count uint32
	flags        uint32
}

// btrfs_qgroup_level extracts the level part of a qgroup id (stored in the
// upper bits above qgroupLevelShift).
func btrfs_qgroup_level(qgroupid uint64) uint64 {
	return qgroupid >> uint32(qgroupLevelShift)
}

/*
 * is subvolume quota turned on?
 */
/*
 * RESCAN is set during the initialization phase
 */
/*
 * Some qgroup entries are known to be out of date,
 * either because the configuration has changed in a way that
 * makes a rescan necessary, or because the fs has been mounted
 * with a non-qgroup-aware version.
 * Turning quota off and on again makes it inconsistent, too.
*/ type btrfs_qgroup_status_item struct { version uint64 generation uint64 flags uint64 rescan uint64 } type btrfs_qgroup_info_item struct { generation uint64 rfer uint64 rfer_cmpr uint64 excl uint64 excl_cmpr uint64 } type btrfs_qgroup_limit_item struct { flags uint64 max_rfer uint64 max_excl uint64 rsv_rfer uint64 rsv_excl uint64 } golang-github-dennwc-btrfs-0.0~git20240418.0167142/btrfs_tree_hc.go000066400000000000000000000404441461022635500242160ustar00rootroot00000000000000package btrfs // This code was auto-generated; DO NOT EDIT! type treeKeyType uint32 type objectID uint64 type fileType int type fileExtentType int type devReplaceItemState int type blockGroup uint64 // This header contains the structure definitions and constants used // by file system objects that can be retrieved using // the BTRFS_IOC_SEARCH_TREE ioctl. That means basically anything that // is needed to describe a leaf node's key or item contents. const ( // Holds pointers to all of the tree roots rootTreeObjectid objectID = 1 // Stores information about which extents are in use, and reference counts extentTreeObjectid objectID = 2 // Chunk tree stores translations from logical -> physical block numbering // the super block points to the chunk tree chunkTreeObjectid objectID = 3 // Stores information about which areas of a given device are in use. // one per device. The tree of tree roots points to the device tree devTreeObjectid objectID = 4 // One per subvolume, storing files and directories fsTreeObjectid objectID = 5 // Directory objectid inside the root tree rootTreeDirObjectid objectID = 6 // Holds checksums of all the data extents csumTreeObjectid objectID = 7 // Holds quota configuration and tracking quotaTreeObjectid objectID = 8 // For storing items that use the BTRFS_UUID_KEY* types uuidTreeObjectid objectID = 9 // Tracks free space in block groups. 
freeSpaceTreeObjectid objectID = 10 // Device stats in the device tree devStatsObjectid objectID = 0 // For storing balance parameters in the root tree balanceObjectid objectID = (1<<64 - 4) // Orhpan objectid for tracking unlinked/truncated files orphanObjectid objectID = (1<<64 - 5) // Does write ahead logging to speed up fsyncs treeLogObjectid objectID = (1<<64 - 6) treeLogFixupObjectid objectID = (1<<64 - 7) // For space balancing treeRelocObjectid objectID = (1<<64 - 8) dataRelocTreeObjectid objectID = (1<<64 - 9) // Extent checksums all have this objectid // this allows them to share the logging tree // for fsyncs extentCsumObjectid objectID = (1<<64 - 10) // For storing free space cache freeSpaceObjectid objectID = (1<<64 - 11) // The inode number assigned to the special inode for storing // free ino cache freeInoObjectid objectID = (1<<64 - 12) // Dummy objectid represents multiple objectids multipleObjectids = (1<<64 - 255) // All files have objectids in this range. firstFreeObjectid objectID = 256 lastFreeObjectid objectID = (1<<64 - 256) firstChunkTreeObjectid objectID = 256 // The device items go into the chunk tree. The key is in the form // [ 1 BTRFS_DEV_ITEM_KEY device_id ] devItemsObjectid objectID = 1 btreeInodeObjectid objectID = 1 emptySubvolDirObjectid objectID = 2 devReplaceDevid = 0 // Inode items have the data typically returned from stat and store other // info about object characteristics. There is one for every file and dir in // the FS inodeItemKey treeKeyType = 1 inodeRefKey treeKeyType = 12 inodeExtrefKey treeKeyType = 13 xattrItemKey treeKeyType = 24 orphanItemKey treeKeyType = 48 // Reserve 2-15 close to the inode for later flexibility // Dir items are the name -> inode pointers in a directory. There is one // for every name in a directory. 
dirLogItemKey treeKeyType = 60 dirLogIndexKey treeKeyType = 72 dirItemKey treeKeyType = 84 dirIndexKey treeKeyType = 96 // Extent data is for file data extentDataKey treeKeyType = 108 // Extent csums are stored in a separate tree and hold csums for // an entire extent on disk. extentCsumKey treeKeyType = 128 // Root items point to tree roots. They are typically in the root // tree used by the super block to find all the other trees rootItemKey treeKeyType = 132 // Root backrefs tie subvols and snapshots to the directory entries that // reference them rootBackrefKey treeKeyType = 144 // Root refs make a fast index for listing all of the snapshots and // subvolumes referenced by a given root. They point directly to the // directory item in the root that references the subvol rootRefKey treeKeyType = 156 // Extent items are in the extent map tree. These record which blocks // are used, and how many references there are to each block extentItemKey treeKeyType = 168 // The same as the BTRFS_EXTENT_ITEM_KEY, except it's metadata we already know // the length, so we save the level in key->offset instead of the length. metadataItemKey treeKeyType = 169 treeBlockRefKey treeKeyType = 176 extentDataRefKey treeKeyType = 178 extentRefV0Key treeKeyType = 180 sharedBlockRefKey treeKeyType = 182 sharedDataRefKey treeKeyType = 184 // Block groups give us hints into the extent allocation trees. Which // blocks are free etc etc blockGroupItemKey treeKeyType = 192 // Every block group is represented in the free space tree by a free space info // item, which stores some accounting information. It is keyed on // (block_group_start, FREE_SPACE_INFO, block_group_length). freeSpaceInfoKey treeKeyType = 198 // A free space extent tracks an extent of space that is free in a block group. // It is keyed on (start, FREE_SPACE_EXTENT, length). freeSpaceExtentKey treeKeyType = 199 // When a block group becomes very fragmented, we convert it to use bitmaps // instead of extents. 
A free space bitmap is keyed on // (start, FREE_SPACE_BITMAP, length); the corresponding item is a bitmap with // (length / sectorsize) bits. freeSpaceBitmapKey treeKeyType = 200 devExtentKey treeKeyType = 204 devItemKey treeKeyType = 216 chunkItemKey treeKeyType = 228 // Records the overall state of the qgroups. // There's only one instance of this key present, // (0, BTRFS_QGROUP_STATUS_KEY, 0) qgroupStatusKey treeKeyType = 240 // Records the currently used space of the qgroup. // One key per qgroup, (0, BTRFS_QGROUP_INFO_KEY, qgroupid). qgroupInfoKey treeKeyType = 242 // Contains the user configured limits for the qgroup. // One key per qgroup, (0, BTRFS_QGROUP_LIMIT_KEY, qgroupid). qgroupLimitKey treeKeyType = 244 // Records the child-parent relationship of qgroups. For // each relation, 2 keys are present: // (childid, BTRFS_QGROUP_RELATION_KEY, parentid) // (parentid, BTRFS_QGROUP_RELATION_KEY, childid) qgroupRelationKey treeKeyType = 246 // Obsolete name, see BTRFS_TEMPORARY_ITEM_KEY. balanceItemKey treeKeyType = 248 // The key type for tree items that are stored persistently, but do not need to // exist for extended period of time. The items can exist in any tree. // [subtype, BTRFS_TEMPORARY_ITEM_KEY, data] // Existing items: // - balance status item // (BTRFS_BALANCE_OBJECTID, BTRFS_TEMPORARY_ITEM_KEY, 0) temporaryItemKey treeKeyType = 248 // Obsolete name, see BTRFS_PERSISTENT_ITEM_KEY devStatsKey treeKeyType = 249 // The key type for tree items that are stored persistently and usually exist // for a long period, eg. filesystem lifetime. The item kinds can be status // information, stats or preference values. The item can exist in any tree. // [subtype, BTRFS_PERSISTENT_ITEM_KEY, data] // Existing items: // - device statistics, store IO stats in the device tree, one key for all // stats // (BTRFS_DEV_STATS_OBJECTID, BTRFS_DEV_STATS_KEY, 0) persistentItemKey treeKeyType = 249 // Persistantly stores the device replace state in the device tree. 
// The key is built like this: (0, BTRFS_DEV_REPLACE_KEY, 0). devReplaceKey treeKeyType = 250 // Stores items that allow to quickly map UUIDs to something else. // These items are part of the filesystem UUID tree. // The key is built like this: // (UUID_upper_64_bits, BTRFS_UUID_KEY*, UUID_lower_64_bits). uuidKeySubvol = 251 uuidKeyReceivedSubvol = 252 // String items are for debugging. They just store a short string of // data in the FS stringItemKey treeKeyType = 253 // 32 bytes in various csum fields csumSize = 32 // Csum types csumTypeCrc32 = 0 // Flags definitions for directory entry item type // Used by: // struct btrfs_dir_item.type ftUnknown fileType = 0 ftRegFile fileType = 1 ftDir fileType = 2 ftChrdev fileType = 3 ftBlkdev fileType = 4 ftFifo fileType = 5 ftSock fileType = 6 ftSymlink fileType = 7 ftXattr fileType = 8 ftMax fileType = 9 // The key defines the order in the tree, and so it also defines (optimal) // block layout. // objectid corresponds to the inode number. // type tells us things about the object, and is a kind of stream selector. // so for a given inode, keys with type of 1 might refer to the inode data, // type of 2 may point to file data in the btree and type == 3 may point to // extents. // offset is the starting byte offset for this key in the stream. // btrfs_disk_key is in disk byte order. struct btrfs_key is always // in cpu native order. 
Otherwise they are identical and their sizes // should be the same (ie both packed) // The internal btrfs device id // Size of the device // Bytes used // Optimal io alignment for this device // Optimal io width for this device // Minimal io size for this device // Type and info about this device // Expected generation for this device // Starting byte of this partition on the device, // to allow for stripe alignment in the future // Grouping information for allocation decisions // Seek speed 0-100 where 100 is fastest // Bandwidth 0-100 where 100 is fastest // Btrfs generated uuid for this device // Uuid of FS who owns this device // Size of this chunk in bytes // Objectid of the root referencing this chunk // Optimal io alignment for this chunk // Optimal io width for this chunk // Minimal io size for this chunk // 2^16 stripes is quite a lot, a second limit is the size of a single // item in the btree // Sub stripes only matter for raid10 // Additional stripes go here freeSpaceExtent = 1 freeSpaceBitmap = 2 headerFlagWritten = (1 << 0) headerFlagReloc = (1 << 1) // Super block flags // Errors detected superFlagError = (1 << 2) superFlagSeeding = (1 << 32) superFlagMetadump = (1 << 33) // Items in the extent btree are used to record the objectid of the // owner of the block and the number of references extentFlagData = (1 << 0) extentFlagTreeBlock = (1 << 1) // Following flags only apply to tree blocks // Use full backrefs for extent pointers in the block blockFlagFullBackref = (1 << 8) // This flag is only used internally by scrub and may be changed at any time // it is only declared here to avoid collisions extentFlagSuper = (1 << 48) // Old style backrefs item // Dev extents record free space on individual devices. The owner // field points back to the chunk allocation mapping tree that allocated // the extent. 
The chunk tree uuid field is a way to double check the owner // Name goes here // Name goes here // Nfs style generation number // Transid that last touched this inode // Modification sequence number for NFS // A little future expansion, for more than this we can // just grow the inode item and version it rootSubvolRdonly = (1 << 0) // Internal in-memory flag that a subvolume has been marked for deletion but // still visible as a directory rootSubvolDead = (1 << 48) // The following fields appear after subvol_uuids+subvol_times // were introduced. // This generation number is used to test if the new fields are valid // and up to date while reading the root item. Every time the root item // is written out, the "generation" field is copied into this field. If // anyone ever mounted the fs with an older kernel, we will have // mismatching generation values here and thus must invalidate the // new fields. See btrfs_update_root and btrfs_find_last_root for // details. // the offset of generation_v2 is also used as the start for the memset // when invalidating the fields. 
// This is used for both forward and backward root refs // Profiles to operate on, single is denoted by // BTRFS_AVAIL_ALLOC_BIT_SINGLE // Usage filter // BTRFS_BALANCE_ARGS_USAGE with a single value means '0..N' // BTRFS_BALANCE_ARGS_USAGE_RANGE - range syntax, min..max // Devid filter // Devid subset filter [pstart..pend) // Btrfs virtual address space subset filter [vstart..vend) // Profile to convert to, single is denoted by // BTRFS_AVAIL_ALLOC_BIT_SINGLE // BTRFS_BALANCE_ARGS_* // BTRFS_BALANCE_ARGS_LIMIT with value 'limit' // BTRFS_BALANCE_ARGS_LIMIT_RANGE - the extend version can use minimum // and maximum // Process chunks that cross stripes_min..stripes_max devices, // BTRFS_BALANCE_ARGS_STRIPES_RANGE // Store balance parameters to disk so that balance can be properly // resumed after crash or unmount // BTRFS_BALANCE_* fileExtentInline fileExtentType = 0 fileExtentReg fileExtentType = 1 fileExtentPrealloc fileExtentType = 2 // Transaction id that created this extent // Max number of bytes to hold this extent in ram // when we split a compressed extent we can't know how big // each of the resulting pieces will be. So, this is // an upper limit on the size of the extent in ram instead of // an exact limit. // 32 bits for the various ways we might encode the data, // including compression and encryption. If any of these // are set to something a given disk format doesn't understand // it is treated like an incompat flag for reading and writing, // but not for stat. // Are we inline data or a real extent? // Disk space consumed by the extent, checksum blocks are included // in these numbers // At this offset in the structure, the inline extent data start. // The logical offset in file blocks (no csums) // this extent record is for. 
This allows a file extent to point // into the middle of an existing extent on disk, sharing it // between two snapshots (useful if some bytes in the middle of the // extent have changed // The logical number of file blocks (no csums included). This // always reflects the size uncompressed and without encoding. // Grow this item struct at the end for future enhancements and keep // the existing values unchanged devReplaceItemContReadingFromSrcdevModeAlways = 0 devReplaceItemContReadingFromSrcdevModeAvoid = 1 devReplaceItemStateNeverStarted devReplaceItemState = 0 devReplaceItemStateStarted devReplaceItemState = 1 devReplaceItemStateSuspended devReplaceItemState = 2 devReplaceItemStateFinished devReplaceItemState = 3 devReplaceItemStateCanceled devReplaceItemState = 4 // Grow this item struct at the end for future enhancements and keep // the existing values unchanged // Different types of block groups (and chunks) blockGroupData blockGroup = (1 << 0) blockGroupSystem blockGroup = (1 << 1) blockGroupMetadata blockGroup = (1 << 2) blockGroupRaid0 blockGroup = (1 << 3) blockGroupRaid1 blockGroup = (1 << 4) blockGroupDup blockGroup = (1 << 5) blockGroupRaid10 blockGroup = (1 << 6) blockGroupRaid5 blockGroup = (1 << 7) blockGroupRaid6 blockGroup = (1 << 8) // We need a bit for restriper to be able to tell when chunks of type // SINGLE are available. This "extended" profile format is used in // fs_info->avail_*_alloc_bits (in-memory) and balance item fields // (on-disk). The corresponding on-disk bit in chunk.type is reserved // to avoid remappings between two formats in future. availAllocBitSingle = (1 << 48) // A fake block group type that is used to communicate global block reserve // size to userspace via the SPACE_INFO ioctl. spaceInfoGlobalRsv = (1 << 49) freeSpaceUsingBitmaps = (1 << 0) qgroupLevelShift = 48 // Is subvolume quota turned on? 
qgroupStatusFlagOn = (1 << 0) // RESCAN is set during the initialization phase qgroupStatusFlagRescan = (1 << 1) // Some qgroup entries are known to be out of date, // either because the configuration has changed in a way that // makes a rescan necessary, or because the fs has been mounted // with a non-qgroup-aware version. // Turning qouta off and on again makes it inconsistent, too. qgroupStatusFlagInconsistent = (1 << 2) qgroupStatusVersion = 1 // The generation is updated during every commit. As older // versions of btrfs are not aware of qgroups, it will be // possible to detect inconsistencies by checking the // generation on mount time // Flag definitions see above // Only used during scanning to record the progress // of the scan. It contains a logical address // Only updated when any of the other values change ) golang-github-dennwc-btrfs-0.0~git20240418.0167142/cmd/000077500000000000000000000000001461022635500216135ustar00rootroot00000000000000golang-github-dennwc-btrfs-0.0~git20240418.0167142/cmd/gbtrfs/000077500000000000000000000000001461022635500231025ustar00rootroot00000000000000golang-github-dennwc-btrfs-0.0~git20240418.0167142/cmd/gbtrfs/gbtrfs.go000066400000000000000000000064401461022635500247240ustar00rootroot00000000000000package main import ( "fmt" "os" "github.com/dennwc/btrfs" "github.com/spf13/cobra" ) func init() { RootCmd.AddCommand( SubvolumeCmd, SendCmd, ReceiveCmd, ) SubvolumeCmd.AddCommand( SubvolumeCreateCmd, SubvolumeDeleteCmd, SubvolumeListCmd, ) SendCmd.Flags().StringP("parent", "p", "", "Send an incremental stream from to .") } var RootCmd = &cobra.Command{ Use: "btrfs [--help] [--version] [...] []", Short: "Use --help as an argument for information on a specific group or command.", } var SubvolumeCmd = &cobra.Command{ Use: "subvolume ", Aliases: []string{"subvol", "sub", "sv"}, } var SubvolumeCreateCmd = &cobra.Command{ Use: "create [-i ] [/]", Short: "Create a subvolume", Long: `Create a subvolume in . 
If is not given subvolume will be created in the current directory.`, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { return fmt.Errorf("subvolume not specified") } else if len(args) > 1 { return fmt.Errorf("only one subvolume name is allowed") } return btrfs.CreateSubVolume(args[0]) }, } var SubvolumeDeleteCmd = &cobra.Command{ Use: "delete [options] [...]", Short: "Delete subvolume(s)", Long: `Delete subvolumes from the filesystem. The corresponding directory is removed instantly but the data blocks are removed later. The deletion does not involve full commit by default due to performance reasons (as a consequence, the subvolume may appear again after a crash). Use one of the --commit options to wait until the operation is safely stored on the media.`, RunE: func(cmd *cobra.Command, args []string) error { for _, arg := range args { if err := btrfs.DeleteSubVolume(arg); err != nil { return err } } return nil }, } var SubvolumeListCmd = &cobra.Command{ Use: "list ", Short: "List subvolumes", Aliases: []string{"ls"}, RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return fmt.Errorf("expected one destination argument") } fs, err := btrfs.Open(args[0], true) if err != nil { return err } defer fs.Close() list, err := fs.ListSubvolumes(nil) if err == nil { for _, v := range list { fmt.Printf("%+v\n", v) } } return err }, } var SendCmd = &cobra.Command{ Use: "send [-v] [-p ] [-c ] [-f ] [...]", Short: "Send the subvolume(s) to stdout.", Long: `Sends the subvolume(s) specified by to stdout. should be read-only here.`, RunE: func(cmd *cobra.Command, args []string) error { parent, _ := cmd.Flags().GetString("parent") return btrfs.Send(os.Stdout, parent, args...) }, } var ReceiveCmd = &cobra.Command{ Use: "receive [-v] [-f ] [--max-errors ] ", Short: "Receive subvolumes from stdin.", Long: `Receives one or more subvolumes that were previously sent with btrfs send. 
The received subvolumes are stored into .`, RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return fmt.Errorf("expected one destination argument") } return btrfs.Receive(os.Stdin, args[0]) }, } func main() { if err := RootCmd.Execute(); err != nil { fmt.Println(err) os.Exit(-1) } } golang-github-dennwc-btrfs-0.0~git20240418.0167142/cmd/gbtrfs/go.mod000066400000000000000000000004531461022635500242120ustar00rootroot00000000000000module github.com/dennwc/btrfs/cmd/gbtrfs go 1.12 require ( github.com/dennwc/btrfs v0.0.0-20181021180244-694b569856e3 github.com/dennwc/ioctl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/spf13/cobra v0.0.3 github.com/spf13/pflag v1.0.3 // indirect ) golang-github-dennwc-btrfs-0.0~git20240418.0167142/cmd/gbtrfs/go.sum000066400000000000000000000016071461022635500242410ustar00rootroot00000000000000github.com/dennwc/btrfs v0.0.0-20181021180244-694b569856e3 h1:gvAC0SRt17o5OEwJU+0Iz298dfYF/aJlSfKf9NRay6c= github.com/dennwc/btrfs v0.0.0-20181021180244-694b569856e3/go.mod h1:8k+PMLjFlirprJbTSZJbkj8SEkfTAn3b0JhgPPE78HI= github.com/dennwc/ioctl v1.0.0 h1:DsWAAjIxRqNcLn9x6mwfuf2pet3iB7aK90K4tF16rLg= github.com/dennwc/ioctl v1.0.0/go.mod h1:ellh2YB5ldny99SBU/VX7Nq0xiZbHphf1DrtHxxjMk0= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= golang-github-dennwc-btrfs-0.0~git20240418.0167142/errors.go000066400000000000000000000024211461022635500227120ustar00rootroot00000000000000package btrfs import ( "errors" "fmt" ) type ErrNotBtrfs 
struct { Path string } func (e ErrNotBtrfs) Error() string { return fmt.Sprintf("not a btrfs filesystem: %s", e.Path) } // Error codes as returned by the kernel type ErrCode int func (e ErrCode) Error() string { s, ok := errorString[e] if ok { return s } return fmt.Sprintf("error %d", int(e)) } const ( ErrDevRAID1MinNotMet = ErrCode(iota + 1) ErrDevRAID10MinNotMet ErrDevRAID5MinNotMet ErrDevRAID6MinNotMet ErrDevTargetReplace ErrDevMissingNotFound ErrDevOnlyWritable ErrDevExclRunInProgress ) var errorString = map[ErrCode]string{ ErrDevRAID1MinNotMet: "unable to go below two devices on raid1", ErrDevRAID10MinNotMet: "unable to go below four devices on raid10", ErrDevRAID5MinNotMet: "unable to go below two devices on raid5", ErrDevRAID6MinNotMet: "unable to go below three devices on raid6", ErrDevTargetReplace: "unable to remove the dev_replace target dev", ErrDevMissingNotFound: "no missing devices found to remove", ErrDevOnlyWritable: "unable to remove the only writeable device", ErrDevExclRunInProgress: "add/delete/balance/replace/resize operation in progress", } var ( ErrNotFound = errors.New("not found") errNotImplemented = errors.New("not implemented") ) golang-github-dennwc-btrfs-0.0~git20240418.0167142/go.mod000066400000000000000000000001201461022635500221470ustar00rootroot00000000000000module github.com/dennwc/btrfs go 1.12 require github.com/dennwc/ioctl v1.0.0 golang-github-dennwc-btrfs-0.0~git20240418.0167142/go.sum000066400000000000000000000002451461022635500222040ustar00rootroot00000000000000github.com/dennwc/ioctl v1.0.0 h1:DsWAAjIxRqNcLn9x6mwfuf2pet3iB7aK90K4tF16rLg= github.com/dennwc/ioctl v1.0.0/go.mod h1:ellh2YB5ldny99SBU/VX7Nq0xiZbHphf1DrtHxxjMk0= golang-github-dennwc-btrfs-0.0~git20240418.0167142/headers.go000066400000000000000000000005051461022635500230120ustar00rootroot00000000000000package btrfs //go:generate go run ./cmd/hgen.go -u -g -t BTRFS_ -p btrfs -cs=treeKeyType:uint32=_KEY,objectID:uint64=_OBJECTID 
-cp=fileType=FT_,fileExtentType=FILE_EXTENT_,devReplaceItemState=DEV_REPLACE_ITEM_STATE_,blockGroup:uint64=BLOCK_GROUP_ -o btrfs_tree_hc.go btrfs_tree.h //go:generate gofmt -l -w btrfs_tree_hc.go golang-github-dennwc-btrfs-0.0~git20240418.0167142/internal/000077500000000000000000000000001461022635500226645ustar00rootroot00000000000000golang-github-dennwc-btrfs-0.0~git20240418.0167142/internal/cmd/000077500000000000000000000000001461022635500234275ustar00rootroot00000000000000golang-github-dennwc-btrfs-0.0~git20240418.0167142/internal/cmd/hgen.go000066400000000000000000000110541461022635500247000ustar00rootroot00000000000000package cmd import ( "bufio" "bytes" "flag" "fmt" "io" "log" "os" "regexp" "strings" "unicode" ) var ( f_pkg = flag.String("p", "main", "package name for generated file") f_out = flag.String("o", "-", "output file") f_unexport = flag.Bool("u", true, "make all definitions unexported") f_goname = flag.Bool("g", true, "rename symbols to follow Go conventions") f_trim = flag.String("t", "", "prefix to trim from names") f_constSuf = flag.String("cs", "", "comma-separated list of constant suffixes to create typed constants") f_constPref = flag.String("cp", "", "comma-separated list of constant prefixes to create typed constants") ) var ( reDefineIntConst = regexp.MustCompile(`#define\s+([A-Za-z_][A-Za-z\d_]*)\s+(\(?-?\d+(?:U?LL)?(?:\s*<<\s*\d+)?\)?)`) reNegULL = regexp.MustCompile(`-(\d+)ULL`) ) var ( constTypes []constType ) type constType struct { Name string Type string Suffix string Prefix string } func constName(s string) string { s = strings.TrimPrefix(s, *f_trim) typ := "" for _, t := range constTypes { if t.Suffix != "" && strings.HasSuffix(s, t.Suffix) { //s = strings.TrimSuffix(s, t.Suffix) typ = t.Name break } else if t.Prefix != "" && strings.HasPrefix(s, t.Prefix) { typ = t.Name break } } if *f_goname { buf := bytes.NewBuffer(nil) buf.Grow(len(s)) up := !*f_unexport for _, r := range s { if r == '_' { up = true continue } if up { up = 
false r = unicode.ToUpper(r) } else { r = unicode.ToLower(r) } buf.WriteRune(r) } s = buf.String() } else if *f_unexport { s = "_" + s } if typ != "" { s += " " + typ } return s } func process(w io.Writer, path string) error { file, err := os.Open(path) if err != nil { return err } defer file.Close() r := bufio.NewReader(file) var ( comment = false firstComment = true firstLineInComment = false ) nl := true defer fmt.Fprintln(w, ")") for { line, err := r.ReadBytes('\n') if err == io.EOF { return nil } else if err != nil { return err } line = bytes.TrimSpace(line) if len(line) == 0 { if !nl { nl = true w.Write([]byte("\n")) } continue } nl = false if bytes.HasPrefix(line, []byte("/*")) { comment = true firstLineInComment = true line = bytes.TrimPrefix(line, []byte("/*")) } if comment { ends := bytes.HasSuffix(line, []byte("*/")) if ends { comment = false line = bytes.TrimSuffix(line, []byte("*/")) } line = bytes.TrimLeft(line, " \t*") if len(line) > 0 { if !firstComment { w.Write([]byte("\t")) } w.Write([]byte("// ")) if firstLineInComment { line[0] = byte(unicode.ToUpper(rune(line[0]))) } line = bytes.Replace(line, []byte(" "), []byte(" "), -1) w.Write(line) w.Write([]byte("\n")) firstLineInComment = false } if ends && firstComment { firstComment = false fmt.Fprint(w, "\nconst (\n") nl = true } firstLineInComment = firstLineInComment && !ends continue } if bytes.HasPrefix(line, []byte("#define")) { sub := reDefineIntConst.FindStringSubmatch(string(line)) if len(sub) > 0 { name, val := sub[1], sub[2] if sub := reNegULL.FindAllStringSubmatch(val, -1); len(sub) > 0 { for _, s := range sub { val = strings.Replace(val, s[0], fmt.Sprintf("(1<<64 - %s)", s[1]), -1) } } val = strings.Replace(val, "ULL", "", -1) fmt.Fprintf(w, "\t%s = %s\n", constName(name), val) continue } } } } func regConstTypes(str string, fnc func(*constType, string)) { for _, s := range strings.Split(str, ",") { kv := strings.Split(s, "=") if len(kv) != 2 { continue } st := strings.Split(kv[0], ":") 
typ := "int" if len(st) > 1 { typ = st[1] } t := constType{Name: st[0], Type: typ} fnc(&t, kv[1]) constTypes = append(constTypes, t) } } func main() { flag.Parse() if suf := *f_constSuf; suf != "" { regConstTypes(suf, func(t *constType, v string) { t.Suffix = v }) } if pref := *f_constPref; pref != "" { regConstTypes(pref, func(t *constType, v string) { t.Prefix = v }) } var w io.Writer = os.Stdout if path := *f_out; path != "" && path != "-" { file, err := os.Create(path) if err != nil { log.Fatal(err) } defer file.Close() w = file } fmt.Fprintf(w, "package %s\n\n", *f_pkg) fmt.Fprint(w, "// This code was auto-generated; DO NOT EDIT!\n\n") for _, t := range constTypes { fmt.Fprintf(w, "type %s %s\n\n", t.Name, t.Type) } for _, path := range flag.Args() { if err := process(w, path); err != nil { log.Fatal(err) } } } golang-github-dennwc-btrfs-0.0~git20240418.0167142/ioctl/000077500000000000000000000000001461022635500221625ustar00rootroot00000000000000golang-github-dennwc-btrfs-0.0~git20240418.0167142/ioctl/ioctl.go000066400000000000000000000020311461022635500236170ustar00rootroot00000000000000package ioctl import ( "github.com/dennwc/ioctl" "os" ) const ( None = ioctl.None Write = ioctl.Write Read = ioctl.Read ) // IOC // // Deprecated: use github/dennwc/ioctl func IOC(dir, typ, nr, size uintptr) uintptr { return ioctl.IOC(dir, typ, nr, size) } // IO // // Deprecated: use github/dennwc/ioctl func IO(typ, nr uintptr) uintptr { return ioctl.IO(typ, nr) } // IOC // // Deprecated: use github/dennwc/ioctl func IOR(typ, nr, size uintptr) uintptr { return ioctl.IOR(typ, nr, size) } // IOW // // Deprecated: use github/dennwc/ioctl func IOW(typ, nr, size uintptr) uintptr { return ioctl.IOW(typ, nr, size) } // IOWR // // Deprecated: use github/dennwc/ioctl func IOWR(typ, nr, size uintptr) uintptr { return ioctl.IOWR(typ, nr, size) } // Ioctl // // Deprecated: use github/dennwc/ioctl func Ioctl(f *os.File, ioc uintptr, addr uintptr) error { return ioctl.Ioctl(f, ioc, addr) } 
// Do // // Deprecated: use github/dennwc/ioctl func Do(f *os.File, ioc uintptr, arg interface{}) error { return ioctl.Do(f, ioc, arg) } golang-github-dennwc-btrfs-0.0~git20240418.0167142/ioctl_h.go000066400000000000000000000675151461022635500230360ustar00rootroot00000000000000package btrfs import ( "encoding/binary" "encoding/hex" "github.com/dennwc/ioctl" "os" "strconv" "strings" "unsafe" ) var order = binary.LittleEndian const ioctlMagic = 0x94 const devicePathNameMax = 1024 const ( FSIDSize = 16 UUIDSize = 16 ) var zeroUUID UUID type UUID [UUIDSize]byte func (id UUID) IsZero() bool { return id == zeroUUID } func (id UUID) String() string { if id.IsZero() { return "" } buf := make([]byte, UUIDSize*2+4) i := 0 i += hex.Encode(buf[i:], id[:4]) buf[i] = '-' i++ i += hex.Encode(buf[i:], id[4:6]) buf[i] = '-' i++ i += hex.Encode(buf[i:], id[6:8]) buf[i] = '-' i++ i += hex.Encode(buf[i:], id[8:10]) buf[i] = '-' i++ i += hex.Encode(buf[i:], id[10:]) return string(buf) } type FSID [FSIDSize]byte func (id FSID) String() string { return hex.EncodeToString(id[:]) } const volNameMax = 4087 // this should be 4k type btrfs_ioctl_vol_args struct { fd int64 name [volNameMax + 1]byte } func (arg *btrfs_ioctl_vol_args) SetName(name string) { n := copy(arg.name[:], name) arg.name[n] = 0 } type btrfs_qgroup_limit struct { flags uint64 max_referenced uint64 max_exclusive uint64 rsv_referenced uint64 rsv_exclusive uint64 } type btrfs_qgroup_inherit struct { flags uint64 num_qgroups uint64 num_ref_copies uint64 num_excl_copies uint64 lim btrfs_qgroup_limit //qgroups [0]uint64 } type btrfs_ioctl_qgroup_limit_args struct { qgroupid uint64 lim btrfs_qgroup_limit } type btrfs_ioctl_vol_args_v2_u1 struct { size uint64 qgroup_inherit *btrfs_qgroup_inherit } const subvolNameMax = 4039 type SubvolFlags uint64 func (f SubvolFlags) ReadOnly() bool { return f&SubvolReadOnly != 0 } func (f SubvolFlags) String() string { if f == 0 { return "" } var out []string if f&SubvolReadOnly != 0 { out = 
append(out, "RO") f = f & (^SubvolReadOnly) } if f != 0 { out = append(out, "0x"+strconv.FormatInt(int64(f), 16)) } return strings.Join(out, "|") } // flags for subvolumes // // Used by: // struct btrfs_ioctl_vol_args_v2.flags // // BTRFS_SUBVOL_RDONLY is also provided/consumed by the following ioctls: // - BTRFS_IOC_SUBVOL_GETFLAGS // - BTRFS_IOC_SUBVOL_SETFLAGS const ( subvolCreateAsync = SubvolFlags(1 << 0) SubvolReadOnly = SubvolFlags(1 << 1) subvolQGroupInherit = SubvolFlags(1 << 2) ) type btrfs_ioctl_vol_args_v2 struct { fd int64 transid uint64 flags SubvolFlags btrfs_ioctl_vol_args_v2_u1 unused [2]uint64 name [subvolNameMax + 1]byte } // structure to report errors and progress to userspace, either as a // result of a finished scrub, a canceled scrub or a progress inquiry type btrfs_scrub_progress struct { data_extents_scrubbed uint64 // # of data extents scrubbed tree_extents_scrubbed uint64 // # of tree extents scrubbed data_bytes_scrubbed uint64 // # of data bytes scrubbed tree_bytes_scrubbed uint64 // # of tree bytes scrubbed read_errors uint64 // # of read errors encountered (EIO) csum_errors uint64 // # of failed csum checks // # of occurences, where the metadata of a tree block did not match the expected values, like generation or logical verify_errors uint64 // # of 4k data block for which no csum is present, probably the result of data written with nodatasum no_csum uint64 csum_discards uint64 // # of csum for which no data was found in the extent tree. super_errors uint64 // # of bad super blocks encountered malloc_errors uint64 // # of internal kmalloc errors. These will likely cause an incomplete scrub uncorrectable_errors uint64 // # of errors where either no intact copy was found or the writeback failed corrected_errors uint64 // # of errors corrected // last physical address scrubbed. 
In case a scrub was aborted, this can be used to restart the scrub last_physical uint64 // # of occurences where a read for a full (64k) bio failed, but the re- // check succeeded for each 4k piece. Intermittent error. unverified_errors uint64 } type btrfs_ioctl_scrub_args struct { devid uint64 // in start uint64 // in end uint64 // in flags uint64 // in progress btrfs_scrub_progress // out // pad to 1k _ [1024 - 4*8 - unsafe.Sizeof(btrfs_scrub_progress{})]byte } type contReadingFromSrcdevMode uint64 const ( _BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS contReadingFromSrcdevMode = 0 _BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID contReadingFromSrcdevMode = 1 ) type btrfs_ioctl_dev_replace_start_params struct { srcdevid uint64 // in, if 0, use srcdev_name instead cont_reading_from_srcdev_mode contReadingFromSrcdevMode // in srcdev_name [devicePathNameMax + 1]byte // in tgtdev_name [devicePathNameMax + 1]byte // in } type devReplaceState uint64 const ( _BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED devReplaceState = 0 _BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED devReplaceState = 1 _BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED devReplaceState = 2 _BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED devReplaceState = 3 _BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED devReplaceState = 4 ) type btrfs_ioctl_dev_replace_status_params struct { replace_state devReplaceState // out progress_1000 uint64 // out, 0 <= x <= 1000 time_started uint64 // out, seconds since 1-Jan-1970 time_stopped uint64 // out, seconds since 1-Jan-1970 num_write_errors uint64 // out num_uncorrectable_read_errors uint64 // out } type btrfs_ioctl_dev_replace_args_u1 struct { cmd uint64 // in result uint64 // out start btrfs_ioctl_dev_replace_start_params // in spare [64]uint64 } type btrfs_ioctl_dev_replace_args_u2 struct { cmd uint64 // in result uint64 // out status btrfs_ioctl_dev_replace_status_params // out _ [unsafe.Sizeof(btrfs_ioctl_dev_replace_start_params{}) - 
unsafe.Sizeof(btrfs_ioctl_dev_replace_status_params{})]byte spare [64]uint64 } type btrfs_ioctl_dev_info_args struct { devid uint64 // in/out uuid UUID // in/out bytes_used uint64 // out total_bytes uint64 // out _ [379]uint64 // pad to 4k path [devicePathNameMax]byte // out } type btrfs_ioctl_fs_info_args struct { max_id uint64 // out num_devices uint64 // out fsid FSID // out nodesize uint32 // out sectorsize uint32 // out clone_alignment uint32 // out _ [122*8 + 4]byte // pad to 1k } type btrfs_ioctl_feature_flags struct { compat_flags FeatureFlags compat_ro_flags FeatureFlags incompat_flags IncompatFeatures } type argRange [8]byte func (u argRange) asN() uint64 { return order.Uint64(u[:]) } func (u argRange) asMinMax() (min, max uint32) { return order.Uint32(u[:4]), order.Uint32(u[4:]) } // balance control ioctl modes //#define BTRFS_BALANCE_CTL_PAUSE 1 //#define BTRFS_BALANCE_CTL_CANCEL 2 //#define BTRFS_BALANCE_CTL_RESUME 3 // this is packed, because it should be exactly the same as its disk // byte order counterpart (struct btrfs_disk_balance_args) type btrfs_balance_args struct { profiles uint64 // usage filter // BTRFS_BALANCE_ARGS_USAGE with a single value means '0..N' // BTRFS_BALANCE_ARGS_USAGE_RANGE - range syntax, min..max usage argRange devid uint64 pstart uint64 pend uint64 vstart uint64 vend uint64 target uint64 flags uint64 // BTRFS_BALANCE_ARGS_LIMIT with value 'limit' (limit number of processed chunks) // BTRFS_BALANCE_ARGS_LIMIT_RANGE - the extend version can use minimum and maximum limit argRange stripes_min uint32 stripes_max uint32 _ [48]byte } // Report balance progress to userspace. 
// // btrfs_balance_progress type BalanceProgress struct { Expected uint64 // estimated # of chunks that will be relocated to fulfill the request Considered uint64 // # of chunks we have considered so far Completed uint64 // # of chunks relocated so far } type BalanceState uint64 const ( BalanceStateRunning BalanceState = (1 << 0) BalanceStatePauseReq BalanceState = (1 << 1) BalanceStateCancelReq BalanceState = (1 << 2) ) type btrfs_ioctl_balance_args struct { flags BalanceFlags // in/out state BalanceState // out data btrfs_balance_args // in/out meta btrfs_balance_args // in/out sys btrfs_balance_args // in/out stat BalanceProgress // out _ [72 * 8]byte // pad to 1k } const _BTRFS_INO_LOOKUP_PATH_MAX = 4080 type btrfs_ioctl_ino_lookup_args struct { treeid objectID objectid objectID name [_BTRFS_INO_LOOKUP_PATH_MAX]byte } func (arg *btrfs_ioctl_ino_lookup_args) Name() string { n := 0 for i, b := range arg.name { if b == '\x00' { n = i break } } return string(arg.name[:n]) } type btrfs_ioctl_search_key struct { tree_id objectID // which root are we searching. 
0 is the tree of tree roots // keys returned will be >= min and <= max min_objectid objectID max_objectid objectID // keys returned will be >= min and <= max min_offset uint64 max_offset uint64 // max and min transids to search for min_transid uint64 max_transid uint64 // keys returned will be >= min and <= max min_type treeKeyType max_type treeKeyType // how many items did userland ask for, and how many are we returning nr_items uint32 _ [36]byte } type btrfs_ioctl_search_header struct { transid uint64 objectid objectID offset uint64 typ treeKeyType len uint32 } const _BTRFS_SEARCH_ARGS_BUFSIZE = (4096 - unsafe.Sizeof(btrfs_ioctl_search_key{})) // the buf is an array of search headers where // each header is followed by the actual item // the type field is expanded to 32 bits for alignment type btrfs_ioctl_search_args struct { key btrfs_ioctl_search_key buf [_BTRFS_SEARCH_ARGS_BUFSIZE]byte } // Extended version of TREE_SEARCH ioctl that can return more than 4k of bytes. // The allocated size of the buffer is set in buf_size. type btrfs_ioctl_search_args_v2 struct { key btrfs_ioctl_search_key // in/out - search parameters buf_size uint64 // in - size of buffer; out - on EOVERFLOW: needed size to store item //buf [0]uint64 // out - found items } // With a @src_length of zero, the range from @src_offset->EOF is cloned! 
type btrfs_ioctl_clone_range_args struct { src_fd int64 src_offset uint64 src_length uint64 dest_offset uint64 } // flags for the defrag range ioctl type defragRange uint64 const ( _BTRFS_DEFRAG_RANGE_COMPRESS defragRange = 1 _BTRFS_DEFRAG_RANGE_START_IO defragRange = 2 ) const _BTRFS_SAME_DATA_DIFFERS = 1 // For extent-same ioctl type btrfs_ioctl_same_extent_info struct { fd int64 // in - destination file logical_offset uint64 // in - start of extent in destination bytes_deduped uint64 // out - total # of bytes we were able to dedupe from this file // out; status of this dedupe operation: // 0 if dedup succeeds // < 0 for error // == BTRFS_SAME_DATA_DIFFERS if data differs status int32 reserved uint32 } type btrfs_ioctl_same_args struct { logical_offset uint64 // in - start of extent in source length uint64 // in - length of extent dest_count uint16 // in - total elements in info array _ [6]byte //info [0]btrfs_ioctl_same_extent_info } type btrfs_ioctl_defrag_range_args struct { start uint64 // start of the defrag operation len uint64 // number of bytes to defrag, use (u64)-1 to say all // flags for the operation, which can include turning // on compression for this one defrag flags uint64 // any extent bigger than this will be considered // already defragged. Use 0 to take the kernel default // Use 1 to say every single extent must be rewritten extent_thresh uint32 // which compression method to use if turning on compression // for this defrag operation. 
If unspecified, zlib will be used compress_type uint32 _ [16]byte // spare for later } type btrfs_ioctl_space_info struct { flags uint64 total_bytes uint64 used_bytes uint64 } type btrfs_ioctl_space_args struct { space_slots uint64 total_spaces uint64 //spaces [0]btrfs_ioctl_space_info } type btrfs_data_container struct { bytes_left uint32 // out -- bytes not needed to deliver output bytes_missing uint32 // out -- additional bytes needed for result elem_cnt uint32 // out elem_missed uint32 // out //val [0]uint64 } type btrfs_ioctl_ino_path_args struct { inum uint64 // in size uint64 // in _ [32]byte // struct btrfs_data_container *fspath; out fspath uint64 // out } type btrfs_ioctl_logical_ino_args struct { logical uint64 // in size uint64 // in _ [32]byte // struct btrfs_data_container *inodes; out inodes uint64 } // disk I/O failure stats const ( _BTRFS_DEV_STAT_WRITE_ERRS = iota // EIO or EREMOTEIO from lower layers _BTRFS_DEV_STAT_READ_ERRS // EIO or EREMOTEIO from lower layers _BTRFS_DEV_STAT_FLUSH_ERRS // EIO or EREMOTEIO from lower layers // stats for indirect indications for I/O failures // checksum error, bytenr error or contents is illegal: this is an // indication that the block was damaged during read or write, or written to // wrong location or read from wrong location _BTRFS_DEV_STAT_CORRUPTION_ERRS _BTRFS_DEV_STAT_GENERATION_ERRS // an indication that blocks have not been written _BTRFS_DEV_STAT_VALUES_MAX ) // Reset statistics after reading; needs SYS_ADMIN capability const _BTRFS_DEV_STATS_RESET = (1 << 0) type btrfs_ioctl_get_dev_stats struct { devid uint64 // in nr_items uint64 // in/out flags uint64 // in/out values [_BTRFS_DEV_STAT_VALUES_MAX]uint64 // out values _ [128 - 2 - _BTRFS_DEV_STAT_VALUES_MAX]uint64 // pad to 1k } const ( _BTRFS_QUOTA_CTL_ENABLE = 1 _BTRFS_QUOTA_CTL_DISABLE = 2 // 3 has formerly been reserved for BTRFS_QUOTA_CTL_RESCAN ) type btrfs_ioctl_quota_ctl_args struct { cmd uint64 status uint64 } type 
btrfs_ioctl_quota_rescan_args struct { flags uint64 progress uint64 _ [6]uint64 } type btrfs_ioctl_qgroup_assign_args struct { assign uint64 src uint64 dst uint64 } type btrfs_ioctl_qgroup_create_args struct { create uint64 qgroupid uint64 } type btrfs_ioctl_timespec struct { sec uint64 nsec uint32 } type btrfs_ioctl_received_subvol_args struct { uuid UUID // in stransid uint64 // in rtransid uint64 // out stime btrfs_ioctl_timespec // in rtime btrfs_ioctl_timespec // out flags uint64 // in _ [16]uint64 // in } const ( // Caller doesn't want file data in the send stream, even if the // search of clone sources doesn't find an extent. UPDATE_EXTENT // commands will be sent instead of WRITE commands. _BTRFS_SEND_FLAG_NO_FILE_DATA = 0x1 // Do not add the leading stream header. Used when multiple snapshots // are sent back to back. _BTRFS_SEND_FLAG_OMIT_STREAM_HEADER = 0x2 // Omit the command at the end of the stream that indicated the end // of the stream. This option is used when multiple snapshots are // sent back to back. 
_BTRFS_SEND_FLAG_OMIT_END_CMD = 0x4 _BTRFS_SEND_FLAG_MASK = _BTRFS_SEND_FLAG_NO_FILE_DATA | _BTRFS_SEND_FLAG_OMIT_STREAM_HEADER | _BTRFS_SEND_FLAG_OMIT_END_CMD ) type btrfs_ioctl_send_args struct { send_fd int64 // in clone_sources_count uint64 // in clone_sources *objectID // in parent_root objectID // in flags uint64 // in _ [4]uint64 // in } var ( _BTRFS_IOC_SNAP_CREATE = ioctl.IOW(ioctlMagic, 1, unsafe.Sizeof(btrfs_ioctl_vol_args{})) _BTRFS_IOC_DEFRAG = ioctl.IOW(ioctlMagic, 2, unsafe.Sizeof(btrfs_ioctl_vol_args{})) _BTRFS_IOC_RESIZE = ioctl.IOW(ioctlMagic, 3, unsafe.Sizeof(btrfs_ioctl_vol_args{})) _BTRFS_IOC_SCAN_DEV = ioctl.IOW(ioctlMagic, 4, unsafe.Sizeof(btrfs_ioctl_vol_args{})) _BTRFS_IOC_TRANS_START = ioctl.IO(ioctlMagic, 6) _BTRFS_IOC_TRANS_END = ioctl.IO(ioctlMagic, 7) _BTRFS_IOC_SYNC = ioctl.IO(ioctlMagic, 8) _BTRFS_IOC_CLONE = ioctl.IOW(ioctlMagic, 9, 4) // int32 _BTRFS_IOC_ADD_DEV = ioctl.IOW(ioctlMagic, 10, unsafe.Sizeof(btrfs_ioctl_vol_args{})) _BTRFS_IOC_RM_DEV = ioctl.IOW(ioctlMagic, 11, unsafe.Sizeof(btrfs_ioctl_vol_args{})) _BTRFS_IOC_BALANCE = ioctl.IOW(ioctlMagic, 12, unsafe.Sizeof(btrfs_ioctl_vol_args{})) _BTRFS_IOC_CLONE_RANGE = ioctl.IOW(ioctlMagic, 13, unsafe.Sizeof(btrfs_ioctl_clone_range_args{})) _BTRFS_IOC_SUBVOL_CREATE = ioctl.IOW(ioctlMagic, 14, unsafe.Sizeof(btrfs_ioctl_vol_args{})) _BTRFS_IOC_SNAP_DESTROY = ioctl.IOW(ioctlMagic, 15, unsafe.Sizeof(btrfs_ioctl_vol_args{})) _BTRFS_IOC_DEFRAG_RANGE = ioctl.IOW(ioctlMagic, 16, unsafe.Sizeof(btrfs_ioctl_defrag_range_args{})) _BTRFS_IOC_TREE_SEARCH = ioctl.IOWR(ioctlMagic, 17, unsafe.Sizeof(btrfs_ioctl_search_args{})) _BTRFS_IOC_INO_LOOKUP = ioctl.IOWR(ioctlMagic, 18, unsafe.Sizeof(btrfs_ioctl_ino_lookup_args{})) _BTRFS_IOC_DEFAULT_SUBVOL = ioctl.IOW(ioctlMagic, 19, 8) // uint64 _BTRFS_IOC_SPACE_INFO = ioctl.IOWR(ioctlMagic, 20, unsafe.Sizeof(btrfs_ioctl_space_args{})) _BTRFS_IOC_START_SYNC = ioctl.IOR(ioctlMagic, 24, 8) // uint64 _BTRFS_IOC_WAIT_SYNC = ioctl.IOW(ioctlMagic, 22, 8) // 
uint64 _BTRFS_IOC_SNAP_CREATE_V2 = ioctl.IOW(ioctlMagic, 23, unsafe.Sizeof(btrfs_ioctl_vol_args_v2{})) _BTRFS_IOC_SUBVOL_CREATE_V2 = ioctl.IOW(ioctlMagic, 24, unsafe.Sizeof(btrfs_ioctl_vol_args_v2{})) _BTRFS_IOC_SUBVOL_GETFLAGS = ioctl.IOR(ioctlMagic, 25, 8) // uint64 _BTRFS_IOC_SUBVOL_SETFLAGS = ioctl.IOW(ioctlMagic, 26, 8) // uint64 _BTRFS_IOC_SCRUB = ioctl.IOWR(ioctlMagic, 27, unsafe.Sizeof(btrfs_ioctl_scrub_args{})) _BTRFS_IOC_SCRUB_CANCEL = ioctl.IO(ioctlMagic, 28) _BTRFS_IOC_SCRUB_PROGRESS = ioctl.IOWR(ioctlMagic, 29, unsafe.Sizeof(btrfs_ioctl_scrub_args{})) _BTRFS_IOC_DEV_INFO = ioctl.IOWR(ioctlMagic, 30, unsafe.Sizeof(btrfs_ioctl_dev_info_args{})) _BTRFS_IOC_FS_INFO = ioctl.IOR(ioctlMagic, 31, unsafe.Sizeof(btrfs_ioctl_fs_info_args{})) _BTRFS_IOC_BALANCE_V2 = ioctl.IOWR(ioctlMagic, 32, unsafe.Sizeof(btrfs_ioctl_balance_args{})) _BTRFS_IOC_BALANCE_CTL = ioctl.IOW(ioctlMagic, 33, 4) // int32 _BTRFS_IOC_BALANCE_PROGRESS = ioctl.IOR(ioctlMagic, 34, unsafe.Sizeof(btrfs_ioctl_balance_args{})) _BTRFS_IOC_INO_PATHS = ioctl.IOWR(ioctlMagic, 35, unsafe.Sizeof(btrfs_ioctl_ino_path_args{})) _BTRFS_IOC_LOGICAL_INO = ioctl.IOWR(ioctlMagic, 36, unsafe.Sizeof(btrfs_ioctl_ino_path_args{})) _BTRFS_IOC_SET_RECEIVED_SUBVOL = ioctl.IOWR(ioctlMagic, 37, unsafe.Sizeof(btrfs_ioctl_received_subvol_args{})) _BTRFS_IOC_SEND = ioctl.IOW(ioctlMagic, 38, unsafe.Sizeof(btrfs_ioctl_send_args{})) _BTRFS_IOC_DEVICES_READY = ioctl.IOR(ioctlMagic, 39, unsafe.Sizeof(btrfs_ioctl_vol_args{})) _BTRFS_IOC_QUOTA_CTL = ioctl.IOWR(ioctlMagic, 40, unsafe.Sizeof(btrfs_ioctl_quota_ctl_args{})) _BTRFS_IOC_QGROUP_ASSIGN = ioctl.IOW(ioctlMagic, 41, unsafe.Sizeof(btrfs_ioctl_qgroup_assign_args{})) _BTRFS_IOC_QGROUP_CREATE = ioctl.IOW(ioctlMagic, 42, unsafe.Sizeof(btrfs_ioctl_qgroup_create_args{})) _BTRFS_IOC_QGROUP_LIMIT = ioctl.IOR(ioctlMagic, 43, unsafe.Sizeof(btrfs_ioctl_qgroup_limit_args{})) _BTRFS_IOC_QUOTA_RESCAN = ioctl.IOW(ioctlMagic, 44, unsafe.Sizeof(btrfs_ioctl_quota_rescan_args{})) 
_BTRFS_IOC_QUOTA_RESCAN_STATUS = ioctl.IOR(ioctlMagic, 45, unsafe.Sizeof(btrfs_ioctl_quota_rescan_args{})) _BTRFS_IOC_QUOTA_RESCAN_WAIT = ioctl.IO(ioctlMagic, 46) _BTRFS_IOC_GET_FSLABEL = ioctl.IOR(ioctlMagic, 49, labelSize) _BTRFS_IOC_SET_FSLABEL = ioctl.IOW(ioctlMagic, 50, labelSize) _BTRFS_IOC_GET_DEV_STATS = ioctl.IOWR(ioctlMagic, 52, unsafe.Sizeof(btrfs_ioctl_get_dev_stats{})) _BTRFS_IOC_DEV_REPLACE = ioctl.IOWR(ioctlMagic, 53, unsafe.Sizeof(btrfs_ioctl_dev_replace_args_u1{})) _BTRFS_IOC_FILE_EXTENT_SAME = ioctl.IOWR(ioctlMagic, 54, unsafe.Sizeof(btrfs_ioctl_same_args{})) _BTRFS_IOC_GET_FEATURES = ioctl.IOR(ioctlMagic, 57, unsafe.Sizeof(btrfs_ioctl_feature_flags{})) _BTRFS_IOC_SET_FEATURES = ioctl.IOW(ioctlMagic, 57, unsafe.Sizeof([2]btrfs_ioctl_feature_flags{})) _BTRFS_IOC_GET_SUPPORTED_FEATURES = ioctl.IOR(ioctlMagic, 57, unsafe.Sizeof([3]btrfs_ioctl_feature_flags{})) ) func iocSnapCreate(f *os.File, in *btrfs_ioctl_vol_args) error { return ioctl.Do(f, _BTRFS_IOC_SNAP_CREATE, in) } func iocSnapCreateV2(f *os.File, in *btrfs_ioctl_vol_args_v2) error { return ioctl.Do(f, _BTRFS_IOC_SNAP_CREATE_V2, in) } func iocDefrag(f *os.File, out *btrfs_ioctl_vol_args) error { return ioctl.Do(f, _BTRFS_IOC_DEFRAG, out) } func iocResize(f *os.File, in *btrfs_ioctl_vol_args) error { return ioctl.Do(f, _BTRFS_IOC_RESIZE, in) } func iocScanDev(f *os.File, out *btrfs_ioctl_vol_args) error { return ioctl.Do(f, _BTRFS_IOC_SCAN_DEV, out) } func iocTransStart(f *os.File) error { return ioctl.Do(f, _BTRFS_IOC_TRANS_START, nil) } func iocTransEnd(f *os.File) error { return ioctl.Do(f, _BTRFS_IOC_TRANS_END, nil) } func iocSync(f *os.File) error { return ioctl.Do(f, _BTRFS_IOC_SYNC, nil) } func iocClone(dst, src *os.File) error { return ioctl.Ioctl(dst, _BTRFS_IOC_CLONE, src.Fd()) } func iocAddDev(f *os.File, out *btrfs_ioctl_vol_args) error { return ioctl.Do(f, _BTRFS_IOC_ADD_DEV, out) } func iocRmDev(f *os.File, out *btrfs_ioctl_vol_args) error { return ioctl.Do(f, _BTRFS_IOC_RM_DEV, 
out) } func iocBalance(f *os.File, out *btrfs_ioctl_vol_args) error { return ioctl.Do(f, _BTRFS_IOC_BALANCE, out) } func iocCloneRange(f *os.File, out *btrfs_ioctl_clone_range_args) error { return ioctl.Do(f, _BTRFS_IOC_CLONE_RANGE, out) } func iocSubvolCreate(f *os.File, in *btrfs_ioctl_vol_args) error { return ioctl.Do(f, _BTRFS_IOC_SUBVOL_CREATE, in) } func iocSubvolCreateV2(f *os.File, in *btrfs_ioctl_vol_args_v2) error { return ioctl.Do(f, _BTRFS_IOC_SUBVOL_CREATE, in) } func iocSnapDestroy(f *os.File, in *btrfs_ioctl_vol_args) error { return ioctl.Do(f, _BTRFS_IOC_SNAP_DESTROY, in) } func iocDefragRange(f *os.File, out *btrfs_ioctl_defrag_range_args) error { return ioctl.Do(f, _BTRFS_IOC_DEFRAG_RANGE, out) } func iocTreeSearch(f *os.File, out *btrfs_ioctl_search_args) error { return ioctl.Do(f, _BTRFS_IOC_TREE_SEARCH, out) } func iocInoLookup(f *os.File, out *btrfs_ioctl_ino_lookup_args) error { return ioctl.Do(f, _BTRFS_IOC_INO_LOOKUP, out) } func iocDefaultSubvol(f *os.File, out *uint64) error { return ioctl.Do(f, _BTRFS_IOC_DEFAULT_SUBVOL, out) } type spaceFlags uint64 func (f spaceFlags) BlockGroup() blockGroup { return blockGroup(f) & _BTRFS_BLOCK_GROUP_MASK } type spaceInfo struct { Flags spaceFlags TotalBytes uint64 UsedBytes uint64 } func iocSpaceInfo(f *os.File) ([]spaceInfo, error) { arg := &btrfs_ioctl_space_args{} if err := ioctl.Do(f, _BTRFS_IOC_SPACE_INFO, arg); err != nil { return nil, err } n := arg.total_spaces if n == 0 { return nil, nil } const ( argSize = unsafe.Sizeof(btrfs_ioctl_space_args{}) infoSize = unsafe.Sizeof(btrfs_ioctl_space_info{}) ) buf := make([]byte, argSize+uintptr(n)*infoSize) basePtr := unsafe.Pointer(&buf[0]) arg = (*btrfs_ioctl_space_args)(basePtr) arg.space_slots = n if err := ioctl.Do(f, _BTRFS_IOC_SPACE_INFO, arg); err != nil { return nil, err } else if arg.total_spaces == 0 { return nil, nil } if n > arg.total_spaces { n = arg.total_spaces } out := make([]spaceInfo, n) ptr := uintptr(basePtr) + argSize for i := 0; 
i < int(n); i++ { info := (*btrfs_ioctl_space_info)(unsafe.Pointer(ptr)) out[i] = spaceInfo{ Flags: spaceFlags(info.flags), TotalBytes: info.total_bytes, UsedBytes: info.used_bytes, } ptr += infoSize } return out, nil } func iocStartSync(f *os.File, out *uint64) error { return ioctl.Do(f, _BTRFS_IOC_START_SYNC, out) } func iocWaitSync(f *os.File, out *uint64) error { return ioctl.Do(f, _BTRFS_IOC_WAIT_SYNC, out) } func iocSubvolGetflags(f *os.File) (out SubvolFlags, err error) { err = ioctl.Do(f, _BTRFS_IOC_SUBVOL_GETFLAGS, &out) return } func iocSubvolSetflags(f *os.File, flags SubvolFlags) error { v := uint64(flags) return ioctl.Do(f, _BTRFS_IOC_SUBVOL_SETFLAGS, &v) } func iocScrub(f *os.File, out *btrfs_ioctl_scrub_args) error { return ioctl.Do(f, _BTRFS_IOC_SCRUB, out) } func iocScrubCancel(f *os.File) error { return ioctl.Do(f, _BTRFS_IOC_SCRUB_CANCEL, nil) } func iocScrubProgress(f *os.File, out *btrfs_ioctl_scrub_args) error { return ioctl.Do(f, _BTRFS_IOC_SCRUB_PROGRESS, out) } func iocFsInfo(f *os.File) (out btrfs_ioctl_fs_info_args, err error) { err = ioctl.Do(f, _BTRFS_IOC_FS_INFO, &out) return } func iocDevInfo(f *os.File, devid uint64, uuid UUID) (out btrfs_ioctl_dev_info_args, err error) { out.devid = devid out.uuid = uuid err = ioctl.Do(f, _BTRFS_IOC_DEV_INFO, &out) return } func iocBalanceV2(f *os.File, out *btrfs_ioctl_balance_args) error { return ioctl.Do(f, _BTRFS_IOC_BALANCE_V2, out) } func iocBalanceCtl(f *os.File, out *int32) error { return ioctl.Do(f, _BTRFS_IOC_BALANCE_CTL, out) } func iocBalanceProgress(f *os.File, out *btrfs_ioctl_balance_args) error { return ioctl.Do(f, _BTRFS_IOC_BALANCE_PROGRESS, out) } func iocInoPaths(f *os.File, out *btrfs_ioctl_ino_path_args) error { return ioctl.Do(f, _BTRFS_IOC_INO_PATHS, out) } func iocLogicalIno(f *os.File, out *btrfs_ioctl_ino_path_args) error { return ioctl.Do(f, _BTRFS_IOC_LOGICAL_INO, out) } func iocSetReceivedSubvol(f *os.File, out *btrfs_ioctl_received_subvol_args) error { return 
ioctl.Do(f, _BTRFS_IOC_SET_RECEIVED_SUBVOL, out) } func iocSend(f *os.File, in *btrfs_ioctl_send_args) error { return ioctl.Do(f, _BTRFS_IOC_SEND, in) } func iocDevicesReady(f *os.File, out *btrfs_ioctl_vol_args) error { return ioctl.Do(f, _BTRFS_IOC_DEVICES_READY, out) } func iocQuotaCtl(f *os.File, out *btrfs_ioctl_quota_ctl_args) error { return ioctl.Do(f, _BTRFS_IOC_QUOTA_CTL, out) } func iocQgroupAssign(f *os.File, out *btrfs_ioctl_qgroup_assign_args) error { return ioctl.Do(f, _BTRFS_IOC_QGROUP_ASSIGN, out) } func iocQgroupCreate(f *os.File, out *btrfs_ioctl_qgroup_create_args) error { return ioctl.Do(f, _BTRFS_IOC_QGROUP_CREATE, out) } func iocQgroupLimit(f *os.File, out *btrfs_ioctl_qgroup_limit_args) error { return ioctl.Do(f, _BTRFS_IOC_QGROUP_LIMIT, out) } func iocQuotaRescan(f *os.File, out *btrfs_ioctl_quota_rescan_args) error { return ioctl.Do(f, _BTRFS_IOC_QUOTA_RESCAN, out) } func iocQuotaRescanStatus(f *os.File, out *btrfs_ioctl_quota_rescan_args) error { return ioctl.Do(f, _BTRFS_IOC_QUOTA_RESCAN_STATUS, out) } func iocQuotaRescanWait(f *os.File) error { return ioctl.Do(f, _BTRFS_IOC_QUOTA_RESCAN_WAIT, nil) } func iocGetFslabel(f *os.File, out *[labelSize]byte) error { return ioctl.Do(f, _BTRFS_IOC_GET_FSLABEL, out) } func iocSetFslabel(f *os.File, out *[labelSize]byte) error { return ioctl.Do(f, _BTRFS_IOC_SET_FSLABEL, out) } func iocGetDevStats(f *os.File, out *btrfs_ioctl_get_dev_stats) error { return ioctl.Do(f, _BTRFS_IOC_GET_DEV_STATS, out) } //func iocDevReplace(f *os.File, out *btrfs_ioctl_dev_replace_args) error { // return ioctl.Do(f, _BTRFS_IOC_DEV_REPLACE, out) //} func iocFileExtentSame(f *os.File, out *btrfs_ioctl_same_args) error { return ioctl.Do(f, _BTRFS_IOC_FILE_EXTENT_SAME, out) } func iocSetFeatures(f *os.File, out *[2]btrfs_ioctl_feature_flags) error { return ioctl.Do(f, _BTRFS_IOC_SET_FEATURES, out) } 
golang-github-dennwc-btrfs-0.0~git20240418.0167142/mtab/000077500000000000000000000000001461022635500217735ustar00rootroot00000000000000golang-github-dennwc-btrfs-0.0~git20240418.0167142/mtab/mtab.go000066400000000000000000000013361461022635500232500ustar00rootroot00000000000000// Package mtab contains tools to work with /etc/mtab file. package mtab import ( "bufio" "io" "os" "strings" ) type MountPoint struct { Dev string Mount string Type string Opts string } // Mounts returns a list of mount point from /etc/mtab. func Mounts() ([]MountPoint, error) { file, err := os.Open("/etc/mtab") if err != nil { return nil, err } defer file.Close() r := bufio.NewReader(file) var out []MountPoint for { line, err := r.ReadString('\n') if err == io.EOF { break } else if err != nil { return nil, err } fields := strings.Fields(line) out = append(out, MountPoint{ Dev: fields[0], Mount: fields[1], Type: fields[2], Opts: fields[3], }) } return out, nil } golang-github-dennwc-btrfs-0.0~git20240418.0167142/receive.go000066400000000000000000000021711461022635500230220ustar00rootroot00000000000000package btrfs import ( "bytes" "errors" "io" "os" "os/exec" "path/filepath" "syscall" ) const nativeReceive = false func Receive(r io.Reader, dstDir string) error { if !nativeReceive { buf := bytes.NewBuffer(nil) cmd := exec.Command("btrfs", "receive", dstDir) cmd.Stdin = r cmd.Stderr = buf if err := cmd.Run(); err != nil { if buf.Len() != 0 { return errors.New(buf.String()) } return err } return nil } var err error dstDir, err = filepath.Abs(dstDir) if err != nil { return err } realMnt, err := findMountRoot(dstDir) if err != nil { return err } dir, err := os.OpenFile(dstDir, os.O_RDONLY|syscall.O_NOATIME, 0755) if err != nil { return err } mnt, err := os.OpenFile(realMnt, os.O_RDONLY|syscall.O_NOATIME, 0755) if err != nil { return err } // We want to resolve the path to the subvolume we're sitting in // so that we can adjust the paths of any subvols we want to receive in. 
subvolID, err := getFileRootID(mnt) if err != nil { return err } //sr, err := send.NewStreamReader(r) //if err != nil { // return err //} _, _ = dir, subvolID panic("not implemented") } golang-github-dennwc-btrfs-0.0~git20240418.0167142/send.go000066400000000000000000000140701461022635500223320ustar00rootroot00000000000000package btrfs import ( "fmt" "io" "os" "path/filepath" "unsafe" ) func Send(w io.Writer, parent string, subvols ...string) error { if len(subvols) == 0 { return nil } // use first send subvol to determine mount_root subvol, err := filepath.Abs(subvols[0]) if err != nil { return err } mountRoot, err := findMountRoot(subvol) if err == os.ErrNotExist { return fmt.Errorf("cannot find a mountpoint for %s", subvol) } else if err != nil { return err } var ( cloneSrc []objectID parentID objectID ) if parent != "" { parent, err = filepath.Abs(parent) if err != nil { return err } id, err := getPathRootID(parent) if err != nil { return fmt.Errorf("cannot get parent root id: %v", err) } parentID = id cloneSrc = append(cloneSrc, id) } // check all subvolumes paths := make([]string, 0, len(subvols)) for _, sub := range subvols { sub, err = filepath.Abs(sub) if err != nil { return err } paths = append(paths, sub) mount, err := findMountRoot(sub) if err != nil { return fmt.Errorf("cannot find mount root for %v: %v", sub, err) } else if mount != mountRoot { return fmt.Errorf("all subvolumes must be from the same filesystem (%s is not)", sub) } ok, err := IsReadOnly(sub) if err != nil { return err } else if !ok { return fmt.Errorf("subvolume %s is not read-only", sub) } } mfs, err := Open(mountRoot, true) if err != nil { return err } defer mfs.Close() full := len(cloneSrc) == 0 for i, sub := range paths { var rootID objectID if !full && parent != "" { rel, err := filepath.Rel(mountRoot, sub) if err != nil { return err } si, err := subvolSearchByPath(mfs.f, rel) if err != nil { return fmt.Errorf("cannot find subvolume %s: %v", rel, err) } rootID = 
objectID(si.RootID) parentID, err = findGoodParent(mfs.f, rootID, cloneSrc) if err != nil { return fmt.Errorf("cannot find good parent for %v: %v", rel, err) } } fs, err := Open(sub, true) if err != nil { return err } var flags uint64 if i != 0 { // not first flags |= _BTRFS_SEND_FLAG_OMIT_STREAM_HEADER } if i < len(paths)-1 { // not last flags |= _BTRFS_SEND_FLAG_OMIT_END_CMD } err = send(w, fs.f, parentID, cloneSrc, flags) fs.Close() if err != nil { return fmt.Errorf("error sending %s: %v", sub, err) } if !full && parent != "" { cloneSrc = append(cloneSrc, rootID) } } return nil } func send(w io.Writer, subvol *os.File, parent objectID, sources []objectID, flags uint64) error { pr, pw, err := os.Pipe() if err != nil { return err } errc := make(chan error, 1) go func() { defer pr.Close() _, err := io.Copy(w, pr) errc <- err }() fd := pw.Fd() wait := func() error { pw.Close() return <-errc } args := &btrfs_ioctl_send_args{ send_fd: int64(fd), parent_root: parent, flags: flags, } if len(sources) != 0 { args.clone_sources = &sources[0] args.clone_sources_count = uint64(len(sources)) } if err := iocSend(subvol, args); err != nil { wait() return err } return wait() } // readRootItem reads a root item from the tree. // // TODO(dennwc): support older kernels: // In case we detect a root item smaller then sizeof(root_item), // we know it's an old version of the root structure and initialize all new fields to zero. // The same happens if we detect mismatching generation numbers as then we know the root was // once mounted with an older kernel that was not aware of the root item structure change. func readRootItem(mnt *os.File, rootID objectID) (*rootItem, error) { sk := btrfs_ioctl_search_key{ tree_id: rootTreeObjectid, // There may be more than one ROOT_ITEM key if there are // snapshots pending deletion, we have to loop through them. 
min_objectid: rootID, max_objectid: rootID, min_type: rootItemKey, max_type: rootItemKey, max_offset: maxUint64, max_transid: maxUint64, nr_items: 4096, } for ; sk.min_offset < maxUint64; sk.min_offset++ { results, err := treeSearchRaw(mnt, sk) if err != nil { return nil, err } else if len(results) == 0 { break } for _, r := range results { sk.min_objectid = r.ObjectID sk.min_type = r.Type sk.min_offset = r.Offset if r.ObjectID > rootID { break } if r.ObjectID == rootID && r.Type == rootItemKey { const sz = int(unsafe.Sizeof(btrfs_root_item_raw{})) if len(r.Data) > sz { return nil, fmt.Errorf("btrfs_root_item is larger than expected; kernel is newer than the library") } else if len(r.Data) < sz { // TODO return nil, fmt.Errorf("btrfs_root_item is smaller then expected; kernel version is too old") } p := asRootItem(r.Data).Decode() return &p, nil } } results = nil if sk.min_type != rootItemKey || sk.min_objectid != rootID { break } } return nil, ErrNotFound } func getParent(mnt *os.File, rootID objectID) (*SubvolInfo, error) { st, err := subvolSearchByRootID(mnt, rootID, "") if err != nil { return nil, fmt.Errorf("cannot find subvolume %d to determine parent: %v", rootID, err) } return subvolSearchByUUID(mnt, st.ParentUUID) } func findGoodParent(mnt *os.File, rootID objectID, cloneSrc []objectID) (objectID, error) { parent, err := getParent(mnt, rootID) if err != nil { return 0, fmt.Errorf("get parent failed: %v", err) } for _, id := range cloneSrc { if id == objectID(parent.RootID) { return objectID(parent.RootID), nil } } var ( bestParent *SubvolInfo bestDiff uint64 = maxUint64 ) for _, id := range cloneSrc { parent2, err := getParent(mnt, id) if err == ErrNotFound { continue } else if err != nil { return 0, err } if parent2.RootID != parent.RootID { continue } parent2, err = subvolSearchByRootID(mnt, id, "") if err != nil { return 0, err } diff := int64(parent2.CTransID - parent.CTransID) if diff < 0 { diff = -diff } if uint64(diff) < bestDiff { bestParent, 
bestDiff = parent2, uint64(diff) } } if bestParent != nil { return objectID(bestParent.RootID), nil } if !parent.ParentUUID.IsZero() { return findGoodParent(mnt, objectID(parent.RootID), cloneSrc) } return 0, ErrNotFound } golang-github-dennwc-btrfs-0.0~git20240418.0167142/send/000077500000000000000000000000001461022635500220015ustar00rootroot00000000000000golang-github-dennwc-btrfs-0.0~git20240418.0167142/send/send.go000066400000000000000000000241231461022635500232630ustar00rootroot00000000000000package send import ( "errors" "fmt" "github.com/dennwc/btrfs" "io" "io/ioutil" "time" ) func NewStreamReader(r io.Reader) (*StreamReader, error) { // read magic and version buf := make([]byte, len(sendStreamMagic)+4) _, err := io.ReadFull(r, buf) if err != nil { return nil, fmt.Errorf("cannot read magic: %v", err) } else if string(buf[:sendStreamMagicSize]) != sendStreamMagic { return nil, errors.New("unexpected stream header") } version := sendEndianess.Uint32(buf[sendStreamMagicSize:]) if version != sendStreamVersion { return nil, fmt.Errorf("stream version %d not supported", version) } return &StreamReader{r: r}, nil } type StreamReader struct { r io.Reader buf [cmdHeaderSize]byte } func (r *StreamReader) readCmdHeader() (h cmdHeader, err error) { _, err = io.ReadFull(r.r, r.buf[:cmdHeaderSize]) if err == io.EOF { return } else if err != nil { err = fmt.Errorf("cannot read command header: %v", err) return } err = h.Unmarshal(r.buf[:cmdHeaderSize]) // TODO: check CRC return } type SendTLV struct { Attr sendCmdAttr Val interface{} } func (r *StreamReader) readTLV(rd io.Reader) (*SendTLV, error) { _, err := io.ReadFull(rd, r.buf[:tlvHeaderSize]) if err == io.EOF { return nil, err } else if err != nil { return nil, fmt.Errorf("cannot read tlv header: %v", err) } var h tlvHeader if err = h.Unmarshal(r.buf[:tlvHeaderSize]); err != nil { return nil, err } typ := sendCmdAttr(h.Type) if sendCmdAttr(typ) > sendAttrMax { // || th.Len > _BTRFS_SEND_BUF_SIZE { return nil, 
fmt.Errorf("invalid tlv in cmd: %q", typ) } buf := make([]byte, h.Len) _, err = io.ReadFull(rd, buf) if err != nil { return nil, fmt.Errorf("cannot read tlv: %v", err) } var v interface{} switch typ { case sendAttrCtransid, sendAttrCloneCtransid, sendAttrUid, sendAttrGid, sendAttrMode, sendAttrIno, sendAttrFileOffset, sendAttrSize, sendAttrCloneOffset, sendAttrCloneLen: if len(buf) != 8 { return nil, fmt.Errorf("unexpected int64 size: %v", h.Len) } v = sendEndianess.Uint64(buf[:8]) case sendAttrPath, sendAttrPathTo, sendAttrClonePath, sendAttrXattrName: v = string(buf) case sendAttrData, sendAttrXattrData: v = buf case sendAttrUuid, sendAttrCloneUuid: if h.Len != btrfs.UUIDSize { return nil, fmt.Errorf("unexpected UUID size: %v", h.Len) } var u btrfs.UUID copy(u[:], buf) v = u case sendAttrAtime, sendAttrMtime, sendAttrCtime, sendAttrOtime: if h.Len != 12 { return nil, fmt.Errorf("unexpected timestamp size: %v", h.Len) } v = time.Unix( // btrfs_timespec int64(sendEndianess.Uint64(buf[:8])), int64(sendEndianess.Uint32(buf[8:])), ) default: return nil, fmt.Errorf("unsupported tlv type: %v (len: %v)", typ, h.Len) } return &SendTLV{Attr: typ, Val: v}, nil } func (r *StreamReader) ReadCommand() (_ Cmd, gerr error) { h, err := r.readCmdHeader() if err != nil { return nil, err } var tlvs []SendTLV rd := io.LimitReader(r.r, int64(h.Len)) defer io.Copy(ioutil.Discard, rd) for { tlv, err := r.readTLV(rd) if err == io.EOF { break } else if err != nil { return nil, fmt.Errorf("command %v: %v", h.Cmd, err) } tlvs = append(tlvs, *tlv) } var c Cmd switch h.Cmd { case sendCmdEnd: c = &StreamEnd{} case sendCmdSubvol: c = &SubvolCmd{} case sendCmdSnapshot: c = &SnapshotCmd{} case sendCmdChown: c = &ChownCmd{} case sendCmdChmod: c = &ChmodCmd{} case sendCmdUtimes: c = &UTimesCmd{} case sendCmdMkdir: c = &MkdirCmd{} case sendCmdRename: c = &RenameCmd{} case sendCmdMkfile: c = &MkfileCmd{} case sendCmdWrite: c = &WriteCmd{} case sendCmdTruncate: c = &TruncateCmd{} } if c == nil { 
return &UnknownSendCmd{Kind: h.Cmd, Params: tlvs}, nil } if err := c.decode(tlvs); err != nil { return nil, err } return c, nil } type errUnexpectedAttrType struct { Cmd CmdType Val SendTLV } func (e errUnexpectedAttrType) Error() string { return fmt.Sprintf("unexpected type for %q (in %q): %T", e.Val.Attr, e.Cmd, e.Val.Val) } type errUnexpectedAttr struct { Cmd CmdType Val SendTLV } func (e errUnexpectedAttr) Error() string { return fmt.Sprintf("unexpected attr %q for %q (%T)", e.Val.Attr, e.Cmd, e.Val.Val) } type Cmd interface { Type() CmdType decode(tlvs []SendTLV) error } type UnknownSendCmd struct { Kind CmdType Params []SendTLV } func (c UnknownSendCmd) Type() CmdType { return c.Kind } func (c *UnknownSendCmd) decode(tlvs []SendTLV) error { c.Params = tlvs return nil } type StreamEnd struct{} func (c StreamEnd) Type() CmdType { return sendCmdEnd } func (c *StreamEnd) decode(tlvs []SendTLV) error { if len(tlvs) != 0 { return fmt.Errorf("unexpected TLVs for stream end command: %#v", tlvs) } return nil } type SubvolCmd struct { Path string UUID btrfs.UUID CTransID uint64 } func (c SubvolCmd) Type() CmdType { return sendCmdSubvol } func (c *SubvolCmd) decode(tlvs []SendTLV) error { for _, tlv := range tlvs { var ok bool switch tlv.Attr { case sendAttrPath: c.Path, ok = tlv.Val.(string) case sendAttrUuid: c.UUID, ok = tlv.Val.(btrfs.UUID) case sendAttrCtransid: c.CTransID, ok = tlv.Val.(uint64) default: return errUnexpectedAttr{Val: tlv, Cmd: c.Type()} } if !ok { return errUnexpectedAttrType{Val: tlv, Cmd: c.Type()} } } return nil } type SnapshotCmd struct { Path string UUID btrfs.UUID CTransID uint64 CloneUUID btrfs.UUID CloneTransID uint64 } func (c SnapshotCmd) Type() CmdType { return sendCmdSnapshot } func (c *SnapshotCmd) decode(tlvs []SendTLV) error { for _, tlv := range tlvs { var ok bool switch tlv.Attr { case sendAttrPath: c.Path, ok = tlv.Val.(string) case sendAttrUuid: c.UUID, ok = tlv.Val.(btrfs.UUID) case sendAttrCtransid: c.CTransID, ok = 
tlv.Val.(uint64) case sendAttrCloneUuid: c.CloneUUID, ok = tlv.Val.(btrfs.UUID) case sendAttrCloneCtransid: c.CloneTransID, ok = tlv.Val.(uint64) default: return errUnexpectedAttr{Val: tlv, Cmd: c.Type()} } if !ok { return errUnexpectedAttrType{Val: tlv, Cmd: c.Type()} } } return nil } type ChownCmd struct { Path string UID, GID uint64 } func (c ChownCmd) Type() CmdType { return sendCmdChown } func (c *ChownCmd) decode(tlvs []SendTLV) error { for _, tlv := range tlvs { var ok bool switch tlv.Attr { case sendAttrPath: c.Path, ok = tlv.Val.(string) case sendAttrUid: c.UID, ok = tlv.Val.(uint64) case sendAttrGid: c.GID, ok = tlv.Val.(uint64) default: return errUnexpectedAttr{Val: tlv, Cmd: c.Type()} } if !ok { return errUnexpectedAttrType{Val: tlv, Cmd: c.Type()} } } return nil } type ChmodCmd struct { Path string Mode uint64 } func (c ChmodCmd) Type() CmdType { return sendCmdChmod } func (c *ChmodCmd) decode(tlvs []SendTLV) error { for _, tlv := range tlvs { var ok bool switch tlv.Attr { case sendAttrPath: c.Path, ok = tlv.Val.(string) case sendAttrMode: c.Mode, ok = tlv.Val.(uint64) default: return errUnexpectedAttr{Val: tlv, Cmd: c.Type()} } if !ok { return errUnexpectedAttrType{Val: tlv, Cmd: c.Type()} } } return nil } type UTimesCmd struct { Path string ATime, MTime, CTime time.Time } func (c UTimesCmd) Type() CmdType { return sendCmdUtimes } func (c *UTimesCmd) decode(tlvs []SendTLV) error { for _, tlv := range tlvs { var ok bool switch tlv.Attr { case sendAttrPath: c.Path, ok = tlv.Val.(string) case sendAttrAtime: c.ATime, ok = tlv.Val.(time.Time) case sendAttrMtime: c.MTime, ok = tlv.Val.(time.Time) case sendAttrCtime: c.CTime, ok = tlv.Val.(time.Time) default: return errUnexpectedAttr{Val: tlv, Cmd: c.Type()} } if !ok { return errUnexpectedAttrType{Val: tlv, Cmd: c.Type()} } } return nil } type MkdirCmd struct { Path string Ino uint64 } func (c MkdirCmd) Type() CmdType { return sendCmdMkdir } func (c *MkdirCmd) decode(tlvs []SendTLV) error { for _, tlv := 
range tlvs { var ok bool switch tlv.Attr { case sendAttrPath: c.Path, ok = tlv.Val.(string) case sendAttrIno: c.Ino, ok = tlv.Val.(uint64) default: return errUnexpectedAttr{Val: tlv, Cmd: c.Type()} } if !ok { return errUnexpectedAttrType{Val: tlv, Cmd: c.Type()} } } return nil } type RenameCmd struct { From, To string } func (c RenameCmd) Type() CmdType { return sendCmdRename } func (c *RenameCmd) decode(tlvs []SendTLV) error { for _, tlv := range tlvs { var ok bool switch tlv.Attr { case sendAttrPath: c.From, ok = tlv.Val.(string) case sendAttrPathTo: c.To, ok = tlv.Val.(string) default: return errUnexpectedAttr{Val: tlv, Cmd: c.Type()} } if !ok { return errUnexpectedAttrType{Val: tlv, Cmd: c.Type()} } } return nil } type MkfileCmd struct { Path string Ino uint64 } func (c MkfileCmd) Type() CmdType { return sendCmdMkfile } func (c *MkfileCmd) decode(tlvs []SendTLV) error { for _, tlv := range tlvs { var ok bool switch tlv.Attr { case sendAttrPath: c.Path, ok = tlv.Val.(string) case sendAttrIno: c.Ino, ok = tlv.Val.(uint64) default: return errUnexpectedAttr{Val: tlv, Cmd: c.Type()} } if !ok { return errUnexpectedAttrType{Val: tlv, Cmd: c.Type()} } } return nil } type WriteCmd struct { Path string Off uint64 Data []byte } func (c WriteCmd) Type() CmdType { return sendCmdWrite } func (c *WriteCmd) decode(tlvs []SendTLV) error { for _, tlv := range tlvs { var ok bool switch tlv.Attr { case sendAttrPath: c.Path, ok = tlv.Val.(string) case sendAttrFileOffset: c.Off, ok = tlv.Val.(uint64) case sendAttrData: c.Data, ok = tlv.Val.([]byte) default: return errUnexpectedAttr{Val: tlv, Cmd: c.Type()} } if !ok { return errUnexpectedAttrType{Val: tlv, Cmd: c.Type()} } } return nil } type TruncateCmd struct { Path string Size uint64 } func (c TruncateCmd) Type() CmdType { return sendCmdTruncate } func (c *TruncateCmd) decode(tlvs []SendTLV) error { for _, tlv := range tlvs { var ok bool switch tlv.Attr { case sendAttrPath: c.Path, ok = tlv.Val.(string) case sendAttrSize: c.Size, 
ok = tlv.Val.(uint64) default: return errUnexpectedAttr{Val: tlv, Cmd: c.Type()} } if !ok { return errUnexpectedAttrType{Val: tlv, Cmd: c.Type()} } } return nil } golang-github-dennwc-btrfs-0.0~git20240418.0167142/send/send_h.go000066400000000000000000000060751461022635500236000ustar00rootroot00000000000000package send import ( "encoding/binary" "io" "strconv" ) var sendEndianess = binary.LittleEndian const ( sendStreamMagic = "btrfs-stream\x00" sendStreamMagicSize = len(sendStreamMagic) sendStreamVersion = 1 ) const ( sendBufSize = 64 * 1024 sendReadSize = 48 * 1024 ) const cmdHeaderSize = 10 type cmdHeader struct { Len uint32 // len excluding the header Cmd CmdType Crc uint32 // crc including the header with zero crc field } func (h *cmdHeader) Size() int { return cmdHeaderSize } func (h *cmdHeader) Unmarshal(p []byte) error { if len(p) < cmdHeaderSize { return io.ErrUnexpectedEOF } h.Len = sendEndianess.Uint32(p[0:]) h.Cmd = CmdType(sendEndianess.Uint16(p[4:])) h.Crc = sendEndianess.Uint32(p[6:]) return nil } const tlvHeaderSize = 4 type tlvHeader struct { Type uint16 Len uint16 // len excluding the header } func (h *tlvHeader) Size() int { return tlvHeaderSize } func (h *tlvHeader) Unmarshal(p []byte) error { if len(p) < tlvHeaderSize { return io.ErrUnexpectedEOF } h.Type = sendEndianess.Uint16(p[0:]) h.Len = sendEndianess.Uint16(p[2:]) return nil } type CmdType uint16 func (c CmdType) String() string { var name string if int(c) < len(cmdTypeNames) { name = cmdTypeNames[int(c)] } if name != "" { return name } return strconv.FormatInt(int64(c), 16) } var cmdTypeNames = []string{ "", "subvol", "snapshot", "mkfile", "mkdir", "mknod", "mkfifo", "mksock", "symlink", "rename", "link", "unlink", "rmdir", "set_xattr", "remove_xattr", "write", "clone", "truncate", "chmod", "chown", "utimes", "end", "update_extent", "", } const ( sendCmdUnspec = CmdType(iota) sendCmdSubvol sendCmdSnapshot sendCmdMkfile sendCmdMkdir sendCmdMknod sendCmdMkfifo sendCmdMksock sendCmdSymlink 
sendCmdRename sendCmdLink sendCmdUnlink sendCmdRmdir sendCmdSetXattr sendCmdRemoveXattr sendCmdWrite sendCmdClone sendCmdTruncate sendCmdChmod sendCmdChown sendCmdUtimes sendCmdEnd sendCmdUpdateExtent _sendCmdMax ) const sendCmdMax = _sendCmdMax - 1 type sendCmdAttr uint16 func (c sendCmdAttr) String() string { var name string if int(c) < len(sendAttrNames) { name = sendAttrNames[int(c)] } if name != "" { return name } return strconv.FormatInt(int64(c), 16) } const ( sendAttrUnspec = sendCmdAttr(iota) sendAttrUuid sendAttrCtransid sendAttrIno sendAttrSize sendAttrMode sendAttrUid sendAttrGid sendAttrRdev sendAttrCtime sendAttrMtime sendAttrAtime sendAttrOtime sendAttrXattrName sendAttrXattrData sendAttrPath sendAttrPathTo sendAttrPathLink sendAttrFileOffset sendAttrData sendAttrCloneUuid sendAttrCloneCtransid sendAttrClonePath sendAttrCloneOffset sendAttrCloneLen _sendAttrMax ) const sendAttrMax = _sendAttrMax - 1 var sendAttrNames = []string{ "", "uuid", "ctransid", "ino", "size", "mode", "uid", "gid", "rdev", "ctime", "mtime", "atime", "otime", "xattrname", "xattrdata", "path", "pathto", "pathlink", "fileoffset", "data", "cloneuuid", "clonectransid", "clonepath", "cloneoffset", "clonelen", "", } golang-github-dennwc-btrfs-0.0~git20240418.0167142/size_test.go000066400000000000000000000050531461022635500234130ustar00rootroot00000000000000package btrfs import ( "reflect" "testing" "unsafe" ) var caseSizes = []struct { obj interface{} size int }{ {obj: btrfs_ioctl_vol_args{}, size: 4096}, {obj: btrfs_qgroup_limit{}, size: 40}, {obj: btrfs_qgroup_inherit{}, size: 72}, {obj: btrfs_ioctl_qgroup_limit_args{}, size: 48}, {obj: btrfs_ioctl_vol_args_v2{}, size: 4096}, {obj: btrfs_scrub_progress{}, size: 120}, {obj: btrfs_ioctl_scrub_args{}, size: 1024}, {obj: btrfs_ioctl_dev_replace_start_params{}, size: 2072}, {obj: btrfs_ioctl_dev_replace_status_params{}, size: 48}, {obj: btrfs_ioctl_dev_replace_args_u1{}, size: 2600}, {obj: btrfs_ioctl_dev_replace_args_u2{}, size: 2600}, 
{obj: btrfs_ioctl_dev_info_args{}, size: 4096}, {obj: btrfs_ioctl_fs_info_args{}, size: 1024}, {obj: btrfs_ioctl_feature_flags{}, size: 24}, {obj: btrfs_balance_args{}, size: 136}, {obj: BalanceProgress{}, size: 24}, {obj: btrfs_ioctl_balance_args{}, size: 1024}, {obj: btrfs_ioctl_ino_lookup_args{}, size: 4096}, {obj: btrfs_ioctl_search_key{}, size: 104}, {obj: btrfs_ioctl_search_header{}, size: 32}, {obj: btrfs_ioctl_search_args{}, size: 4096}, {obj: btrfs_ioctl_search_args_v2{}, size: 112}, {obj: btrfs_ioctl_clone_range_args{}, size: 32}, {obj: btrfs_ioctl_same_extent_info{}, size: 32}, {obj: btrfs_ioctl_same_args{}, size: 24}, {obj: btrfs_ioctl_defrag_range_args{}, size: 48}, {obj: btrfs_ioctl_space_info{}, size: 24}, {obj: btrfs_ioctl_space_args{}, size: 16}, {obj: btrfs_data_container{}, size: 16}, {obj: btrfs_ioctl_ino_path_args{}, size: 56}, {obj: btrfs_ioctl_logical_ino_args{}, size: 56}, {obj: btrfs_ioctl_get_dev_stats{}, size: 1032}, {obj: btrfs_ioctl_quota_ctl_args{}, size: 16}, {obj: btrfs_ioctl_qgroup_assign_args{}, size: 24}, {obj: btrfs_ioctl_qgroup_create_args{}, size: 16}, {obj: btrfs_ioctl_timespec{}, size: 16}, {obj: btrfs_ioctl_received_subvol_args{}, size: 200}, {obj: btrfs_ioctl_send_args{}, size: 72}, //{obj:btrfs_timespec{},size:12}, //{obj:btrfs_root_ref{},size:18}, //{obj:btrfs_root_item{},size:439}, {obj: btrfs_root_item_raw{}, size: 439}, {obj: btrfs_root_item_raw_p1{}, size: 439 - 23 - int(unsafe.Sizeof(btrfs_root_item_raw_p3{}))}, {obj: btrfs_root_item_raw_p3{}, size: 439 - 23 - int(unsafe.Sizeof(btrfs_root_item_raw_p1{}))}, //{obj:btrfs_inode_item{},size:160}, {obj: btrfs_inode_item_raw{}, size: 160}, {obj: timeBlock{}, size: 4 * 12}, } func TestSizes(t *testing.T) { for _, c := range caseSizes { if sz := int(reflect.ValueOf(c.obj).Type().Size()); sz != c.size { t.Errorf("unexpected size of %T: %d (exp: %d)", c.obj, sz, c.size) } } } 
golang-github-dennwc-btrfs-0.0~git20240418.0167142/subvolume.go000066400000000000000000000207611461022635500234260ustar00rootroot00000000000000package btrfs import ( "fmt" "os" "path/filepath" "strings" "syscall" "time" ) func checkSubVolumeName(name string) bool { return name != "" && name[0] != 0 && !strings.ContainsRune(name, '/') && name != "." && name != ".." } func IsSubVolume(path string) (bool, error) { var st syscall.Stat_t if err := syscall.Stat(path, &st); err != nil { return false, &os.PathError{Op: "stat", Path: path, Err: err} } if objectID(st.Ino) != firstFreeObjectid || st.Mode&syscall.S_IFMT != syscall.S_IFDIR { return false, nil } return isBtrfs(path) } func CreateSubVolume(path string) error { var inherit *btrfs_qgroup_inherit // TODO cpath, err := filepath.Abs(path) if err != nil { return err } newName := filepath.Base(cpath) dstDir := filepath.Dir(cpath) if !checkSubVolumeName(newName) { return fmt.Errorf("invalid subvolume name: %s", newName) } else if len(newName) >= volNameMax { return fmt.Errorf("subvolume name too long: %s", newName) } dst, err := openDir(dstDir) if err != nil { return err } defer dst.Close() if inherit != nil { panic("not implemented") // TODO args := btrfs_ioctl_vol_args_v2{ flags: subvolQGroupInherit, btrfs_ioctl_vol_args_v2_u1: btrfs_ioctl_vol_args_v2_u1{ //size: qgroup_inherit_size(inherit), qgroup_inherit: inherit, }, } copy(args.name[:], newName) return iocSubvolCreateV2(dst, &args) } var args btrfs_ioctl_vol_args copy(args.name[:], newName) return iocSubvolCreate(dst, &args) } func DeleteSubVolume(path string) error { if ok, err := IsSubVolume(path); err != nil { return err } else if !ok { return fmt.Errorf("not a subvolume: %s", path) } cpath, err := filepath.Abs(path) if err != nil { return err } dname := filepath.Dir(cpath) vname := filepath.Base(cpath) dir, err := openDir(dname) if err != nil { return err } defer dir.Close() var args btrfs_ioctl_vol_args copy(args.name[:], vname) return iocSnapDestroy(dir, 
&args) } func SnapshotSubVolume(subvol, dst string, ro bool) error { if ok, err := IsSubVolume(subvol); err != nil { return err } else if !ok { return fmt.Errorf("not a subvolume: %s", subvol) } exists := false if st, err := os.Stat(dst); err != nil && !os.IsNotExist(err) { return err } else if err == nil { if !st.IsDir() { return fmt.Errorf("'%s' exists and it is not a directory", dst) } exists = true } var ( newName string dstDir string ) if exists { newName = filepath.Base(subvol) dstDir = dst } else { newName = filepath.Base(dst) dstDir = filepath.Dir(dst) } if !checkSubVolumeName(newName) { return fmt.Errorf("invalid snapshot name '%s'", newName) } else if len(newName) >= volNameMax { return fmt.Errorf("snapshot name too long '%s'", newName) } fdst, err := openDir(dstDir) if err != nil { return err } defer fdst.Close() // TODO: make SnapshotSubVolume a method on FS to use existing fd f, err := openDir(subvol) if err != nil { return fmt.Errorf("cannot open dest dir: %v", err) } defer f.Close() args := btrfs_ioctl_vol_args_v2{ fd: int64(f.Fd()), } if ro { args.flags |= SubvolReadOnly } // TODO //if inherit != nil { // args.flags |= subvolQGroupInherit // args.size = qgroup_inherit_size(inherit) // args.qgroup_inherit = inherit //} copy(args.name[:], newName) if err := iocSnapCreateV2(fdst, &args); err != nil { return fmt.Errorf("snapshot create failed: %v", err) } return nil } func IsReadOnly(path string) (bool, error) { f, err := GetFlags(path) if err != nil { return false, err } return f.ReadOnly(), nil } func GetFlags(path string) (SubvolFlags, error) { fs, err := Open(path, true) if err != nil { return 0, err } defer fs.Close() return fs.GetFlags() } func listSubVolumes(f *os.File, filter func(SubvolInfo) bool) (map[objectID]SubvolInfo, error) { sk := btrfs_ioctl_search_key{ // search in the tree of tree roots tree_id: rootTreeObjectid, // Set the min and max to backref keys. The search will // only send back this type of key now. 
min_type: rootItemKey, max_type: rootBackrefKey, min_objectid: firstFreeObjectid, // Set all the other params to the max, we'll take any objectid // and any trans. max_objectid: lastFreeObjectid, max_offset: maxUint64, max_transid: maxUint64, nr_items: 4096, // just a big number, doesn't matter much } m := make(map[objectID]SubvolInfo) for { out, err := treeSearchRaw(f, sk) if err != nil { return nil, err } else if len(out) == 0 { break } for _, obj := range out { switch obj.Type { //case rootBackrefKey: // ref := asRootRef(obj.Data) // o := m[obj.ObjectID] // o.TransID = obj.TransID // o.ObjectID = obj.ObjectID // o.RefTree = obj.Offset // o.DirID = ref.DirID // o.Name = ref.Name // m[obj.ObjectID] = o case rootItemKey: o := m[obj.ObjectID] o.RootID = uint64(obj.ObjectID) robj := asRootItem(obj.Data).Decode() o.fillFromItem(&robj) m[obj.ObjectID] = o } } // record the mins in key so we can make sure the // next search doesn't repeat this root last := out[len(out)-1] sk.min_objectid = last.ObjectID sk.min_type = last.Type sk.min_offset = last.Offset + 1 if sk.min_offset == 0 { // overflow sk.min_type++ } else { continue } if sk.min_type > rootBackrefKey { sk.min_type = rootItemKey sk.min_objectid++ } else { continue } if sk.min_objectid > sk.max_objectid { break } } // resolve paths for id, v := range m { if path, err := subvolidResolve(f, id); err == ErrNotFound { delete(m, id) continue } else if err != nil { return m, fmt.Errorf("cannot resolve path for %v: %v", id, err) } else { v.Path = path m[id] = v } if filter != nil && !filter(v) { delete(m, id) } } return m, nil } type SubvolInfo struct { RootID uint64 UUID UUID ParentUUID UUID ReceivedUUID UUID CTime time.Time OTime time.Time STime time.Time RTime time.Time CTransID uint64 OTransID uint64 STransID uint64 RTransID uint64 Path string } func (s *SubvolInfo) fillFromItem(it *rootItem) { s.UUID = it.UUID s.ReceivedUUID = it.ReceivedUUID s.ParentUUID = it.ParentUUID s.CTime = it.CTime s.OTime = it.OTime s.STime 
= it.STime s.RTime = it.RTime s.CTransID = it.CTransID s.OTransID = it.OTransID s.STransID = it.STransID s.RTransID = it.RTransID } func subvolSearchByUUID(mnt *os.File, uuid UUID) (*SubvolInfo, error) { id, err := lookupUUIDSubvolItem(mnt, uuid) if err != nil { return nil, err } return subvolSearchByRootID(mnt, id, "") } func subvolSearchByReceivedUUID(mnt *os.File, uuid UUID) (*SubvolInfo, error) { id, err := lookupUUIDReceivedSubvolItem(mnt, uuid) if err != nil { return nil, err } return subvolSearchByRootID(mnt, id, "") } func subvolSearchByPath(mnt *os.File, path string) (*SubvolInfo, error) { if !filepath.IsAbs(path) { path = filepath.Join(mnt.Name(), path) } id, err := getPathRootID(path) if err != nil { return nil, err } return subvolSearchByRootID(mnt, id, path) } func subvolidResolve(mnt *os.File, subvolID objectID) (string, error) { return subvolidResolveSub(mnt, "", subvolID) } func subvolidResolveSub(mnt *os.File, path string, subvolID objectID) (string, error) { if subvolID == fsTreeObjectid { return "", nil } sk := btrfs_ioctl_search_key{ tree_id: rootTreeObjectid, min_objectid: subvolID, max_objectid: subvolID, min_type: rootBackrefKey, max_type: rootBackrefKey, max_offset: maxUint64, max_transid: maxUint64, nr_items: 1, } results, err := treeSearchRaw(mnt, sk) if err != nil { return "", err } else if len(results) < 1 { return "", ErrNotFound } res := results[0] if objectID(res.Offset) != fsTreeObjectid { spath, err := subvolidResolveSub(mnt, path, objectID(res.Offset)) if err != nil { return "", err } path = spath + "/" } backRef := asRootRef(res.Data) if backRef.DirID != firstFreeObjectid { arg := btrfs_ioctl_ino_lookup_args{ treeid: objectID(res.Offset), objectid: backRef.DirID, } if err := iocInoLookup(mnt, &arg); err != nil { return "", err } path += arg.Name() } return path + backRef.Name, nil } // subvolSearchByRootID // // Path is optional, and will be resolved automatically if not set. 
func subvolSearchByRootID(mnt *os.File, rootID objectID, path string) (*SubvolInfo, error) { robj, err := readRootItem(mnt, rootID) if err != nil { return nil, err } info := &SubvolInfo{ RootID: uint64(rootID), Path: path, } info.fillFromItem(robj) if path == "" { info.Path, err = subvolidResolve(mnt, objectID(info.RootID)) } return info, err } golang-github-dennwc-btrfs-0.0~git20240418.0167142/test/000077500000000000000000000000001461022635500220275ustar00rootroot00000000000000golang-github-dennwc-btrfs-0.0~git20240418.0167142/test/btrfstest.go000066400000000000000000000036511461022635500244030ustar00rootroot00000000000000package btrfstest import ( "bytes" "errors" "io/ioutil" "log" "os" "os/exec" "strings" "testing" "time" ) func run(name string, args ...string) error { buf := bytes.NewBuffer(nil) cmd := exec.Command(name, args...) cmd.Stdout = buf cmd.Stderr = buf err := cmd.Run() if err == nil { return nil } else if buf.Len() == 0 { return err } return errors.New("error: " + strings.TrimSpace(string(buf.Bytes()))) } func Mkfs(file string, size int64) error { f, err := os.Create(file) if err != nil { return err } if err = f.Truncate(size); err != nil { f.Close() return err } if err = f.Close(); err != nil { return err } if err = run("mkfs.btrfs", file); err != nil { os.Remove(file) return err } return err } func Mount(mount string, file string) error { if err := run("mount", file, mount); err != nil { return err } return nil } func Unmount(mount string) error { for i := 0; i < 5; i++ { if err := run("umount", mount); err == nil { break } else { if strings.Contains(err.Error(), "busy") { time.Sleep(time.Second) } else { break } } } return nil } func New(t testing.TB, size int64) (string, func()) { f, err := ioutil.TempFile("", "btrfs_vol") if err != nil { t.Fatal(err) } name := f.Name() f.Close() rm := func() { os.Remove(name) } if err = Mkfs(name, size); err != nil { rm() } mount, err := ioutil.TempDir("", "btrfs_mount") if err != nil { rm() t.Fatal(err) } if 
err = Mount(mount, name); err != nil { rm() os.RemoveAll(mount) if txt := err.Error(); strings.Contains(txt, "permission denied") || strings.Contains(txt, "only root") { t.Skip(err) } else { t.Fatal(err) } } done := false return mount, func() { if done { return } if err := Unmount(mount); err != nil { log.Println("umount failed:", err) } if err := os.Remove(mount); err != nil { log.Println("cleanup failed:", err) } rm() done = true } } golang-github-dennwc-btrfs-0.0~git20240418.0167142/usage.go000066400000000000000000000104251461022635500225050ustar00rootroot00000000000000package btrfs import ( "os" "sort" "syscall" ) func cmpChunkBlockGroup(f1, f2 blockGroup) int { var mask blockGroup if (f1 & _BTRFS_BLOCK_GROUP_TYPE_MASK) == (f2 & _BTRFS_BLOCK_GROUP_TYPE_MASK) { mask = _BTRFS_BLOCK_GROUP_PROFILE_MASK } else if f2&blockGroupSystem != 0 { return -1 } else if f1&blockGroupSystem != 0 { return +1 } else { mask = _BTRFS_BLOCK_GROUP_TYPE_MASK } if (f1 & mask) > (f2 & mask) { return +1 } else if (f1 & mask) < (f2 & mask) { return -1 } else { return 0 } } type spaceInfoByBlockGroup []spaceInfo func (a spaceInfoByBlockGroup) Len() int { return len(a) } func (a spaceInfoByBlockGroup) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a spaceInfoByBlockGroup) Less(i, j int) bool { return cmpChunkBlockGroup(blockGroup(a[i].Flags), blockGroup(a[j].Flags)) < 0 } type UsageInfo struct { Total uint64 TotalUnused uint64 TotalUsed uint64 TotalChunks uint64 FreeEstimated uint64 FreeMin uint64 LogicalDataChunks uint64 RawDataChunks uint64 RawDataUsed uint64 LogicalMetaChunks uint64 RawMetaChunks uint64 RawMetaUsed uint64 SystemUsed uint64 SystemChunks uint64 DataRatio float64 MetadataRatio float64 GlobalReserve uint64 GlobalReserveUsed uint64 } const minUnallocatedThreshold = 16 * 1024 * 1024 func spaceUsage(f *os.File) (UsageInfo, error) { info, err := iocFsInfo(f) if err != nil { return UsageInfo{}, err } var u UsageInfo for i := uint64(0); i <= info.max_id; i++ { dev, err := 
iocDevInfo(f, i, UUID{}) if err == syscall.ENODEV { continue } else if err != nil { return UsageInfo{}, err } u.Total += dev.total_bytes } spaces, err := iocSpaceInfo(f) if err != nil { return UsageInfo{}, err } sort.Sort(spaceInfoByBlockGroup(spaces)) var ( maxDataRatio int = 1 mixed bool ) for _, s := range spaces { ratio := 1 bg := s.Flags.BlockGroup() switch { case bg&blockGroupRaid0 != 0: ratio = 1 case bg&blockGroupRaid1 != 0: ratio = 2 case bg&blockGroupRaid5 != 0: ratio = 0 case bg&blockGroupRaid6 != 0: ratio = 0 case bg&blockGroupDup != 0: ratio = 2 case bg&blockGroupRaid10 != 0: ratio = 2 } if ratio > maxDataRatio { maxDataRatio = ratio } if bg&spaceInfoGlobalRsv != 0 { u.GlobalReserve = s.TotalBytes u.GlobalReserveUsed = s.UsedBytes } if bg&(blockGroupData|blockGroupMetadata) == (blockGroupData | blockGroupMetadata) { mixed = true } if bg&blockGroupData != 0 { u.RawDataUsed += s.UsedBytes * uint64(ratio) u.RawDataChunks += s.TotalBytes * uint64(ratio) u.LogicalDataChunks += s.TotalBytes } if bg&blockGroupMetadata != 0 { u.RawMetaUsed += s.UsedBytes * uint64(ratio) u.RawMetaChunks += s.TotalBytes * uint64(ratio) u.LogicalMetaChunks += s.TotalBytes } if bg&blockGroupSystem != 0 { u.SystemUsed += s.UsedBytes * uint64(ratio) u.SystemChunks += s.TotalBytes * uint64(ratio) } } u.TotalChunks = u.RawDataChunks + u.SystemChunks u.TotalUsed = u.RawDataUsed + u.SystemUsed if !mixed { u.TotalChunks += u.RawMetaChunks u.TotalUsed += u.RawMetaUsed } u.TotalUnused = u.Total - u.TotalChunks u.DataRatio = float64(u.RawDataChunks) / float64(u.LogicalDataChunks) if mixed { u.MetadataRatio = u.DataRatio } else { u.MetadataRatio = float64(u.RawMetaChunks) / float64(u.LogicalMetaChunks) } // We're able to fill at least DATA for the unused space // // With mixed raid levels, this gives a rough estimate but more // accurate than just counting the logical free space // (l_data_chunks - l_data_used) // // In non-mixed case there's no difference. 
u.FreeEstimated = uint64(float64(u.RawDataChunks-u.RawDataUsed) / u.DataRatio) // For mixed-bg the metadata are left out in calculations thus global // reserve would be lost. Part of it could be permanently allocated, // we have to subtract the used bytes so we don't go under zero free. if mixed { u.FreeEstimated -= u.GlobalReserve - u.GlobalReserveUsed } u.FreeMin = u.FreeEstimated // Chop unallocatable space // FIXME: must be applied per device if u.TotalUnused >= minUnallocatedThreshold { u.FreeEstimated += uint64(float64(u.TotalUnused) / u.DataRatio) // Match the calculation of 'df', use the highest raid ratio u.FreeMin += u.TotalUnused / uint64(maxDataRatio) } return u, nil } golang-github-dennwc-btrfs-0.0~git20240418.0167142/utils.go000066400000000000000000000044501461022635500225420ustar00rootroot00000000000000package btrfs import ( "bytes" "fmt" "os" "path/filepath" "strings" "syscall" "unsafe" "github.com/dennwc/btrfs/mtab" ) func isBtrfs(path string) (bool, error) { var stfs syscall.Statfs_t if err := syscall.Statfs(path, &stfs); err != nil { return false, &os.PathError{Op: "statfs", Path: path, Err: err} } fsType := uint32(stfs.Type) return fsType == SuperMagic, nil } func findMountRoot(path string) (string, error) { mounts, err := mtab.Mounts() if err != nil { return "", err } longest := "" isBtrfs := false for _, m := range mounts { if !strings.HasPrefix(path, m.Mount) { continue } if len(longest) < len(m.Mount) { longest = m.Mount isBtrfs = m.Type == "btrfs" } } if longest == "" { return "", os.ErrNotExist } else if !isBtrfs { return "", ErrNotBtrfs{Path: longest} } return filepath.Abs(longest) } // openDir does the following checks before calling Open: // 1: path is in a btrfs filesystem // 2: path is a directory func openDir(path string) (*os.File, error) { if ok, err := isBtrfs(path); err != nil { return nil, err } else if !ok { return nil, ErrNotBtrfs{Path: path} } file, err := os.Open(path) if err != nil { return nil, err } else if st, err := 
file.Stat(); err != nil { file.Close() return nil, err } else if !st.IsDir() { file.Close() return nil, fmt.Errorf("not a directory: %s", path) } return file, nil } type searchResult struct { TransID uint64 ObjectID objectID Type treeKeyType Offset uint64 Data []byte } func treeSearchRaw(mnt *os.File, key btrfs_ioctl_search_key) (out []searchResult, _ error) { args := btrfs_ioctl_search_args{ key: key, } if err := iocTreeSearch(mnt, &args); err != nil { return nil, err } out = make([]searchResult, 0, args.key.nr_items) buf := args.buf[:] for i := 0; i < int(args.key.nr_items); i++ { h := (*btrfs_ioctl_search_header)(unsafe.Pointer(&buf[0])) buf = buf[unsafe.Sizeof(btrfs_ioctl_search_header{}):] out = append(out, searchResult{ TransID: h.transid, ObjectID: h.objectid, Offset: h.offset, Type: h.typ, Data: buf[:h.len:h.len], // TODO: reallocate? }) buf = buf[h.len:] } return out, nil } func stringFromBytes(input []byte) string { if i := bytes.IndexByte(input, 0); i >= 0 { input = input[:i] } return string(input) } golang-github-dennwc-btrfs-0.0~git20240418.0167142/uuid_tree.go000066400000000000000000000023711461022635500233670ustar00rootroot00000000000000package btrfs import ( "encoding/binary" "fmt" "os" ) func lookupUUIDSubvolItem(f *os.File, uuid UUID) (objectID, error) { return uuidTreeLookupAny(f, uuid, uuidKeySubvol) } func lookupUUIDReceivedSubvolItem(f *os.File, uuid UUID) (objectID, error) { return uuidTreeLookupAny(f, uuid, uuidKeyReceivedSubvol) } func (id UUID) toKey() (objID objectID, off uint64) { objID = objectID(binary.LittleEndian.Uint64(id[:8])) off = binary.LittleEndian.Uint64(id[8:16]) return } // uuidTreeLookupAny searches uuid tree for a given uuid in specified field. // It returns ErrNotFound if object was not found. 
func uuidTreeLookupAny(f *os.File, uuid UUID, typ treeKeyType) (objectID, error) { objId, off := uuid.toKey() args := btrfs_ioctl_search_key{ tree_id: uuidTreeObjectid, min_objectid: objId, max_objectid: objId, min_type: typ, max_type: typ, min_offset: off, max_offset: off, max_transid: maxUint64, nr_items: 1, } res, err := treeSearchRaw(f, args) if err != nil { return 0, err } else if len(res) < 1 { return 0, ErrNotFound } out := res[0] if len(out.Data) != 8 { return 0, fmt.Errorf("btrfs: uuid item with illegal size %d", len(out.Data)) } return objectID(binary.LittleEndian.Uint64(out.Data)), nil } golang-github-dennwc-btrfs-0.0~git20240418.0167142/xattr.go000066400000000000000000000027361461022635500225510ustar00rootroot00000000000000package btrfs import ( "bytes" "os" "syscall" ) const ( xattrPrefix = "btrfs." xattrCompression = xattrPrefix + "compression" ) type Compression string const ( CompressionNone = Compression("") LZO = Compression("lzo") ZLIB = Compression("zlib") ) func SetCompression(path string, v Compression) error { var value []byte if v != CompressionNone { var err error value, err = syscall.ByteSliceFromString(string(v)) if err != nil { return err } } err := syscall.Setxattr(path, xattrCompression, value, 0) if err != nil { return &os.PathError{Op: "setxattr", Path: path, Err: err} } return nil } func GetCompression(path string) (Compression, error) { var buf []byte for { sz, err := syscall.Getxattr(path, xattrCompression, nil) if err == syscall.ENODATA || sz == 0 { return CompressionNone, nil } else if err != nil { return CompressionNone, &os.PathError{Op: "getxattr", Path: path, Err: err} } if cap(buf) < sz { buf = make([]byte, sz) } else { buf = buf[:sz] } sz, err = syscall.Getxattr(path, xattrCompression, buf) if err == syscall.ENODATA { return CompressionNone, nil } else if err == syscall.ERANGE { // xattr changed by someone else, and is larger than our current buffer continue } else if err != nil { return CompressionNone, &os.PathError{Op: 
"getxattr", Path: path, Err: err} } buf = buf[:sz] break } buf = bytes.TrimSuffix(buf, []byte{0}) return Compression(buf), nil }