pax_global_header00006660000000000000000000000064145555533340014526gustar00rootroot0000000000000052 comment=636d7a7b9ae6db4704a12cd990b57a7c8f0ede78 gofumpt-0.6.0/000077500000000000000000000000001455555333400132125ustar00rootroot00000000000000gofumpt-0.6.0/.gitattributes000066400000000000000000000001211455555333400160770ustar00rootroot00000000000000# To prevent CRLF breakages on Windows for fragile files, like testdata. * -text gofumpt-0.6.0/.github/000077500000000000000000000000001455555333400145525ustar00rootroot00000000000000gofumpt-0.6.0/.github/FUNDING.yml000066400000000000000000000000161455555333400163640ustar00rootroot00000000000000github: mvdan gofumpt-0.6.0/.github/workflows/000077500000000000000000000000001455555333400166075ustar00rootroot00000000000000gofumpt-0.6.0/.github/workflows/test.yml000066400000000000000000000013701455555333400203120ustar00rootroot00000000000000on: [push, pull_request] name: Test jobs: test: strategy: matrix: go-version: [1.20.x, 1.21.x] os: [ubuntu-latest, macos-latest, windows-latest] runs-on: ${{ matrix.os }} steps: - uses: actions/setup-go@v3 with: go-version: ${{ matrix.go-version }} - uses: actions/checkout@v3 - run: go test ./... - run: go test -race ./... # Static checks from this point forward. Only run on one Go version and on # Linux, since it's the fastest platform, and the tools behave the same. - if: matrix.os == 'ubuntu-latest' && matrix.go-version == '1.21.x' run: diff <(echo -n) <(gofmt -s -d .) - if: matrix.os == 'ubuntu-latest' && matrix.go-version == '1.21.x' run: go vet ./... gofumpt-0.6.0/CHANGELOG.md000066400000000000000000000165361455555333400150360ustar00rootroot00000000000000# Changelog ## [v0.6.0] - 2024-01-28 This release is based on Go 1.21's gofmt, and requires Go 1.20 or later. 
The following changes are included: * Support `go` version strings from newer go.mod files - [#280] * Consider simple error checks even if they use the `=` operator - [#271] * Ignore `//line` directives to avoid panics - [#288] ## [v0.5.0] - 2023-04-09 This release is based on Go 1.20's gofmt, and requires Go 1.19 or later. The biggest change in this release is that we now vendor copies of the packages `go/format`, `go/printer`, and `go/doc/comment` on top of `cmd/gofmt` itself. This allows for each gofumpt release to format code in exactly the same way no matter what Go version is used to build it, as Go versions can change those three packages in ways that alter formatting behavior. This vendoring adds a small amount of duplication when using the `mvdan.cc/gofumpt/format` library, but it's the only way to make gofumpt versions consistent in their behavior and formatting, just like gofmt. The jump to Go 1.20's `go/printer` should also bring a small performance improvement, as we contributed patches to make printing about 25% faster: * https://go.dev/cl/412555 * https://go.dev/cl/412557 * https://go.dev/cl/424924 The following changes are included as well: * Skip `testdata` dirs by default like we already do for `vendor` - [#260] * Avoid inserting newlines incorrectly in some func signatures - [#235] * Avoid joining some comments with the previous line - [#256] * Fix `gofumpt -version` for release archives - [#253] ## [v0.4.0] - 2022-09-27 This release is based on Go 1.19's gofmt, and requires Go 1.18 or later. We recommend building gofumpt with Go 1.19 for the best formatting results. The jump from Go 1.18 brings diffing in pure Go, removing the need to exec `diff`, and a small parsing speed-up thanks to `go/parser.SkipObjectResolution`. 
The following formatting fixes are included as well: * Allow grouping declarations with comments - [#212] * Properly measure the length of case clauses - [#217] * Fix a few crashes found by Go's native fuzzing ## [v0.3.1] - 2022-03-21 This bugfix release resolves a number of issues: * Avoid "too many open files" error regression introduced by [v0.3.0] - [#208] * Use the `go.mod` relative to each Go file when deriving flag defaults - [#211] * Remove unintentional debug prints when directly formatting files ## [v0.3.0] - 2022-02-22 This is gofumpt's third major release, based on Go 1.18's gofmt. The jump from Go 1.17's gofmt should bring a noticeable speed-up, as the tool can now format many files concurrently. On an 8-core laptop, formatting a large codebase is 4x as fast. The following [formatting rules](https://github.com/mvdan/gofumpt#Added-rules) are added: * Functions should separate `) {` where the indentation helps readability * Field lists should not have leading or trailing empty lines The following changes are included as well: * Generated files are now fully formatted when given as explicit arguments * Prepare for Go 1.18's module workspaces, which could cause errors * Import paths sharing a prefix with the current module path are no longer grouped with standard library imports * `format.Options` gains a `ModulePath` field per the last bullet point ## [v0.2.1] - 2021-12-12 This bugfix release resolves a number of issues: * Add deprecated flags `-s` and `-r` once again, now giving useful errors * Avoid a panic with certain function declaration styles * Don't group interface members of different kinds * Account for leading comments in composite literals ## [v0.2.0] - 2021-11-10 This is gofumpt's second major release, based on Go 1.17's gofmt. The jump from Go 1.15's gofmt should bring a mild speed-up, as walking directories with `filepath.WalkDir` uses fewer syscalls. gofumports is now removed, after being deprecated in [v0.1.0]. 
Its main purpose was IDE integration; it is now recommended to use gopls, which in turn implements goimports and supports gofumpt natively. IDEs which don't integrate with gopls (such as GoLand) implement goimports too, so it is safe to use gofumpt as their "format on save" command. See the [installation instructions](https://github.com/mvdan/gofumpt#Installation) for more details. The following [formatting rules](https://github.com/mvdan/gofumpt#Added-rules) are added: * Composite literals should not have leading or trailing empty lines * No empty lines following an assignment operator * Functions using an empty line for readability should use a `) {` line instead * Remove unnecessary empty lines from interfaces Finally, the following changes are made to the gofumpt tool: * Initial support for Go 1.18's type parameters is added * The `-r` flag is removed in favor of `gofmt -r` * The `-s` flag is removed as it is always enabled * Vendor directories are skipped unless given as explicit arguments * The added rules are not applied to generated Go files * The `format` Go API now also applies the `gofmt -s` simplification * Add support for `//gofumpt:diagnose` comments ## [v0.1.1] - 2021-03-11 This bugfix release backports fixes for a few issues: * Keep leading empty lines in func bodies if they help readability * Avoid breaking comment alignment on empty field lists * Add support for `//go-sumtype:` directives ## [v0.1.0] - 2021-01-05 This is gofumpt's first release, based on Go 1.15.x. It solidifies the features which have worked well for over a year. This release will be the last to include `gofumports`, the fork of `goimports` which applies `gofumpt`'s rules on top of updating the Go import lines. Users who were relying on `goimports` in their editors or IDEs to apply both `gofumpt` and `goimports` in a single step should switch to gopls, the official Go language server. 
It is supported by many popular editors such as VS Code and Vim, and already bundles gofumpt support. Instructions are available [in the README](https://github.com/mvdan/gofumpt). `gofumports` also added maintenance work and potential confusion to end users. In the future, there will only be one way to use `gofumpt` from the command line. We also have a [Go API](https://pkg.go.dev/mvdan.cc/gofumpt/format) for those building programs with gofumpt. Finally, this release adds the `-version` flag, to print the tool's own version. The flag will work for "master" builds too. [v0.6.0]: https://github.com/mvdan/gofumpt/releases/tag/v0.6.0 [#271]: https://github.com/mvdan/gofumpt/issues/271 [#280]: https://github.com/mvdan/gofumpt/issues/280 [#288]: https://github.com/mvdan/gofumpt/issues/288 [v0.5.0]: https://github.com/mvdan/gofumpt/releases/tag/v0.5.0 [#235]: https://github.com/mvdan/gofumpt/issues/235 [#253]: https://github.com/mvdan/gofumpt/issues/253 [#256]: https://github.com/mvdan/gofumpt/issues/256 [#260]: https://github.com/mvdan/gofumpt/issues/260 [v0.4.0]: https://github.com/mvdan/gofumpt/releases/tag/v0.4.0 [#212]: https://github.com/mvdan/gofumpt/issues/212 [#217]: https://github.com/mvdan/gofumpt/issues/217 [v0.3.1]: https://github.com/mvdan/gofumpt/releases/tag/v0.3.1 [#208]: https://github.com/mvdan/gofumpt/issues/208 [#211]: https://github.com/mvdan/gofumpt/pull/211 [v0.3.0]: https://github.com/mvdan/gofumpt/releases/tag/v0.3.0 [v0.2.1]: https://github.com/mvdan/gofumpt/releases/tag/v0.2.1 [v0.2.0]: https://github.com/mvdan/gofumpt/releases/tag/v0.2.0 [v0.1.1]: https://github.com/mvdan/gofumpt/releases/tag/v0.1.1 [v0.1.0]: https://github.com/mvdan/gofumpt/releases/tag/v0.1.0 gofumpt-0.6.0/LICENSE000066400000000000000000000027201455555333400142200ustar00rootroot00000000000000Copyright (c) 2019, Daniel Martí. All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. gofumpt-0.6.0/LICENSE.google000066400000000000000000000027071455555333400155000ustar00rootroot00000000000000Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. gofumpt-0.6.0/README.md000066400000000000000000000301041455555333400144670ustar00rootroot00000000000000# gofumpt [![Go Reference](https://pkg.go.dev/badge/mvdan.cc/gofumpt/format.svg)](https://pkg.go.dev/mvdan.cc/gofumpt/format) go install mvdan.cc/gofumpt@latest Enforce a stricter format than `gofmt`, while being backwards compatible. That is, `gofumpt` is happy with a subset of the formats that `gofmt` is happy with. The tool is a fork of `gofmt` as of Go 1.21, and requires Go 1.20 or later. It can be used as a drop-in replacement to format your Go code, and running `gofmt` after `gofumpt` should produce no changes. For example: gofumpt -l -w . Some of the Go source files in this repository belong to the Go project. 
The project includes copies of `go/printer` and `go/doc/comment` as of Go 1.21 to ensure consistent formatting independent of what Go version is being used. The [added formatting rules](#Added-rules) are implemented in the `format` package. `vendor` and `testdata` directories are skipped unless given as explicit arguments. Similarly, the added rules do not apply to generated Go files unless they are given as explicit arguments. Finally, note that the `-r` rewrite flag is removed in favor of `gofmt -r`, and the `-s` flag is hidden as it is always enabled. ### Added rules **No empty lines following an assignment operator**
Example ```go func foo() { foo := "bar" } ``` ```go func foo() { foo := "bar" } ```
**No empty lines around function bodies**
Example ```go func foo() { println("bar") } ``` ```go func foo() { println("bar") } ```
**Functions should separate `) {` where the indentation helps readability**
Example ```go func foo(s string, i int) { println("bar") } // With an empty line it's slightly better, but still not great. func bar(s string, i int) { println("bar") } ``` ```go func foo(s string, i int, ) { println("bar") } // With an empty line it's slightly better, but still not great. func bar(s string, i int, ) { println("bar") } ```
**No empty lines around a lone statement (or comment) in a block**
Example ```go if err != nil { return err } ``` ```go if err != nil { return err } ```
**No empty lines before a simple error check**
Example ```go foo, err := processFoo() if err != nil { return err } ``` ```go foo, err := processFoo() if err != nil { return err } ```
**Composite literals should use newlines consistently**
Example ```go // A newline before or after an element requires newlines for the opening and // closing braces. var ints = []int{1, 2, 3, 4} // A newline between consecutive elements requires a newline between all // elements. var matrix = [][]int{ {1}, {2}, { 3, }, } ``` ```go var ints = []int{ 1, 2, 3, 4, } var matrix = [][]int{ {1}, {2}, { 3, }, } ```
**Empty field lists should use a single line**
Example ```go var V interface { } = 3 type T struct { } func F( ) ``` ```go var V interface{} = 3 type T struct{} func F() ```
**`std` imports must be in a separate group at the top**
Example ```go import ( "foo.com/bar" "io" "io/ioutil" ) ``` ```go import ( "io" "io/ioutil" "foo.com/bar" ) ```
**Short case clauses should take a single line**
Example ```go switch c { case 'a', 'b', 'c', 'd': } ``` ```go switch c { case 'a', 'b', 'c', 'd': } ```
**Multiline top-level declarations must be separated by empty lines**
Example ```go func foo() { println("multiline foo") } func bar() { println("multiline bar") } ``` ```go func foo() { println("multiline foo") } func bar() { println("multiline bar") } ```
**Single var declarations should not be grouped with parentheses**
Example ```go var ( foo = "bar" ) ``` ```go var foo = "bar" ```
**Contiguous top-level declarations should be grouped together**
Example ```go var nicer = "x" var with = "y" var alignment = "z" ``` ```go var ( nicer = "x" with = "y" alignment = "z" ) ```
**Simple var-declaration statements should use short assignments**
Example ```go var s = "somestring" ``` ```go s := "somestring" ```
**The `-s` code simplification flag is enabled by default**
Example ```go var _ = [][]int{[]int{1}} ``` ```go var _ = [][]int{{1}} ```
**Octal integer literals should use the `0o` prefix on modules using Go 1.13 and later**
Example ```go const perm = 0755 ``` ```go const perm = 0o755 ```
**Comments which aren't Go directives should start with a whitespace**
Example ```go //go:noinline //Foo is awesome. func Foo() {} ``` ```go //go:noinline // Foo is awesome. func Foo() {} ```
**Composite literals should not have leading or trailing empty lines**
Example ```go var _ = []string{ "foo", } var _ = map[string]string{ "foo": "bar", } ``` ```go var _ = []string{ "foo", } var _ = map[string]string{ "foo": "bar", } ```
**Field lists should not have leading or trailing empty lines**
Example ```go type Person interface { Name() string Age() int } type ZeroFields struct { // No fields are needed here. } ``` ```go type Person interface { Name() string Age() int } type ZeroFields struct { // No fields are needed here. } ```
#### Extra rules behind `-extra` **Adjacent parameters with the same type should be grouped together**
Example ```go func Foo(bar string, baz string) {} ``` ```go func Foo(bar, baz string) {} ```
### Installation `gofumpt` is a replacement for `gofmt`, so you can simply `go install` it as described at the top of this README and use it. When using an IDE or editor with Go integration based on `gopls`, it's best to configure the editor to use the `gofumpt` support built into `gopls`. The instructions below show how to set up `gofumpt` for some of the major editors out there. #### Visual Studio Code Enable the language server following [the official docs](https://github.com/golang/vscode-go#readme), and then enable gopls's `gofumpt` option. Note that VS Code will complain about the `gopls` settings, but they will still work. ```json "go.useLanguageServer": true, "gopls": { "formatting.gofumpt": true, }, ``` #### GoLand GoLand doesn't use `gopls` so it should be configured to use `gofumpt` directly. Once `gofumpt` is installed, follow the steps below: - Open **Settings** (File > Settings) - Open the **Tools** section - Find the *File Watchers* sub-section - Click on the `+` on the right side to add a new file watcher - Choose *Custom Template* When a window asks for settings, you can enter the following: * File Types: Select all .go files * Scope: Project Files * Program: Select your `gofumpt` executable * Arguments: `-w $FilePath$` * Output path to refresh: `$FilePath$` * Working directory: `$ProjectFileDir$` * Environment variables: `GOROOT=$GOROOT$;GOPATH=$GOPATH$;PATH=$GoBinDirs$` To avoid unnecessary runs, you should disable all checkboxes in the *Advanced* section. #### Vim The configuration depends on the plugin you are using: [vim-go](https://github.com/fatih/vim-go) or [govim](https://github.com/govim/govim). 
##### vim-go To configure `gopls` to use `gofumpt`: ```vim let g:go_fmt_command="gopls" let g:go_gopls_gofumpt=1 ``` ##### govim To configure `gopls` to use `gofumpt`: ```vim call govim#config#Set("Gofumpt", 1) ``` #### Neovim When using [`lspconfig`](https://github.com/neovim/nvim-lspconfig), pass the `gofumpt` setting to `gopls`: ```lua require('lspconfig').gopls.setup({ settings = { gopls = { gofumpt = true } } }) ``` #### Emacs For [lsp-mode](https://emacs-lsp.github.io/lsp-mode/) users on version 8.0.0 or higher: ```elisp (setq lsp-go-use-gofumpt t) ``` For users of `lsp-mode` before `8.0.0`: ```elisp (lsp-register-custom-settings '(("gopls.gofumpt" t))) ``` For [eglot](https://github.com/joaotavora/eglot) users: ```elisp (setq-default eglot-workspace-configuration '((:gopls . ((gofumpt . t))))) ``` #### Helix When using the `gopls` language server, modify the Go settings in `~/.config/helix/languages.toml`: ```toml [language-server.gopls.config] "formatting.gofumpt" = true ``` #### Sublime Text With ST4, install the Sublime Text LSP extension according to [the documentation](https://github.com/sublimelsp/LSP), and enable `gopls`'s `gofumpt` option in the LSP package settings, including setting `lsp_format_on_save` to `true`. ```json "lsp_format_on_save": true, "clients": { "gopls": { "enabled": true, "initializationOptions": { "gofumpt": true, } } } ``` ### Roadmap This tool is a place to experiment. In the long term, the features that work well might be proposed for `gofmt` itself. The tool is also compatible with `gofmt` and is aimed to be stable, so you can rely on it for your code as long as you pin a version of it. ### Frequently Asked Questions > Why attempt to replace `gofmt` instead of building on top of it? Our design is to build on top of `gofmt`, and we'll never add rules which disagree with its formatting. So we extend `gofmt` rather than compete with it. 
The tool is a modified copy of `gofmt`, for the purpose of allowing its use as a drop-in replacement in editors and scripts. > Why are my module imports being grouped with standard library imports? Any import paths that don't start with a domain name like `foo.com` are effectively [reserved by the Go toolchain](https://github.com/golang/go/issues/32819). Third party modules should either start with a domain name, even a local one like `foo.local`, or use [a reserved path prefix](https://github.com/golang/go/issues/37641). For backwards compatibility with modules set up before these rules were clear, `gofumpt` will treat any import path sharing a prefix with the current module path as third party. For example, if the current module is `mycorp/mod1`, then all import paths in `mycorp/...` will be considered third party. > How can I use `gofumpt` if I already use `goimports` to replace `gofmt`? Most editors have replaced the `goimports` program with the same functionality provided by a language server like `gopls`. This mechanism is significantly faster and more powerful, since the language server has more information that is kept up to date, necessary to add missing imports. As such, the general recommendation is to let your editor fix your imports - either via `gopls`, such as VSCode or vim-go, or via their own custom implementation, such as GoLand. Then follow the install instructions above to enable the use of `gofumpt` instead of `gofmt`. If you want to avoid integrating with `gopls`, and are OK with the overhead of calling `goimports` from scratch on each save, you should be able to call both tools; for example, `goimports file.go && gofumpt file.go`. ### Contributing Issues and pull requests are welcome! Please open an issue to discuss a feature before sending a pull request. We also use the `#gofumpt` channel over at the [Gophers Slack](https://invite.slack.golangbridge.org/) to chat. When reporting a formatting bug, insert a `//gofumpt:diagnose` comment. 
The comment will be rewritten to include useful debugging information. For instance: ``` $ cat f.go package p //gofumpt:diagnose $ gofumpt f.go package p //gofumpt:diagnose v0.1.1-0.20211103104632-bdfa3b02e50a -lang=v1.16 ``` ### License Note that much of the code is copied from Go's `gofmt` command. You can tell which files originate from the Go repository from their copyright headers. Their license file is `LICENSE.google`. `gofumpt`'s original source files are also under the 3-clause BSD license, with the separate file `LICENSE`. gofumpt-0.6.0/doc.go000066400000000000000000000003011455555333400143000ustar00rootroot00000000000000// Copyright (c) 2023, Daniel Martí // See LICENSE for licensing information // gofumpt enforces a stricter format than gofmt, while being backwards compatible. package main gofumpt-0.6.0/format/000077500000000000000000000000001455555333400145025ustar00rootroot00000000000000gofumpt-0.6.0/format/format.go000066400000000000000000000771761455555333400163430ustar00rootroot00000000000000// Copyright (c) 2019, Daniel Martí // See LICENSE for licensing information // Package format exposes gofumpt's formatting in an API similar to go/format. // In general, the APIs are only guaranteed to work well when the input source // is in canonical gofmt format. package format import ( "bytes" "fmt" "go/ast" "go/parser" "go/token" "os" "reflect" "regexp" "sort" "strconv" "strings" "unicode" "unicode/utf8" "github.com/google/go-cmp/cmp" "golang.org/x/mod/semver" "golang.org/x/tools/go/ast/astutil" "mvdan.cc/gofumpt/internal/govendor/go/format" "mvdan.cc/gofumpt/internal/version" ) // Options is the set of formatting options which affect gofumpt. type Options struct { // TODO: link to the go/version docs once Go 1.22 is out. // The old semver docs said: // // LangVersion is treated as a semantic version, which may start with a "v" // prefix. Like Go versions, it may also be incomplete; "1.14" is equivalent // to "1.14.0". 
When empty, it is equivalent to "v1", to not use language // features which could break programs. // LangVersion is the Go version a piece of code is written in. // The version is used to decide whether to apply formatting // rules which require new language features. // When inside a Go module, LangVersion should typically be: // // go mod edit -json | jq -r '.Go' LangVersion string // ModulePath corresponds to the Go module path which contains the source // code being formatted. When inside a Go module, ModulePath should be: // // go mod edit -json | jq -r '.Module.Path' // // ModulePath is used for formatting decisions like what import paths are // considered to be not part of the standard library. When empty, the source // is formatted as if it weren't inside a module. ModulePath string // ExtraRules enables extra formatting rules, such as grouping function // parameters with repeated types together. ExtraRules bool } // Source formats src in gofumpt's format, assuming that src holds a valid Go // source file. func Source(src []byte, opts Options) ([]byte, error) { fset := token.NewFileSet() // Ensure our parsed files never start with base 1, // to ensure that using token.NoPos+1 will panic. fset.AddFile("gofumpt_base.go", 1, 10) file, err := parser.ParseFile(fset, "", src, parser.SkipObjectResolution|parser.ParseComments) if err != nil { return nil, err } File(fset, file, opts) var buf bytes.Buffer if err := format.Node(&buf, fset, file); err != nil { return nil, err } return buf.Bytes(), nil } var rxGoVersionMajorMinor = regexp.MustCompile(`^(v|go)?([1-9]+)\.([0-9]+)`) // File modifies a file and fset in place to follow gofumpt's format. The // changes might include manipulating adding or removing newlines in fset, // modifying the position of nodes, or modifying literal values. 
func File(fset *token.FileSet, file *ast.File, opts Options) { simplify(file) // TODO: replace this hacky mess with go/version once we can rely on Go 1.22, // as well as replacing our uses of the semver package. // In particular, we likely want to allow any of 1.21, 1.21.2, or go1.21rc3, // but we can rely on go/version.Lang to validate and normalize. if opts.LangVersion == "" { opts.LangVersion = "v1.0" } m := rxGoVersionMajorMinor.FindStringSubmatch(opts.LangVersion) if m == nil { panic(fmt.Sprintf("invalid Go version: %q", opts.LangVersion)) } opts.LangVersion = "v" + m[2] + "." + m[3] f := &fumpter{ file: fset.File(file.Pos()), fset: fset, astFile: file, Options: opts, minSplitFactor: 0.4, } var topFuncType *ast.FuncType pre := func(c *astutil.Cursor) bool { f.applyPre(c) switch node := c.Node().(type) { case *ast.FuncDecl: topFuncType = node.Type case *ast.FieldList: ft, _ := c.Parent().(*ast.FuncType) if ft == nil || ft != topFuncType { break } // For top-level function declaration parameters, // require the line split to be longer. // This avoids func lines which are a bit too short, // and allows func lines which are a bit longer. // // We don't just increase longLineLimit, // as we still want splits at around the same place. if ft.Params == node { f.minSplitFactor = 0.6 } // Don't split result parameters into multiple lines, // as that can be easily confused for input parameters. // TODO: consider the same for single-line func calls in // if statements. // TODO: perhaps just use a higher factor, like 0.8. if ft.Results == node { f.minSplitFactor = 1000 } case *ast.BlockStmt: f.blockLevel++ } return true } post := func(c *astutil.Cursor) bool { f.applyPost(c) // Reset minSplitFactor and blockLevel. 
switch node := c.Node().(type) { case *ast.FuncType: if node == topFuncType { f.minSplitFactor = 0.4 } case *ast.BlockStmt: f.blockLevel-- } return true } astutil.Apply(file, pre, post) } // Multiline nodes which could easily fit on a single line under this many bytes // may be collapsed onto a single line. const shortLineLimit = 60 // Single-line nodes which take over this many bytes, and could easily be split // into two lines of at least its minSplitFactor factor, may be split. const longLineLimit = 100 var rxOctalInteger = regexp.MustCompile(`\A0[0-7_]+\z`) type fumpter struct { Options file *token.File fset *token.FileSet astFile *ast.File // blockLevel is the number of indentation blocks we're currently under. // It is used to approximate the levels of indentation a line will end // up with. blockLevel int minSplitFactor float64 } func (f *fumpter) commentsBetween(p1, p2 token.Pos) []*ast.CommentGroup { comments := f.astFile.Comments i1 := sort.Search(len(comments), func(i int) bool { return comments[i].Pos() >= p1 }) comments = comments[i1:] i2 := sort.Search(len(comments), func(i int) bool { return comments[i].Pos() >= p2 }) comments = comments[:i2] return comments } func (f *fumpter) inlineComment(pos token.Pos) *ast.Comment { comments := f.astFile.Comments i := sort.Search(len(comments), func(i int) bool { return comments[i].Pos() >= pos }) if i >= len(comments) { return nil } line := f.Line(pos) for _, comment := range comments[i].List { if f.Line(comment.Pos()) == line { return comment } } return nil } // addNewline is a hack to let us force a newline at a certain position. func (f *fumpter) addNewline(at token.Pos) { offset := f.Offset(at) // TODO: replace with the new Lines method once we require Go 1.21 or later field := reflect.ValueOf(f.file).Elem().FieldByName("lines") n := field.Len() lines := make([]int, 0, n+1) for i := 0; i < n; i++ { cur := int(field.Index(i).Int()) if offset == cur { // This newline already exists; do nothing. 
Duplicate // newlines can't exist. return } if offset >= 0 && offset < cur { lines = append(lines, offset) offset = -1 } lines = append(lines, cur) } if offset >= 0 { lines = append(lines, offset) } if !f.file.SetLines(lines) { panic(fmt.Sprintf("could not set lines to %v", lines)) } } // removeLines removes all newlines between two positions, so that they end // up on the same line. func (f *fumpter) removeLines(fromLine, toLine int) { for fromLine < toLine { f.file.MergeLine(fromLine) toLine-- } } // removeLinesBetween is like removeLines, but it leaves one newline between the // two positions. func (f *fumpter) removeLinesBetween(from, to token.Pos) { f.removeLines(f.Line(from)+1, f.Line(to)) } func (f *fumpter) Position(p token.Pos) token.Position { return f.file.PositionFor(p, false) } func (f *fumpter) Line(p token.Pos) int { return f.Position(p).Line } func (f *fumpter) Offset(p token.Pos) int { return f.file.Offset(p) } type byteCounter int func (b *byteCounter) Write(p []byte) (n int, err error) { *b += byteCounter(len(p)) return len(p), nil } func (f *fumpter) printLength(node ast.Node) int { var count byteCounter if err := format.Node(&count, f.fset, node); err != nil { panic(fmt.Sprintf("unexpected print error: %v", err)) } // Add the space taken by an inline comment. if c := f.inlineComment(node.End()); c != nil { fmt.Fprintf(&count, " %s", c.Text) } // Add an approximation of the indentation level. We can't know the // number of tabs go/printer will add ahead of time. Trying to print the // entire top-level declaration would tell us that, but then it's near // impossible to reliably find our node again. 
return int(count) + (f.blockLevel * 8) } func (f *fumpter) lineEnd(line int) token.Pos { if line < 1 { panic("illegal line number") } total := f.file.LineCount() if line > total { panic("illegal line number") } if line == total { return f.astFile.End() } return f.file.LineStart(line+1) - 1 } // rxCommentDirective covers all common Go comment directives: // // //go: | standard Go directives, like go:noinline // //some-words: | similar to the syntax above, like lint:ignore or go-sumtype:decl // //line | inserted line information for cmd/compile // //export | to mark cgo funcs for exporting // //extern | C function declarations for gccgo // //sys(nb)? | syscall function wrapper prototypes // //nolint | nolint directive for golangci // //noinspection | noinspection directive for GoLand and friends // //NOSONAR | NOSONAR directive for SonarQube // // Note that the "some-words:" matching expects a letter afterward, such as // "go:generate", to prevent matching false positives like "https://site". var rxCommentDirective = regexp.MustCompile(`^([a-z-]+:[a-z]+|line\b|export\b|extern\b|sys(nb)?\b|no(lint|inspection)\b)|NOSONAR\b`) func (f *fumpter) applyPre(c *astutil.Cursor) { f.splitLongLine(c) switch node := c.Node().(type) { case *ast.File: // Join contiguous lone var/const/import lines. // Abort if there are empty lines in between, // including a leading comment if it's a directive. newDecls := make([]ast.Decl, 0, len(node.Decls)) for i := 0; i < len(node.Decls); { newDecls = append(newDecls, node.Decls[i]) start, ok := node.Decls[i].(*ast.GenDecl) if !ok || isCgoImport(start) || containsAnyDirective(start.Doc) { i++ continue } lastPos := start.Pos() contLoop: for i++; i < len(node.Decls); { cont, ok := node.Decls[i].(*ast.GenDecl) if !ok || cont.Tok != start.Tok || cont.Lparen != token.NoPos || isCgoImport(cont) { break } // Are there things between these two declarations? e.g. 
empty lines, comments, directives // If so, break the chain on empty lines and directives, continue below for comments. if f.Line(lastPos) < f.Line(cont.Pos())-1 { // break on empty line if cont.Doc == nil { break } // break on directive for i, comment := range cont.Doc.List { if f.Line(comment.Slash) != f.Line(lastPos)+1+i || rxCommentDirective.MatchString(strings.TrimPrefix(comment.Text, "//")) { break contLoop } } // continue below for comments } start.Specs = append(start.Specs, cont.Specs...) if c := f.inlineComment(cont.End()); c != nil { // don't move an inline comment outside start.Rparen = c.End() } else { // so the code below treats the joined // decl group as multi-line start.Rparen = cont.End() } lastPos = cont.Pos() i++ } } node.Decls = newDecls // Multiline top-level declarations should be separated by an // empty line. // Do this after the joining of lone declarations above, // as joining single-line declarations makes then multi-line. var lastMulti bool var lastEnd token.Pos for _, decl := range node.Decls { pos := decl.Pos() comments := f.commentsBetween(lastEnd, pos) if len(comments) > 0 { pos = comments[0].Pos() } // Note that we want End-1, as End is the character after the node. multi := f.Line(pos) < f.Line(decl.End()-1) if multi && lastMulti && f.Line(lastEnd)+1 == f.Line(pos) { f.addNewline(lastEnd) } lastMulti = multi lastEnd = decl.End() } // Comments aren't nodes, so they're not walked by default. 
groupLoop: for _, group := range node.Comments { for _, comment := range group.List { if comment.Text == "//gofumpt:diagnose" || strings.HasPrefix(comment.Text, "//gofumpt:diagnose ") { slc := []string{ "//gofumpt:diagnose", "version:", version.String(""), "flags:", "-lang=" + f.LangVersion, "-modpath=" + f.ModulePath, } if f.ExtraRules { slc = append(slc, "-extra") } comment.Text = strings.Join(slc, " ") } body := strings.TrimPrefix(comment.Text, "//") if body == comment.Text { // /*-style comment continue groupLoop } if rxCommentDirective.MatchString(body) { // this line is a directive continue groupLoop } r, _ := utf8.DecodeRuneInString(body) if !unicode.IsLetter(r) && !unicode.IsNumber(r) && !unicode.IsSpace(r) { // this line could be code like "//{" continue groupLoop } } // If none of the comment group's lines look like a // directive or code, add spaces, if needed. for _, comment := range group.List { body := strings.TrimPrefix(comment.Text, "//") r, _ := utf8.DecodeRuneInString(body) if !unicode.IsSpace(r) { comment.Text = "// " + body } } } case *ast.DeclStmt: decl, ok := node.Decl.(*ast.GenDecl) if !ok || decl.Tok != token.VAR || len(decl.Specs) != 1 { break // e.g. const name = "value" } spec := decl.Specs[0].(*ast.ValueSpec) if spec.Type != nil { break // e.g. var name Type } tok := token.ASSIGN names := make([]ast.Expr, len(spec.Names)) for i, name := range spec.Names { names[i] = name if name.Name != "_" { tok = token.DEFINE } } c.Replace(&ast.AssignStmt{ Lhs: names, Tok: tok, Rhs: spec.Values, }) case *ast.GenDecl: if node.Tok == token.IMPORT && node.Lparen.IsValid() { f.joinStdImports(node) } // Single var declarations shouldn't use parentheses, unless // there's a comment on the grouped declaration. 
if node.Tok == token.VAR && len(node.Specs) == 1 && node.Lparen.IsValid() && node.Doc == nil { specPos := node.Specs[0].Pos() specEnd := node.Specs[0].End() if len(f.commentsBetween(node.TokPos, specPos)) > 0 { // If the single spec has a comment on the line above, // the comment must go before the entire declaration now. node.TokPos = specPos } else { f.removeLines(f.Line(node.TokPos), f.Line(specPos)) } if len(f.commentsBetween(specEnd, node.Rparen)) > 0 { // Leave one newline to not force a comment on the next line to // become an inline comment. f.removeLines(f.Line(specEnd)+1, f.Line(node.Rparen)) } else { f.removeLines(f.Line(specEnd), f.Line(node.Rparen)) } // Remove the parentheses. go/printer will automatically // get rid of the newlines. node.Lparen = token.NoPos node.Rparen = token.NoPos } case *ast.InterfaceType: if len(node.Methods.List) > 0 { method := node.Methods.List[0] removeToPos := method.Pos() if comments := f.commentsBetween(node.Interface, method.Pos()); len(comments) > 0 { // only remove leading line upto the first comment removeToPos = comments[0].Pos() } // remove leading lines if they exist f.removeLines(f.Line(node.Interface)+1, f.Line(removeToPos)) } case *ast.BlockStmt: f.stmts(node.List) comments := f.commentsBetween(node.Lbrace, node.Rbrace) if len(node.List) == 0 && len(comments) == 0 { f.removeLinesBetween(node.Lbrace, node.Rbrace) break } var sign *ast.FuncType var cond ast.Expr switch parent := c.Parent().(type) { case *ast.FuncDecl: sign = parent.Type case *ast.FuncLit: sign = parent.Type case *ast.IfStmt: cond = parent.Cond case *ast.ForStmt: cond = parent.Cond } if len(node.List) > 1 && sign == nil { // only if we have a single statement, or if // it's a func body. 
break } var bodyPos, bodyEnd token.Pos if len(node.List) > 0 { bodyPos = node.List[0].Pos() bodyEnd = node.List[len(node.List)-1].End() } if len(comments) > 0 { if pos := comments[0].Pos(); !bodyPos.IsValid() || pos < bodyPos { bodyPos = pos } if pos := comments[len(comments)-1].End(); !bodyPos.IsValid() || pos > bodyEnd { bodyEnd = pos } } f.removeLinesBetween(bodyEnd, node.Rbrace) if cond != nil && f.Line(cond.Pos()) != f.Line(cond.End()) { // The body is preceded by a multi-line condition, so an // empty line can help readability. return } if sign != nil { endLine := f.Line(sign.End()) if f.Line(sign.Pos()) != endLine { handleMultiLine := func(fl *ast.FieldList) { // Refuse to insert a newline before the closing token // if the list is empty or all in one line. if fl == nil || len(fl.List) == 0 { return } fieldOpeningLine := f.Line(fl.Opening) fieldClosingLine := f.Line(fl.Closing) if fieldOpeningLine == fieldClosingLine { return } lastFieldEnd := fl.List[len(fl.List)-1].End() lastFieldLine := f.Line(lastFieldEnd) isLastFieldOnFieldClosingLine := lastFieldLine == fieldClosingLine isLastFieldOnSigClosingLine := lastFieldLine == endLine var isLastCommentGrpOnFieldClosingLine, isLastCommentGrpOnSigClosingLine bool if comments := f.commentsBetween(lastFieldEnd, fl.Closing); len(comments) > 0 { lastCommentGrp := comments[len(comments)-1] lastCommentGrpLine := f.Line(lastCommentGrp.End()) isLastCommentGrpOnFieldClosingLine = lastCommentGrpLine == fieldClosingLine isLastCommentGrpOnSigClosingLine = lastCommentGrpLine == endLine } // is there a comment grp/last field, field closing and sig closing on the same line? 
if (isLastFieldOnFieldClosingLine && isLastFieldOnSigClosingLine) || (isLastCommentGrpOnFieldClosingLine && isLastCommentGrpOnSigClosingLine) { fl.Closing += 1 f.addNewline(fl.Closing) } } handleMultiLine(sign.Params) if sign.Results != nil && len(sign.Results.List) > 0 { lastResultLine := f.Line(sign.Results.List[len(sign.Results.List)-1].End()) isLastResultOnParamClosingLine := sign.Params != nil && lastResultLine == f.Line(sign.Params.Closing) if !isLastResultOnParamClosingLine { handleMultiLine(sign.Results) } } } } f.removeLinesBetween(node.Lbrace, bodyPos) case *ast.CaseClause: f.stmts(node.Body) openLine := f.Line(node.Case) closeLine := f.Line(node.Colon) if openLine == closeLine { // nothing to do break } if len(f.commentsBetween(node.Case, node.Colon)) > 0 { // don't move comments break } // check the length excluding the body nodeWithoutBody := &ast.CaseClause{ Case: node.Case, List: node.List, Colon: node.Colon, } if f.printLength(nodeWithoutBody) > shortLineLimit { // too long to collapse break } f.removeLines(openLine, closeLine) case *ast.CommClause: f.stmts(node.Body) case *ast.FieldList: numFields := node.NumFields() comments := f.commentsBetween(node.Pos(), node.End()) if numFields == 0 && len(comments) == 0 { // Empty field lists should not contain a newline. // Do not join the two lines if the first has an inline // comment, as that can result in broken formatting. 
openLine := f.Line(node.Pos()) closeLine := f.Line(node.End()) f.removeLines(openLine, closeLine) } else { // Remove lines before first comment/field and lines after last // comment/field var bodyPos, bodyEnd token.Pos if numFields > 0 { bodyPos = node.List[0].Pos() bodyEnd = node.List[len(node.List)-1].End() } if len(comments) > 0 { if pos := comments[0].Pos(); !bodyPos.IsValid() || pos < bodyPos { bodyPos = pos } if pos := comments[len(comments)-1].End(); !bodyPos.IsValid() || pos > bodyEnd { bodyEnd = pos } } f.removeLinesBetween(node.Pos(), bodyPos) f.removeLinesBetween(bodyEnd, node.End()) } // Merging adjacent fields (e.g. parameters) is disabled by default. if !f.ExtraRules { break } switch c.Parent().(type) { case *ast.FuncDecl, *ast.FuncType, *ast.InterfaceType: node.List = f.mergeAdjacentFields(node.List) c.Replace(node) case *ast.StructType: // Do not merge adjacent fields in structs. } case *ast.BasicLit: // Octal number literals were introduced in 1.13. if semver.Compare(f.LangVersion, "v1.13") >= 0 { if node.Kind == token.INT && rxOctalInteger.MatchString(node.Value) { node.Value = "0o" + node.Value[1:] c.Replace(node) } } case *ast.AssignStmt: // Only remove lines between the assignment token and the first right-hand side expression f.removeLines(f.Line(node.TokPos), f.Line(node.Rhs[0].Pos())) } } func (f *fumpter) applyPost(c *astutil.Cursor) { switch node := c.Node().(type) { // Adding newlines to composite literals happens as a "post" step, so // that we can take into account whether "pre" steps added any newlines // that would affect us here. 
case *ast.CompositeLit: if len(node.Elts) == 0 { // doesn't have elements break } openLine := f.Line(node.Lbrace) closeLine := f.Line(node.Rbrace) if openLine == closeLine { // all in a single line break } newlineAroundElems := false newlineBetweenElems := false lastEnd := node.Lbrace lastLine := openLine for i, elem := range node.Elts { pos := elem.Pos() comments := f.commentsBetween(lastEnd, pos) if len(comments) > 0 { pos = comments[0].Pos() } if curLine := f.Line(pos); curLine > lastLine { if i == 0 { newlineAroundElems = true // remove leading lines if they exist f.removeLines(openLine+1, curLine) } else { newlineBetweenElems = true } } lastEnd = elem.End() lastLine = f.Line(lastEnd) } if closeLine > lastLine { newlineAroundElems = true } if newlineBetweenElems || newlineAroundElems { first := node.Elts[0] if openLine == f.Line(first.Pos()) { // We want the newline right after the brace. f.addNewline(node.Lbrace + 1) closeLine = f.Line(node.Rbrace) } last := node.Elts[len(node.Elts)-1] if closeLine == f.Line(last.End()) { // We want the newline right before the brace. f.addNewline(node.Rbrace) } } // If there's a newline between any consecutive elements, there // must be a newline between all composite literal elements. if !newlineBetweenElems { break } for i1, elem1 := range node.Elts { i2 := i1 + 1 if i2 >= len(node.Elts) { break } elem2 := node.Elts[i2] // TODO: do we care about &{}? _, ok1 := elem1.(*ast.CompositeLit) _, ok2 := elem2.(*ast.CompositeLit) if !ok1 && !ok2 { continue } if f.Line(elem1.End()) == f.Line(elem2.Pos()) { f.addNewline(elem1.End()) } } } } func (f *fumpter) splitLongLine(c *astutil.Cursor) { if os.Getenv("GOFUMPT_SPLIT_LONG_LINES") != "on" { // By default, this feature is turned off. // Turn it on by setting GOFUMPT_SPLIT_LONG_LINES=on. 
return } node := c.Node() if node == nil { return } newlinePos := node.Pos() start := f.Position(node.Pos()) end := f.Position(node.End()) // If the node is already split in multiple lines, there's nothing to do. if start.Line != end.Line { return } // Only split at the start of the current node if it's part of a list. if _, ok := c.Parent().(*ast.BinaryExpr); ok { // Chains of binary expressions are considered lists, too. } else if c.Index() >= 0 { // For the rest of the nodes, we're in a list if c.Index() >= 0. } else { return } // Like in printLength, add an approximation of the indentation level. // Since any existing tabs were already counted as one column, multiply // the level by 7. startCol := start.Column + f.blockLevel*7 endCol := end.Column + f.blockLevel*7 // If this is a composite literal, // and we were going to insert a newline before the entire literal, // insert the newline before the first element instead. // Since we'll add a newline after the last element too, // this format is generally going to be nicer. if comp := isComposite(node); comp != nil && len(comp.Elts) > 0 { newlinePos = comp.Elts[0].Pos() } // If this is a function call, // and we were to add a newline before the first argument, // prefer adding the newline before the entire call. // End-of-line parentheses aren't very nice, as we don't put their // counterparts at the start of a line too. // We do this by using the average of the two starting positions. if call, _ := node.(*ast.CallExpr); call != nil && len(call.Args) > 0 { first := f.Position(call.Args[0].Pos()) startCol += (first.Column - start.Column) / 2 } // If the start position is too short, we definitely won't split the line. if startCol <= shortLineLimit { return } lineEnd := f.Position(f.lineEnd(start.Line)) // firstLength and secondLength are the split line lengths, excluding // indentation. 
firstLength := start.Column - f.blockLevel if firstLength < 0 { panic("negative length") } secondLength := lineEnd.Column - start.Column if secondLength < 0 { panic("negative length") } // If the line ends past the long line limit, // and both splits are estimated to take at least minSplitFactor of the limit, // then split the line. minSplitLength := int(f.minSplitFactor * longLineLimit) if endCol > longLineLimit && firstLength >= minSplitLength && secondLength >= minSplitLength { f.addNewline(newlinePos) } } func isComposite(node ast.Node) *ast.CompositeLit { switch node := node.(type) { case *ast.CompositeLit: return node case *ast.UnaryExpr: return isComposite(node.X) // e.g. &T{} default: return nil } } func (f *fumpter) stmts(list []ast.Stmt) { for i, stmt := range list { ifs, ok := stmt.(*ast.IfStmt) if !ok || i < 1 { continue // not an if following another statement } as, ok := list[i-1].(*ast.AssignStmt) if !ok || (as.Tok != token.DEFINE && as.Tok != token.ASSIGN) || !identEqual(as.Lhs[len(as.Lhs)-1], "err") { continue // not ", err :=" nor ", err =" } be, ok := ifs.Cond.(*ast.BinaryExpr) if !ok || ifs.Init != nil || ifs.Else != nil { continue // complex if } if be.Op != token.NEQ || !identEqual(be.X, "err") || !identEqual(be.Y, "nil") { continue // not "err != nil" } f.removeLinesBetween(as.End(), ifs.Pos()) } } func identEqual(expr ast.Expr, name string) bool { id, ok := expr.(*ast.Ident) return ok && id.Name == name } // isCgoImport returns true if the declaration is simply: // // import "C" // // or the equivalent: // // import `C` // // Note that parentheses do not affect the result. func isCgoImport(decl *ast.GenDecl) bool { if decl.Tok != token.IMPORT || len(decl.Specs) != 1 { return false } spec := decl.Specs[0].(*ast.ImportSpec) v, err := strconv.Unquote(spec.Path.Value) if err != nil { panic(err) // should never error } return v == "C" } // joinStdImports ensures that all standard library imports are together and at // the top of the imports list. 
func (f *fumpter) joinStdImports(d *ast.GenDecl) { var std, other []ast.Spec firstGroup := true lastEnd := d.Pos() needsSort := false // If ModulePath is "foo/bar", we assume "foo/..." is not part of std. // Users shouldn't declare modules that may collide with std this way, // but historically some private codebases have done so. // This is a relatively harmless way to make gofumpt compatible with them, // as it changes nothing for the common external module paths. var modulePrefix string if f.ModulePath == "" { // Nothing to do. } else if i := strings.IndexByte(f.ModulePath, '/'); i != -1 { // ModulePath is "foo/bar", so we use "foo" as the prefix. modulePrefix = f.ModulePath[:i] } else { // ModulePath is "foo", so we use "foo" as the prefix. modulePrefix = f.ModulePath } for i, spec := range d.Specs { spec := spec.(*ast.ImportSpec) if coms := f.commentsBetween(lastEnd, spec.Pos()); len(coms) > 0 { lastEnd = coms[len(coms)-1].End() } if i > 0 && firstGroup && f.Line(spec.Pos()) > f.Line(lastEnd)+1 { firstGroup = false } else { // We're still in the first group, update lastEnd. lastEnd = spec.End() } path, err := strconv.Unquote(spec.Path.Value) if err != nil { panic(err) // should never error } periodIndex := strings.IndexByte(path, '.') slashIndex := strings.IndexByte(path, '/') switch { // Imports with a period in the first path element are third party. // Note that this includes "foo.com" and excludes "foo/bar.com/baz". case periodIndex > 0 && (slashIndex == -1 || periodIndex < slashIndex), // "test" and "example" are reserved as per golang.org/issue/37641. // "internal" is unreachable. strings.HasPrefix(path, "test/"), strings.HasPrefix(path, "example/"), strings.HasPrefix(path, "internal/"), // See if we match modulePrefix; see its documentation above. // We match either exactly or with a slash suffix, // so that the prefix "foo" for "foo/..." does not match "foobar". 
path == modulePrefix || strings.HasPrefix(path, modulePrefix+"/"), // To be conservative, if an import has a name or an inline // comment, and isn't part of the top group, treat it as non-std. !firstGroup && (spec.Name != nil || spec.Comment != nil): other = append(other, spec) continue } // If we're moving this std import further up, reset its // position, to avoid breaking comments. if !firstGroup || len(other) > 0 { setPos(reflect.ValueOf(spec), d.Pos()) needsSort = true } std = append(std, spec) } // Ensure there is an empty line between std imports and other imports. if len(std) > 0 && len(other) > 0 && f.Line(std[len(std)-1].End())+1 >= f.Line(other[0].Pos()) { // We add two newlines, as that's necessary in some edge cases. // For example, if the std and non-std imports were together and // without indentation, adding one newline isn't enough. Two // empty lines will be printed as one by go/printer, anyway. f.addNewline(other[0].Pos() - 1) f.addNewline(other[0].Pos()) } // Finally, join the imports, keeping std at the top. d.Specs = append(std, other...) // If we moved any std imports to the first group, we need to sort them // again. if needsSort { ast.SortImports(f.fset, f.astFile) } } // mergeAdjacentFields returns fields with adjacent fields merged if possible. func (f *fumpter) mergeAdjacentFields(fields []*ast.Field) []*ast.Field { // If there are less than two fields then there is nothing to merge. if len(fields) < 2 { return fields } // Otherwise, iterate over adjacent pairs of fields, merging if possible, // and mutating fields. Elements of fields may be mutated (if merged with // following fields), discarded (if merged with a preceding field), or left // unchanged. i := 0 for j := 1; j < len(fields); j++ { if f.shouldMergeAdjacentFields(fields[i], fields[j]) { fields[i].Names = append(fields[i].Names, fields[j].Names...) 
} else { i++ fields[i] = fields[j] } } return fields[:i+1] } func (f *fumpter) shouldMergeAdjacentFields(f1, f2 *ast.Field) bool { if len(f1.Names) == 0 || len(f2.Names) == 0 { // Both must have names for the merge to work. return false } if f.Line(f1.Pos()) != f.Line(f2.Pos()) { // Trust the user if they used separate lines. return false } // Only merge if the types are equal. opt := cmp.Comparer(func(x, y token.Pos) bool { return true }) return cmp.Equal(f1.Type, f2.Type, opt) } var posType = reflect.TypeOf(token.NoPos) // setPos recursively sets all position fields in the node v to pos. func setPos(v reflect.Value, pos token.Pos) { if v.Kind() == reflect.Ptr { v = v.Elem() } if !v.IsValid() { return } if v.Type() == posType { v.Set(reflect.ValueOf(pos)) } if v.Kind() == reflect.Struct { for i := 0; i < v.NumField(); i++ { setPos(v.Field(i), pos) } } } func containsAnyDirective(group *ast.CommentGroup) bool { if group == nil { return false } for _, comment := range group.List { body := strings.TrimPrefix(comment.Text, "//") if rxCommentDirective.MatchString(body) { return true } } return false } gofumpt-0.6.0/format/format_test.go000066400000000000000000000010401455555333400173530ustar00rootroot00000000000000// Copyright (c) 2021, Daniel Martí // See LICENSE for licensing information package format_test import ( "testing" qt "github.com/frankban/quicktest" "mvdan.cc/gofumpt/format" ) func TestSourceIncludesSimplify(t *testing.T) { t.Parallel() in := []byte(` package p var () func f() { for _ = range v { } } `[1:]) want := []byte(` package p func f() { for range v { } } `[1:]) got, err := format.Source(in, format.Options{}) qt.Assert(t, err, qt.IsNil) qt.Assert(t, string(got), qt.Equals, string(want)) } gofumpt-0.6.0/format/fuzz_test.go000066400000000000000000000034111455555333400170650ustar00rootroot00000000000000// Copyright (c) 2021, Daniel Martí // See LICENSE for licensing information package format import ( "errors" "fmt" "go/scanner" "path/filepath" 
"strings" "testing" qt "github.com/frankban/quicktest" "golang.org/x/tools/txtar" ) func FuzzFormat(f *testing.F) { // Initialize the corpus with the Go files from our test scripts. paths, err := filepath.Glob(filepath.Join("..", "testdata", "script", "*.txtar")) qt.Assert(f, err, qt.IsNil) qt.Assert(f, paths, qt.Not(qt.HasLen), 0) for _, path := range paths { archive, err := txtar.ParseFile(path) qt.Assert(f, err, qt.IsNil) for _, file := range archive.Files { f.Logf("adding %s from %s", file.Name, path) if strings.HasSuffix(file.Name, ".go") || strings.Contains(file.Name, ".go.") { f.Add(string(file.Data), int8(18), false) // -lang=1.18 f.Add(string(file.Data), int8(1), false) // -lang=1.1 f.Add(string(file.Data), int8(18), true) // -lang=1.18 -extra } } } f.Fuzz(func(t *testing.T, src string, majorVersion int8, // Empty version if negative, 1.N otherwise. extraRules bool, ) { // TODO: also fuzz Options.ModulePath opts := Options{ExtraRules: extraRules} if majorVersion >= 0 { opts.LangVersion = fmt.Sprintf("1.%d", majorVersion) } orig := []byte(src) formatted, err := Source(orig, opts) if errors.As(err, &scanner.ErrorList{}) { return // invalid syntax from parsing } qt.Assert(t, err, qt.IsNil) _ = formatted // TODO: verify that the result is idempotent // TODO: verify that, if the input was valid Go 1.N syntax, // so is the output (how? go/parser lacks an option) // TODO: check calling format.Node directly as well qt.Assert(t, string(orig), qt.Equals, src, qt.Commentf("input source bytes were modified")) }) } gofumpt-0.6.0/format/rewrite.go000066400000000000000000000057761455555333400165310ustar00rootroot00000000000000// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package format import ( "go/ast" "go/token" "reflect" "unicode" "unicode/utf8" ) // Values/types for special cases. 
var ( identType = reflect.TypeOf((*ast.Ident)(nil)) objectPtrType = reflect.TypeOf((*ast.Object)(nil)) positionType = reflect.TypeOf(token.NoPos) callExprType = reflect.TypeOf((*ast.CallExpr)(nil)) ) func isWildcard(s string) bool { rune, size := utf8.DecodeRuneInString(s) return size == len(s) && unicode.IsLower(rune) } // match reports whether pattern matches val, // recording wildcard submatches in m. // If m == nil, match checks whether pattern == val. func match(m map[string]reflect.Value, pattern, val reflect.Value) bool { // Wildcard matches any expression. If it appears multiple // times in the pattern, it must match the same expression // each time. if m != nil && pattern.IsValid() && pattern.Type() == identType { name := pattern.Interface().(*ast.Ident).Name if isWildcard(name) && val.IsValid() { // wildcards only match valid (non-nil) expressions. if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() { if old, ok := m[name]; ok { return match(nil, old, val) } m[name] = val return true } } } // Otherwise, pattern and val must match recursively. if !pattern.IsValid() || !val.IsValid() { return !pattern.IsValid() && !val.IsValid() } if pattern.Type() != val.Type() { return false } // Special cases. switch pattern.Type() { case identType: // For identifiers, only the names need to match // (and none of the other *ast.Object information). // This is a common case, handle it all here instead // of recursing down any further via reflection. p := pattern.Interface().(*ast.Ident) v := val.Interface().(*ast.Ident) return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name case objectPtrType, positionType: // object pointers and token positions always match return true case callExprType: // For calls, the Ellipsis fields (token.Position) must // match since that is how f(x) and f(x...) are different. // Check them here but fall through for the remaining fields. 
p := pattern.Interface().(*ast.CallExpr) v := val.Interface().(*ast.CallExpr) if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() { return false } } p := reflect.Indirect(pattern) v := reflect.Indirect(val) if !p.IsValid() || !v.IsValid() { return !p.IsValid() && !v.IsValid() } switch p.Kind() { case reflect.Slice: if p.Len() != v.Len() { return false } for i := 0; i < p.Len(); i++ { if !match(m, p.Index(i), v.Index(i)) { return false } } return true case reflect.Struct: for i := 0; i < p.NumField(); i++ { if !match(m, p.Field(i), v.Field(i)) { return false } } return true case reflect.Interface: return match(m, p.Elem(), v.Elem()) } // Handle token integers, etc. return p.Interface() == v.Interface() } gofumpt-0.6.0/format/simplify.go000066400000000000000000000114101455555333400166620ustar00rootroot00000000000000// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package format import ( "go/ast" "go/token" "reflect" ) type simplifier struct{} func (s simplifier) Visit(node ast.Node) ast.Visitor { switch n := node.(type) { case *ast.CompositeLit: // array, slice, and map composite literals may be simplified outer := n var keyType, eltType ast.Expr switch typ := outer.Type.(type) { case *ast.ArrayType: eltType = typ.Elt case *ast.MapType: keyType = typ.Key eltType = typ.Value } if eltType != nil { var ktyp reflect.Value if keyType != nil { ktyp = reflect.ValueOf(keyType) } typ := reflect.ValueOf(eltType) for i, x := range outer.Elts { px := &outer.Elts[i] // look at value of indexed/named elements if t, ok := x.(*ast.KeyValueExpr); ok { if keyType != nil { s.simplifyLiteral(ktyp, keyType, t.Key, &t.Key) } x = t.Value px = &t.Value } s.simplifyLiteral(typ, eltType, x, px) } // node was simplified - stop walk (there are no subnodes to simplify) return nil } case *ast.SliceExpr: // a slice expression of the form: s[a:len(s)] // can be simplified to: s[a:] // if s is 
"simple enough" (for now we only accept identifiers) // // Note: This may not be correct because len may have been redeclared in // the same package. However, this is extremely unlikely and so far // (April 2022, after years of supporting this rewrite feature) // has never come up, so let's keep it working as is (see also #15153). // // Also note that this code used to use go/ast's object tracking, // which was removed in exchange for go/parser.Mode.SkipObjectResolution. // False positives are extremely unlikely as described above, // and go/ast's object tracking is incomplete in any case. if n.Max != nil { // - 3-index slices always require the 2nd and 3rd index break } if s, _ := n.X.(*ast.Ident); s != nil { // the array/slice object is a single identifier if call, _ := n.High.(*ast.CallExpr); call != nil && len(call.Args) == 1 && !call.Ellipsis.IsValid() { // the high expression is a function call with a single argument if fun, _ := call.Fun.(*ast.Ident); fun != nil && fun.Name == "len" { // the function called is "len" if arg, _ := call.Args[0].(*ast.Ident); arg != nil && arg.Name == s.Name { // the len argument is the array/slice object n.High = nil } } } } // Note: We could also simplify slice expressions of the form s[0:b] to s[:b] // but we leave them as is since sometimes we want to be very explicit // about the lower bound. 
// An example where the 0 helps: // x, y, z := b[0:2], b[2:4], b[4:6] // An example where it does not: // x, y := b[:n], b[n:] case *ast.RangeStmt: // - a range of the form: for x, _ = range v {...} // can be simplified to: for x = range v {...} // - a range of the form: for _ = range v {...} // can be simplified to: for range v {...} if isBlank(n.Value) { n.Value = nil } if isBlank(n.Key) && n.Value == nil { n.Key = nil } } return s } func (s simplifier) simplifyLiteral(typ reflect.Value, astType, x ast.Expr, px *ast.Expr) { ast.Walk(s, x) // simplify x // if the element is a composite literal and its literal type // matches the outer literal's element type exactly, the inner // literal type may be omitted if inner, ok := x.(*ast.CompositeLit); ok { if match(nil, typ, reflect.ValueOf(inner.Type)) { inner.Type = nil } } // if the outer literal's element type is a pointer type *T // and the element is & of a composite literal of type T, // the inner &T may be omitted. if ptr, ok := astType.(*ast.StarExpr); ok { if addr, ok := x.(*ast.UnaryExpr); ok && addr.Op == token.AND { if inner, ok := addr.X.(*ast.CompositeLit); ok { if match(nil, reflect.ValueOf(ptr.X), reflect.ValueOf(inner.Type)) { inner.Type = nil // drop T *px = inner // drop & } } } } } func isBlank(x ast.Expr) bool { ident, ok := x.(*ast.Ident) return ok && ident.Name == "_" } func simplify(f *ast.File) { // remove empty declarations such as "const ()", etc removeEmptyDeclGroups(f) var s simplifier ast.Walk(s, f) } func removeEmptyDeclGroups(f *ast.File) { i := 0 for _, d := range f.Decls { if g, ok := d.(*ast.GenDecl); !ok || !isEmpty(f, g) { f.Decls[i] = d i++ } } f.Decls = f.Decls[:i] } func isEmpty(f *ast.File, g *ast.GenDecl) bool { if g.Doc != nil || g.Specs != nil { return false } for _, c := range f.Comments { // if there is a comment in the declaration, it is not considered empty if g.Pos() <= c.Pos() && c.End() <= g.End() { return false } } return true } 
gofumpt-0.6.0/format/testdata/000077500000000000000000000000001455555333400163135ustar00rootroot00000000000000gofumpt-0.6.0/format/testdata/fuzz/000077500000000000000000000000001455555333400173115ustar00rootroot00000000000000gofumpt-0.6.0/format/testdata/fuzz/FuzzFormat/000077500000000000000000000000001455555333400214205ustar00rootroot0000000000000018c862f09f82fe57f536e7ab4b1bd63daecc2cba8189530bb0eb77b8cef6f798000066400000000000000000000001541455555333400325550ustar00rootroot00000000000000gofumpt-0.6.0/format/testdata/fuzz/FuzzFormatgo test fuzz v1 string("package A\nfunc A000000000(A000000000000,\nA00000000)(){\"\"}") int8(62) bool(true) 948d1d5be3c838b207d345a3ac57e97bb3b77788cb5039a65994967490c49baa000066400000000000000000000001131455555333400317620ustar00rootroot00000000000000gofumpt-0.6.0/format/testdata/fuzz/FuzzFormatgo test fuzz v1 string("package A\nvar A A\nvar A A") int8(18) bool(false) gofumpt-0.6.0/gen_govendor.go000066400000000000000000000036201455555333400162160ustar00rootroot00000000000000// Copyright (c) 2019, Daniel Martí // See LICENSE for licensing information //go:build ignore package main import ( "bytes" "encoding/json" "io" "os" "os/exec" "path" "path/filepath" "strings" ) var ( modulePath = "mvdan.cc/gofumpt" vendorDir = filepath.Join("internal", "govendor") ) // All the packages which affect the formatting behavior. var toVendor = []string{ "go/format", "go/printer", "go/doc/comment", "internal/diff", } func main() { catch(os.RemoveAll(vendorDir)) catch(os.MkdirAll(vendorDir, 0o777)) out, err := exec.Command("go", "env", "GOVERSION").Output() catch(err) catch(os.WriteFile(filepath.Join(vendorDir, "version.txt"), out, 0o666)) oldnew := []string{ "//go:generate", "//disabled go:generate", } for _, pkgPath := range toVendor { oldnew = append(oldnew, pkgPath, path.Join(modulePath, vendorDir, pkgPath)) } replacer := strings.NewReplacer(oldnew...) listArgs := append([]string{"list", "-json"}, toVendor...) 
out, err = exec.Command("go", listArgs...).Output() catch(err) type Package struct { Dir string ImportPath string GoFiles []string } dec := json.NewDecoder(bytes.NewReader(out)) for { var pkg Package err := dec.Decode(&pkg) if err == io.EOF { break } catch(err) // Otherwise we can't import it. dstPkg := strings.TrimPrefix(pkg.ImportPath, "internal/") dstDir := filepath.Join(vendorDir, filepath.FromSlash(dstPkg)) catch(os.MkdirAll(dstDir, 0o777)) // TODO: if the packages start using build tags like GOOS or GOARCH, // we will need to vendor IgnoredGoFiles as well. for _, goFile := range pkg.GoFiles { srcBytes, err := os.ReadFile(filepath.Join(pkg.Dir, goFile)) catch(err) src := replacer.Replace(string(srcBytes)) dst := filepath.Join(dstDir, goFile) catch(os.WriteFile(dst, []byte(src), 0o666)) } } } func catch(err error) { if err != nil { panic(err) } } gofumpt-0.6.0/go.mod000066400000000000000000000005461455555333400143250ustar00rootroot00000000000000module mvdan.cc/gofumpt go 1.20 require ( github.com/frankban/quicktest v1.14.6 github.com/google/go-cmp v0.6.0 github.com/rogpeppe/go-internal v1.12.0 golang.org/x/mod v0.14.0 golang.org/x/sync v0.6.0 golang.org/x/sys v0.16.0 golang.org/x/tools v0.17.0 ) require ( github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect ) gofumpt-0.6.0/go.sum000066400000000000000000000034521455555333400143510ustar00rootroot00000000000000github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/kr/pretty v0.3.1 
h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= gofumpt-0.6.0/gofmt.go000066400000000000000000000422221455555333400146570ustar00rootroot00000000000000// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package main import ( "bytes" "context" "encoding/json" "flag" "fmt" "go/ast" "go/parser" "go/scanner" "go/token" "io" "io/fs" "os" "os/exec" "path/filepath" "regexp" "runtime" "runtime/pprof" "strings" "sync" "golang.org/x/sync/semaphore" gformat "mvdan.cc/gofumpt/format" "mvdan.cc/gofumpt/internal/govendor/diff" "mvdan.cc/gofumpt/internal/govendor/go/printer" gversion "mvdan.cc/gofumpt/internal/version" ) //go:generate go run gen_govendor.go //go:generate go run . -w internal/govendor var ( // main operation modes list = flag.Bool("l", false, "") write = flag.Bool("w", false, "") doDiff = flag.Bool("d", false, "") allErrors = flag.Bool("e", false, "") // debugging cpuprofile = flag.String("cpuprofile", "", "") // gofumpt's own flags langVersion = flag.String("lang", "", "") modulePath = flag.String("modpath", "", "") extraRules = flag.Bool("extra", false, "") showVersion = flag.Bool("version", false, "") // DEPRECATED rewriteRule = flag.String("r", "", "") simplifyAST = flag.Bool("s", false, "") ) var version = "" // Keep these in sync with go/format/format.go. const ( tabWidth = 8 printerMode = printer.UseSpaces | printer.TabIndent | printerNormalizeNumbers // printerNormalizeNumbers means to canonicalize number literal prefixes // and exponents while printing. See https://golang.org/doc/go1.13#gofmt. // // This value is defined in go/printer specifically for go/format and cmd/gofmt. printerNormalizeNumbers = 1 << 30 ) // fdSem guards the number of concurrently-open file descriptors. // // For now, this is arbitrarily set to 200, based on the observation that many // platforms default to a kernel limit of 256. Ideally, perhaps we should derive // it from rlimit on platforms that support that system call. // // File descriptors opened from outside of this package are not tracked, // so this limit may be approximate. 
var fdSem = make(chan bool, 200) var ( fileSet = token.NewFileSet() // per process FileSet parserMode parser.Mode ) func usage() { fmt.Fprintf(os.Stderr, `usage: gofumpt [flags] [path ...] -version show version and exit -d display diffs instead of rewriting files -e report all errors (not just the first 10 on different lines) -l list files whose formatting differs from gofumpt's -w write result to (source) file instead of stdout -extra enable extra rules which should be vetted by a human -lang str target Go version in the form "1.X" (default from go.mod) -modpath str Go module path containing the source file (default from go.mod) `) } func initParserMode() { parserMode = parser.ParseComments | parser.SkipObjectResolution if *allErrors { parserMode |= parser.AllErrors } } func isGoFile(f fs.DirEntry) bool { // ignore non-Go files name := f.Name() return !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") && !f.IsDir() } var rxCodeGenerated = regexp.MustCompile(`^// Code generated .* DO NOT EDIT\.$`) func isGenerated(file *ast.File) bool { for _, cg := range file.Comments { if cg.Pos() > file.Package { return false } for _, line := range cg.List { if rxCodeGenerated.MatchString(line.Text) { return true } } } return false } // A sequencer performs concurrent tasks that may write output, but emits that // output in a deterministic order. type sequencer struct { maxWeight int64 sem *semaphore.Weighted // weighted by input bytes (an approximate proxy for memory overhead) prev <-chan *reporterState // 1-buffered } // newSequencer returns a sequencer that allows concurrent tasks up to maxWeight // and writes tasks' output to out and err. 
func newSequencer(maxWeight int64, out, err io.Writer) *sequencer { sem := semaphore.NewWeighted(maxWeight) prev := make(chan *reporterState, 1) prev <- &reporterState{out: out, err: err} return &sequencer{ maxWeight: maxWeight, sem: sem, prev: prev, } } // exclusive is a weight that can be passed to a sequencer to cause // a task to be executed without any other concurrent tasks. const exclusive = -1 // Add blocks until the sequencer has enough weight to spare, then adds f as a // task to be executed concurrently. // // If the weight is either negative or larger than the sequencer's maximum // weight, Add blocks until all other tasks have completed, then the task // executes exclusively (blocking all other calls to Add until it completes). // // f may run concurrently in a goroutine, but its output to the passed-in // reporter will be sequential relative to the other tasks in the sequencer. // // If f invokes a method on the reporter, execution of that method may block // until the previous task has finished. (To maximize concurrency, f should // avoid invoking the reporter until it has finished any parallelizable work.) // // If f returns a non-nil error, that error will be reported after f's output // (if any) and will cause a nonzero final exit code. func (s *sequencer) Add(weight int64, f func(*reporter) error) { if weight < 0 || weight > s.maxWeight { weight = s.maxWeight } if err := s.sem.Acquire(context.TODO(), weight); err != nil { // Change the task from "execute f" to "report err". weight = 0 f = func(*reporter) error { return err } } r := &reporter{prev: s.prev} next := make(chan *reporterState, 1) s.prev = next // Start f in parallel: it can run until it invokes a method on r, at which // point it will block until the previous task releases the output state. go func() { if err := f(r); err != nil { r.Report(err) } next <- r.getState() // Release the next task. 
s.sem.Release(weight) }() } // AddReport prints an error to s after the output of any previously-added // tasks, causing the final exit code to be nonzero. func (s *sequencer) AddReport(err error) { s.Add(0, func(*reporter) error { return err }) } // GetExitCode waits for all previously-added tasks to complete, then returns an // exit code for the sequence suitable for passing to os.Exit. func (s *sequencer) GetExitCode() int { c := make(chan int, 1) s.Add(0, func(r *reporter) error { c <- r.ExitCode() return nil }) return <-c } // A reporter reports output, warnings, and errors. type reporter struct { prev <-chan *reporterState state *reporterState } // reporterState carries the state of a reporter instance. // // Only one reporter at a time may have access to a reporterState. type reporterState struct { out, err io.Writer exitCode int } // getState blocks until any prior reporters are finished with the reporter // state, then returns the state for manipulation. func (r *reporter) getState() *reporterState { if r.state == nil { r.state = <-r.prev } return r.state } // Warnf emits a warning message to the reporter's error stream, // without changing its exit code. func (r *reporter) Warnf(format string, args ...any) { fmt.Fprintf(r.getState().err, format, args...) } // Write emits a slice to the reporter's output stream. // // Any error is returned to the caller, and does not otherwise affect the // reporter's exit code. func (r *reporter) Write(p []byte) (int, error) { return r.getState().out.Write(p) } // Report emits a non-nil error to the reporter's error stream, // changing its exit code to a nonzero value. func (r *reporter) Report(err error) { if err == nil { panic("Report with nil error") } st := r.getState() scanner.PrintError(st.err, err) st.exitCode = 2 } func (r *reporter) ExitCode() int { return r.getState().exitCode } // If info == nil, we are formatting stdin instead of a file. 
// If in == nil, the source is the contents of the file with the given filename. func processFile(filename string, info fs.FileInfo, in io.Reader, r *reporter, explicit bool) error { src, err := readFile(filename, info, in) if err != nil { return err } fileSet := token.NewFileSet() fragmentOk := false if info == nil { // If we are formatting stdin, we accept a program fragment in lieu of a // complete source file. fragmentOk = true } file, sourceAdj, indentAdj, err := parse(fileSet, filename, src, fragmentOk) if err != nil { return err } ast.SortImports(fileSet, file) // Apply gofumpt's changes before we print the code in gofumpt's format. // If either -lang or -modpath aren't set, fetch them from go.mod. lang := *langVersion modpath := *modulePath if lang == "" || modpath == "" { dir := filepath.Dir(filename) mod, ok := moduleCacheByDir.Load(dir) if ok && mod != nil { mod := mod.(*module) if lang == "" { lang = mod.Go } if modpath == "" { modpath = mod.Module.Path } } } // We always apply the gofumpt formatting rules to explicit files, including stdin. // Otherwise, we don't apply them on generated files. // We also skip walking vendor directories entirely, but that happens elsewhere. 
if explicit || !isGenerated(file) { gformat.File(fileSet, file, gformat.Options{ LangVersion: lang, ModulePath: modpath, ExtraRules: *extraRules, }) } res, err := format(fileSet, file, sourceAdj, indentAdj, src, printer.Config{Mode: printerMode, Tabwidth: tabWidth}) if err != nil { return err } if !bytes.Equal(src, res) { // formatting has changed if *list { fmt.Fprintln(r, filename) } if *write { if info == nil { panic("-w should not have been allowed with stdin") } // make a temporary backup before overwriting original perm := info.Mode().Perm() bakname, err := backupFile(filename+".", src, perm) if err != nil { return err } fdSem <- true err = os.WriteFile(filename, res, perm) <-fdSem if err != nil { os.Rename(bakname, filename) return err } err = os.Remove(bakname) if err != nil { return err } } if *doDiff { newName := filepath.ToSlash(filename) oldName := newName + ".orig" r.Write(diff.Diff(oldName, src, newName, res)) } } if !*list && !*write && !*doDiff { _, err = r.Write(res) } return err } // readFile reads the contents of filename, described by info. // If in is non-nil, readFile reads directly from it. // Otherwise, readFile opens and reads the file itself, // with the number of concurrently-open files limited by fdSem. func readFile(filename string, info fs.FileInfo, in io.Reader) ([]byte, error) { if in == nil { fdSem <- true var err error f, err := os.Open(filename) if err != nil { return nil, err } in = f defer func() { f.Close() <-fdSem }() } // Compute the file's size and read its contents with minimal allocations. // // If we have the FileInfo from filepath.WalkDir, use it to make // a buffer of the right size and avoid ReadAll's reallocations. // // If the size is unknown (or bogus, or overflows an int), fall back to // a size-independent ReadAll. 
size := -1 if info != nil && info.Mode().IsRegular() && int64(int(info.Size())) == info.Size() { size = int(info.Size()) } if size+1 <= 0 { // The file is not known to be regular, so we don't have a reliable size for it. var err error src, err := io.ReadAll(in) if err != nil { return nil, err } return src, nil } // We try to read size+1 bytes so that we can detect modifications: if we // read more than size bytes, then the file was modified concurrently. // (If that happens, we could, say, append to src to finish the read, or // proceed with a truncated buffer — but the fact that it changed at all // indicates a possible race with someone editing the file, so we prefer to // stop to avoid corrupting it.) src := make([]byte, size+1) n, err := io.ReadFull(in, src) switch err { case nil, io.EOF, io.ErrUnexpectedEOF: // io.ReadFull returns io.EOF (for an empty file) or io.ErrUnexpectedEOF // (for a non-empty file) if the file was changed unexpectedly. Continue // with comparing file sizes in those cases. default: return nil, err } if n < size { return nil, fmt.Errorf("error: size of %s changed during reading (from %d to %d bytes)", filename, size, n) } else if n > size { return nil, fmt.Errorf("error: size of %s changed during reading (from %d to >=%d bytes)", filename, size, len(src)) } return src[:n], nil } func main() { os.Exit(main1()) } func main1() int { // Arbitrarily limit in-flight work to 2MiB times the number of threads. // // The actual overhead for the parse tree and output will depend on the // specifics of the file, but this at least keeps the footprint of the process // roughly proportional to GOMAXPROCS. maxWeight := (2 << 20) * int64(runtime.GOMAXPROCS(0)) s := newSequencer(maxWeight, os.Stdout, os.Stderr) // call gofmtMain in a separate function // so that it can use defer and have them // run before the exit. 
gofmtMain(s) return s.GetExitCode() } func gofmtMain(s *sequencer) { // Ensure our parsed files never start with base 1, // to ensure that using token.NoPos+1 will panic. fileSet.AddFile("gofumpt_base.go", 1, 10) flag.Usage = usage flag.Parse() if *simplifyAST { fmt.Fprintf(os.Stderr, "warning: -s is deprecated as it is always enabled\n") } if *rewriteRule != "" { fmt.Fprintf(os.Stderr, `the rewrite flag is no longer available; use "gofmt -r" instead`+"\n") os.Exit(2) } // Print the gofumpt version if the user asks for it. if *showVersion { fmt.Println(gversion.String(version)) return } if *cpuprofile != "" { fdSem <- true f, err := os.Create(*cpuprofile) if err != nil { s.AddReport(fmt.Errorf("creating cpu profile: %s", err)) return } defer func() { f.Close() <-fdSem }() pprof.StartCPUProfile(f) defer pprof.StopCPUProfile() } initParserMode() args := flag.Args() if len(args) == 0 { if *write { s.AddReport(fmt.Errorf("error: cannot use -w with standard input")) return } s.Add(0, func(r *reporter) error { // TODO: test explicit==true return processFile("", nil, os.Stdin, r, true) }) return } for _, arg := range args { switch info, err := os.Stat(arg); { case err != nil: s.AddReport(err) case !info.IsDir(): // Non-directory arguments are always formatted. arg := arg s.Add(fileWeight(arg, info), func(r *reporter) error { return processFile(arg, info, nil, r, true) }) default: // Directories are walked, ignoring non-Go files. err := filepath.WalkDir(arg, func(path string, f fs.DirEntry, err error) error { // vendor and testdata directories are skipped, // unless they are explicitly passed as an argument. 
base := filepath.Base(path) if path != arg && (base == "vendor" || base == "testdata") { return filepath.SkipDir } if err != nil || !isGoFile(f) { return err } info, err := f.Info() if err != nil { s.AddReport(err) return nil } s.Add(fileWeight(path, info), func(r *reporter) error { return processFile(path, info, nil, r, false) }) return nil }) if err != nil { s.AddReport(err) } } } } type module struct { Go string Module struct { Path string } } func loadModuleInfo(dir string) any { cmd := exec.Command("go", "mod", "edit", "-json") cmd.Dir = dir // Spawning "go mod edit" will open files by design, // such as the named pipe to obtain stdout. // TODO(mvdan): if we run into "too many open files" errors again in the // future, we probably need to turn fdSem into a weighted semaphore so this // operation can acquire a weight larger than 1. fdSem <- true out, err := cmd.Output() defer func() { <-fdSem }() if err != nil || len(out) == 0 { return nil } mod := new(module) if err := json.Unmarshal(out, mod); err != nil { return nil } return mod } // Written to by fileWeight, read from fileWeight and processFile. // A present but nil value means that loading the module info failed. // Note that we don't require the keys to be absolute directories, // so duplicates are possible. The same can happen with symlinks. var moduleCacheByDir sync.Map // map[dirString]*module func fileWeight(path string, info fs.FileInfo) int64 { dir := filepath.Dir(path) if _, ok := moduleCacheByDir.Load(dir); !ok { moduleCacheByDir.Store(dir, loadModuleInfo(dir)) } if info == nil { return exclusive } if info.Mode().Type() == fs.ModeSymlink { var err error info, err = os.Stat(path) if err != nil { return exclusive } } if !info.Mode().IsRegular() { // For non-regular files, FileInfo.Size is system-dependent and thus not a // reliable indicator of weight. 
return exclusive } return info.Size() } const chmodSupported = runtime.GOOS != "windows" // backupFile writes data to a new file named filename with permissions perm, // with 0 && isSpace(src[i-1]) { i-- } return append(res, src[i:]...), nil } // isSpace reports whether the byte is a space character. // isSpace defines a space as being among the following bytes: ' ', '\t', '\n' and '\r'. func isSpace(b byte) bool { return b == ' ' || b == '\t' || b == '\n' || b == '\r' } gofumpt-0.6.0/internal/000077500000000000000000000000001455555333400150265ustar00rootroot00000000000000gofumpt-0.6.0/internal/govendor/000077500000000000000000000000001455555333400166515ustar00rootroot00000000000000gofumpt-0.6.0/internal/govendor/diff/000077500000000000000000000000001455555333400175615ustar00rootroot00000000000000gofumpt-0.6.0/internal/govendor/diff/diff.go000066400000000000000000000170531455555333400210260ustar00rootroot00000000000000// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package diff import ( "bytes" "fmt" "sort" "strings" ) // A pair is a pair of values tracked for both the x and y side of a diff. // It is typically a pair of line indexes. type pair struct{ x, y int } // Diff returns an anchored diff of the two texts old and new // in the “unified diff” format. If old and new are identical, // Diff returns a nil slice (no output). // // Unix diff implementations typically look for a diff with // the smallest number of lines inserted and removed, // which can in the worst case take time quadratic in the // number of lines in the texts. As a result, many implementations // either can be made to run for a long time or cut off the search // after a predetermined amount of work. 
// // In contrast, this implementation looks for a diff with the // smallest number of “unique” lines inserted and removed, // where unique means a line that appears just once in both old and new. // We call this an “anchored diff” because the unique lines anchor // the chosen matching regions. An anchored diff is usually clearer // than a standard diff, because the algorithm does not try to // reuse unrelated blank lines or closing braces. // The algorithm also guarantees to run in O(n log n) time // instead of the standard O(n²) time. // // Some systems call this approach a “patience diff,” named for // the “patience sorting” algorithm, itself named for a solitaire card game. // We avoid that name for two reasons. First, the name has been used // for a few different variants of the algorithm, so it is imprecise. // Second, the name is frequently interpreted as meaning that you have // to wait longer (to be patient) for the diff, meaning that it is a slower algorithm, // when in fact the algorithm is faster than the standard one. func Diff(oldName string, old []byte, newName string, new []byte) []byte { if bytes.Equal(old, new) { return nil } x := lines(old) y := lines(new) // Print diff header. var out bytes.Buffer fmt.Fprintf(&out, "diff %s %s\n", oldName, newName) fmt.Fprintf(&out, "--- %s\n", oldName) fmt.Fprintf(&out, "+++ %s\n", newName) // Loop over matches to consider, // expanding each match to include surrounding lines, // and then printing diff chunks. // To avoid setup/teardown cases outside the loop, // tgs returns a leading {0,0} and trailing {len(x), len(y)} pair // in the sequence of matches. var ( done pair // printed up to x[:done.x] and y[:done.y] chunk pair // start lines of current chunk count pair // number of lines from each side in current chunk ctext []string // lines for current chunk ) for _, m := range tgs(x, y) { if m.x < done.x { // Already handled scanning forward from earlier match. 
continue } // Expand matching lines as far possible, // establishing that x[start.x:end.x] == y[start.y:end.y]. // Note that on the first (or last) iteration we may (or definitely do) // have an empty match: start.x==end.x and start.y==end.y. start := m for start.x > done.x && start.y > done.y && x[start.x-1] == y[start.y-1] { start.x-- start.y-- } end := m for end.x < len(x) && end.y < len(y) && x[end.x] == y[end.y] { end.x++ end.y++ } // Emit the mismatched lines before start into this chunk. // (No effect on first sentinel iteration, when start = {0,0}.) for _, s := range x[done.x:start.x] { ctext = append(ctext, "-"+s) count.x++ } for _, s := range y[done.y:start.y] { ctext = append(ctext, "+"+s) count.y++ } // If we're not at EOF and have too few common lines, // the chunk includes all the common lines and continues. const C = 3 // number of context lines if (end.x < len(x) || end.y < len(y)) && (end.x-start.x < C || (len(ctext) > 0 && end.x-start.x < 2*C)) { for _, s := range x[start.x:end.x] { ctext = append(ctext, " "+s) count.x++ count.y++ } done = end continue } // End chunk with common lines for context. if len(ctext) > 0 { n := end.x - start.x if n > C { n = C } for _, s := range x[start.x : start.x+n] { ctext = append(ctext, " "+s) count.x++ count.y++ } done = pair{start.x + n, start.y + n} // Format and emit chunk. // Convert line numbers to 1-indexed. // Special case: empty file shows up as 0,0 not 1,0. if count.x > 0 { chunk.x++ } if count.y > 0 { chunk.y++ } fmt.Fprintf(&out, "@@ -%d,%d +%d,%d @@\n", chunk.x, count.x, chunk.y, count.y) for _, s := range ctext { out.WriteString(s) } count.x = 0 count.y = 0 ctext = ctext[:0] } // If we reached EOF, we're done. if end.x >= len(x) && end.y >= len(y) { break } // Otherwise start a new chunk. 
chunk = pair{end.x - C, end.y - C} for _, s := range x[chunk.x:end.x] { ctext = append(ctext, " "+s) count.x++ count.y++ } done = end } return out.Bytes() } // lines returns the lines in the file x, including newlines. // If the file does not end in a newline, one is supplied // along with a warning about the missing newline. func lines(x []byte) []string { l := strings.SplitAfter(string(x), "\n") if l[len(l)-1] == "" { l = l[:len(l)-1] } else { // Treat last line as having a message about the missing newline attached, // using the same text as BSD/GNU diff (including the leading backslash). l[len(l)-1] += "\n\\ No newline at end of file\n" } return l } // tgs returns the pairs of indexes of the longest common subsequence // of unique lines in x and y, where a unique line is one that appears // once in x and once in y. // // The longest common subsequence algorithm is as described in // Thomas G. Szymanski, “A Special Case of the Maximal Common // Subsequence Problem,” Princeton TR #170 (January 1975), // available at https://research.swtch.com/tgs170.pdf. func tgs(x, y []string) []pair { // Count the number of times each string appears in a and b. // We only care about 0, 1, many, counted as 0, -1, -2 // for the x side and 0, -4, -8 for the y side. // Using negative numbers now lets us distinguish positive line numbers later. m := make(map[string]int) for _, s := range x { if c := m[s]; c > -2 { m[s] = c - 1 } } for _, s := range y { if c := m[s]; c > -8 { m[s] = c - 4 } } // Now unique strings can be identified by m[s] = -1+-4. // // Gather the indexes of those strings in x and y, building: // xi[i] = increasing indexes of unique strings in x. // yi[i] = increasing indexes of unique strings in y. // inv[i] = index j such that x[xi[i]] = y[yi[j]]. 
var xi, yi, inv []int for i, s := range y { if m[s] == -1+-4 { m[s] = len(yi) yi = append(yi, i) } } for i, s := range x { if j, ok := m[s]; ok && j >= 0 { xi = append(xi, i) inv = append(inv, j) } } // Apply Algorithm A from Szymanski's paper. // In those terms, A = J = inv and B = [0, n). // We add sentinel pairs {0,0}, and {len(x),len(y)} // to the returned sequence, to help the processing loop. J := inv n := len(xi) T := make([]int, n) L := make([]int, n) for i := range T { T[i] = n + 1 } for i := 0; i < n; i++ { k := sort.Search(n, func(k int) bool { return T[k] >= J[i] }) T[k] = J[i] L[i] = k + 1 } k := 0 for _, v := range L { if k < v { k = v } } seq := make([]pair, 2+k) seq[1+k] = pair{len(x), len(y)} // sentinel at end lastj := n for i := n - 1; i >= 0; i-- { if L[i] == k && J[i] < lastj { seq[k] = pair{xi[i], yi[J[i]]} k-- } } seq[0] = pair{0, 0} // sentinel at start return seq } gofumpt-0.6.0/internal/govendor/go/000077500000000000000000000000001455555333400172565ustar00rootroot00000000000000gofumpt-0.6.0/internal/govendor/go/doc/000077500000000000000000000000001455555333400200235ustar00rootroot00000000000000gofumpt-0.6.0/internal/govendor/go/doc/comment/000077500000000000000000000000001455555333400214655ustar00rootroot00000000000000gofumpt-0.6.0/internal/govendor/go/doc/comment/doc.go000066400000000000000000000024271455555333400225660ustar00rootroot00000000000000// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. /* Package comment implements parsing and reformatting of Go doc comments, (documentation comments), which are comments that immediately precede a top-level declaration of a package, const, func, type, or var. Go doc comment syntax is a simplified subset of Markdown that supports links, headings, paragraphs, lists (without nesting), and preformatted text blocks. The details of the syntax are documented at https://go.dev/doc/comment. 
To parse the text associated with a doc comment (after removing comment markers), use a [Parser]: var p comment.Parser doc := p.Parse(text) The result is a [*Doc]. To reformat it as a doc comment, HTML, Markdown, or plain text, use a [Printer]: var pr comment.Printer os.Stdout.Write(pr.Text(doc)) The [Parser] and [Printer] types are structs whose fields can be modified to customize the operations. For details, see the documentation for those types. Use cases that need additional control over reformatting can implement their own logic by inspecting the parsed syntax itself. See the documentation for [Doc], [Block], [Text] for an overview and links to additional types. */ package comment gofumpt-0.6.0/internal/govendor/go/doc/comment/html.go000066400000000000000000000067341455555333400227720ustar00rootroot00000000000000// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package comment import ( "bytes" "fmt" "strconv" ) // An htmlPrinter holds the state needed for printing a Doc as HTML. type htmlPrinter struct { *Printer tight bool } // HTML returns an HTML formatting of the Doc. // See the [Printer] documentation for ways to customize the HTML output. func (p *Printer) HTML(d *Doc) []byte { hp := &htmlPrinter{Printer: p} var out bytes.Buffer for _, x := range d.Content { hp.block(&out, x) } return out.Bytes() } // block prints the block x to out. func (p *htmlPrinter) block(out *bytes.Buffer, x Block) { switch x := x.(type) { default: fmt.Fprintf(out, "?%T", x) case *Paragraph: if !p.tight { out.WriteString("

") } p.text(out, x.Text) out.WriteString("\n") case *Heading: out.WriteString("") p.text(out, x.Text) out.WriteString("\n") case *Code: out.WriteString("

")
		p.escape(out, x.Text)
		out.WriteString("
\n") case *List: kind := "ol>\n" if x.Items[0].Number == "" { kind = "ul>\n" } out.WriteString("<") out.WriteString(kind) next := "1" for _, item := range x.Items { out.WriteString("") p.tight = !x.BlankBetween() for _, blk := range item.Content { p.block(out, blk) } p.tight = false } out.WriteString("= 0; i-- { if b[i] < '9' { b[i]++ return string(b) } b[i] = '0' } return "1" + string(b) } // text prints the text sequence x to out. func (p *htmlPrinter) text(out *bytes.Buffer, x []Text) { for _, t := range x { switch t := t.(type) { case Plain: p.escape(out, string(t)) case Italic: out.WriteString("") p.escape(out, string(t)) out.WriteString("") case *Link: out.WriteString(``) p.text(out, t.Text) out.WriteString("") case *DocLink: url := p.docLinkURL(t) if url != "" { out.WriteString(``) } p.text(out, t.Text) if url != "" { out.WriteString("") } } } } // escape prints s to out as plain text, // escaping < & " ' and > to avoid being misinterpreted // in larger HTML constructs. func (p *htmlPrinter) escape(out *bytes.Buffer, s string) { start := 0 for i := 0; i < len(s); i++ { switch s[i] { case '<': out.WriteString(s[start:i]) out.WriteString("<") start = i + 1 case '&': out.WriteString(s[start:i]) out.WriteString("&") start = i + 1 case '"': out.WriteString(s[start:i]) out.WriteString(""") start = i + 1 case '\'': out.WriteString(s[start:i]) out.WriteString("'") start = i + 1 case '>': out.WriteString(s[start:i]) out.WriteString(">") start = i + 1 } } out.WriteString(s[start:]) } gofumpt-0.6.0/internal/govendor/go/doc/comment/markdown.go000066400000000000000000000107011455555333400236350ustar00rootroot00000000000000// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package comment import ( "bytes" "fmt" "strings" ) // An mdPrinter holds the state needed for printing a Doc as Markdown. 
type mdPrinter struct { *Printer headingPrefix string raw bytes.Buffer } // Markdown returns a Markdown formatting of the Doc. // See the [Printer] documentation for ways to customize the Markdown output. func (p *Printer) Markdown(d *Doc) []byte { mp := &mdPrinter{ Printer: p, headingPrefix: strings.Repeat("#", p.headingLevel()) + " ", } var out bytes.Buffer for i, x := range d.Content { if i > 0 { out.WriteByte('\n') } mp.block(&out, x) } return out.Bytes() } // block prints the block x to out. func (p *mdPrinter) block(out *bytes.Buffer, x Block) { switch x := x.(type) { default: fmt.Fprintf(out, "?%T", x) case *Paragraph: p.text(out, x.Text) out.WriteString("\n") case *Heading: out.WriteString(p.headingPrefix) p.text(out, x.Text) if id := p.headingID(x); id != "" { out.WriteString(" {#") out.WriteString(id) out.WriteString("}") } out.WriteString("\n") case *Code: md := x.Text for md != "" { var line string line, md, _ = strings.Cut(md, "\n") if line != "" { out.WriteString("\t") out.WriteString(line) } out.WriteString("\n") } case *List: loose := x.BlankBetween() for i, item := range x.Items { if i > 0 && loose { out.WriteString("\n") } if n := item.Number; n != "" { out.WriteString(" ") out.WriteString(n) out.WriteString(". ") } else { out.WriteString(" - ") // SP SP - SP } for i, blk := range item.Content { const fourSpace = " " if i > 0 { out.WriteString("\n" + fourSpace) } p.text(out, blk.(*Paragraph).Text) out.WriteString("\n") } } } } // text prints the text sequence x to out. func (p *mdPrinter) text(out *bytes.Buffer, x []Text) { p.raw.Reset() p.rawText(&p.raw, x) line := bytes.TrimSpace(p.raw.Bytes()) if len(line) == 0 { return } switch line[0] { case '+', '-', '*', '#': // Escape what would be the start of an unordered list or heading. out.WriteByte('\\') case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': i := 1 for i < len(line) && '0' <= line[i] && line[i] <= '9' { i++ } if i < len(line) && (line[i] == '.' 
|| line[i] == ')') { // Escape what would be the start of an ordered list. out.Write(line[:i]) out.WriteByte('\\') line = line[i:] } } out.Write(line) } // rawText prints the text sequence x to out, // without worrying about escaping characters // that have special meaning at the start of a Markdown line. func (p *mdPrinter) rawText(out *bytes.Buffer, x []Text) { for _, t := range x { switch t := t.(type) { case Plain: p.escape(out, string(t)) case Italic: out.WriteString("*") p.escape(out, string(t)) out.WriteString("*") case *Link: out.WriteString("[") p.rawText(out, t.Text) out.WriteString("](") out.WriteString(t.URL) out.WriteString(")") case *DocLink: url := p.docLinkURL(t) if url != "" { out.WriteString("[") } p.rawText(out, t.Text) if url != "" { out.WriteString("](") url = strings.ReplaceAll(url, "(", "%28") url = strings.ReplaceAll(url, ")", "%29") out.WriteString(url) out.WriteString(")") } } } } // escape prints s to out as plain text, // escaping special characters to avoid being misinterpreted // as Markdown markup sequences. func (p *mdPrinter) escape(out *bytes.Buffer, s string) { start := 0 for i := 0; i < len(s); i++ { switch s[i] { case '\n': // Turn all \n into spaces, for a few reasons: // - Avoid introducing paragraph breaks accidentally. // - Avoid the need to reindent after the newline. // - Avoid problems with Markdown renderers treating // every mid-paragraph newline as a
. out.WriteString(s[start:i]) out.WriteByte(' ') start = i + 1 continue case '`', '_', '*', '[', '<', '\\': // Not all of these need to be escaped all the time, // but is valid and easy to do so. // We assume the Markdown is being passed to a // Markdown renderer, not edited by a person, // so it's fine to have escapes that are not strictly // necessary in some cases. out.WriteString(s[start:i]) out.WriteByte('\\') out.WriteByte(s[i]) start = i + 1 } } out.WriteString(s[start:]) } gofumpt-0.6.0/internal/govendor/go/doc/comment/parse.go000066400000000000000000001034331455555333400231320ustar00rootroot00000000000000// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package comment import ( "sort" "strings" "unicode" "unicode/utf8" ) // A Doc is a parsed Go doc comment. type Doc struct { // Content is the sequence of content blocks in the comment. Content []Block // Links is the link definitions in the comment. Links []*LinkDef } // A LinkDef is a single link definition. type LinkDef struct { Text string // the link text URL string // the link URL Used bool // whether the comment uses the definition } // A Block is block-level content in a doc comment, // one of [*Code], [*Heading], [*List], or [*Paragraph]. type Block interface { block() } // A Heading is a doc comment heading. type Heading struct { Text []Text // the heading text } func (*Heading) block() {} // A List is a numbered or bullet list. // Lists are always non-empty: len(Items) > 0. // In a numbered list, every Items[i].Number is a non-empty string. // In a bullet list, every Items[i].Number is an empty string. type List struct { // Items is the list items. Items []*ListItem // ForceBlankBefore indicates that the list must be // preceded by a blank line when reformatting the comment, // overriding the usual conditions. See the BlankBefore method. 
// // The comment parser sets ForceBlankBefore for any list // that is preceded by a blank line, to make sure // the blank line is preserved when printing. ForceBlankBefore bool // ForceBlankBetween indicates that list items must be // separated by blank lines when reformatting the comment, // overriding the usual conditions. See the BlankBetween method. // // The comment parser sets ForceBlankBetween for any list // that has a blank line between any two of its items, to make sure // the blank lines are preserved when printing. ForceBlankBetween bool } func (*List) block() {} // BlankBefore reports whether a reformatting of the comment // should include a blank line before the list. // The default rule is the same as for [BlankBetween]: // if the list item content contains any blank lines // (meaning at least one item has multiple paragraphs) // then the list itself must be preceded by a blank line. // A preceding blank line can be forced by setting [List].ForceBlankBefore. func (l *List) BlankBefore() bool { return l.ForceBlankBefore || l.BlankBetween() } // BlankBetween reports whether a reformatting of the comment // should include a blank line between each pair of list items. // The default rule is that if the list item content contains any blank lines // (meaning at least one item has multiple paragraphs) // then list items must themselves be separated by blank lines. // Blank line separators can be forced by setting [List].ForceBlankBetween. func (l *List) BlankBetween() bool { if l.ForceBlankBetween { return true } for _, item := range l.Items { if len(item.Content) != 1 { // Unreachable for parsed comments today, // since the only way to get multiple item.Content // is multiple paragraphs, which must have been // separated by a blank line. return true } } return false } // A ListItem is a single item in a numbered or bullet list. type ListItem struct { // Number is a decimal string in a numbered list // or an empty string in a bullet list. 
Number string // "1", "2", ...; "" for bullet list // Content is the list content. // Currently, restrictions in the parser and printer // require every element of Content to be a *Paragraph. Content []Block // Content of this item. } // A Paragraph is a paragraph of text. type Paragraph struct { Text []Text } func (*Paragraph) block() {} // A Code is a preformatted code block. type Code struct { // Text is the preformatted text, ending with a newline character. // It may be multiple lines, each of which ends with a newline character. // It is never empty, nor does it start or end with a blank line. Text string } func (*Code) block() {} // A Text is text-level content in a doc comment, // one of [Plain], [Italic], [*Link], or [*DocLink]. type Text interface { text() } // A Plain is a string rendered as plain text (not italicized). type Plain string func (Plain) text() {} // An Italic is a string rendered as italicized text. type Italic string func (Italic) text() {} // A Link is a link to a specific URL. type Link struct { Auto bool // is this an automatic (implicit) link of a literal URL? Text []Text // text of link URL string // target URL of link } func (*Link) text() {} // A DocLink is a link to documentation for a Go package or symbol. type DocLink struct { Text []Text // text of link // ImportPath, Recv, and Name identify the Go package or symbol // that is the link target. The potential combinations of // non-empty fields are: // - ImportPath: a link to another package // - ImportPath, Name: a link to a const, func, type, or var in another package // - ImportPath, Recv, Name: a link to a method in another package // - Name: a link to a const, func, type, or var in this package // - Recv, Name: a link to a method in this package ImportPath string // import path Recv string // receiver type, without any pointer star, for methods Name string // const, func, type, var, or method name } func (*DocLink) text() {} // A Parser is a doc comment parser. 
// The fields in the struct can be filled in before calling Parse // in order to customize the details of the parsing process. type Parser struct { // Words is a map of Go identifier words that // should be italicized and potentially linked. // If Words[w] is the empty string, then the word w // is only italicized. Otherwise it is linked, using // Words[w] as the link target. // Words corresponds to the [go/doc.ToHTML] words parameter. Words map[string]string // LookupPackage resolves a package name to an import path. // // If LookupPackage(name) returns ok == true, then [name] // (or [name.Sym] or [name.Sym.Method]) // is considered a documentation link to importPath's package docs. // It is valid to return "", true, in which case name is considered // to refer to the current package. // // If LookupPackage(name) returns ok == false, // then [name] (or [name.Sym] or [name.Sym.Method]) // will not be considered a documentation link, // except in the case where name is the full (but single-element) import path // of a package in the standard library, such as in [math] or [io.Reader]. // LookupPackage is still called for such names, // in order to permit references to imports of other packages // with the same package names. // // Setting LookupPackage to nil is equivalent to setting it to // a function that always returns "", false. LookupPackage func(name string) (importPath string, ok bool) // LookupSym reports whether a symbol name or method name // exists in the current package. // // If LookupSym("", "Name") returns true, then [Name] // is considered a documentation link for a const, func, type, or var. // // Similarly, if LookupSym("Recv", "Name") returns true, // then [Recv.Name] is considered a documentation link for // type Recv's method Name. // // Setting LookupSym to nil is equivalent to setting it to a function // that always returns false. LookupSym func(recv, name string) (ok bool) } // parseDoc is parsing state for a single doc comment. 
type parseDoc struct { *Parser *Doc links map[string]*LinkDef lines []string lookupSym func(recv, name string) bool } // lookupPkg is called to look up the pkg in [pkg], [pkg.Name], and [pkg.Name.Recv]. // If pkg has a slash, it is assumed to be the full import path and is returned with ok = true. // // Otherwise, pkg is probably a simple package name like "rand" (not "crypto/rand" or "math/rand"). // d.LookupPackage provides a way for the caller to allow resolving such names with reference // to the imports in the surrounding package. // // There is one collision between these two cases: single-element standard library names // like "math" are full import paths but don't contain slashes. We let d.LookupPackage have // the first chance to resolve it, in case there's a different package imported as math, // and otherwise we refer to a built-in list of single-element standard library package names. func (d *parseDoc) lookupPkg(pkg string) (importPath string, ok bool) { if strings.Contains(pkg, "/") { // assume a full import path if validImportPath(pkg) { return pkg, true } return "", false } if d.LookupPackage != nil { // Give LookupPackage a chance. if path, ok := d.LookupPackage(pkg); ok { return path, true } } return DefaultLookupPackage(pkg) } func isStdPkg(path string) bool { // TODO(rsc): Use sort.Find once we don't have to worry about // copying this code into older Go environments. i := sort.Search(len(stdPkgs), func(i int) bool { return stdPkgs[i] >= path }) return i < len(stdPkgs) && stdPkgs[i] == path } // DefaultLookupPackage is the default package lookup // function, used when [Parser].LookupPackage is nil. // It recognizes names of the packages from the standard // library with single-element import paths, such as math, // which would otherwise be impossible to name. // // Note that the go/doc package provides a more sophisticated // lookup based on the imports used in the current package. 
func DefaultLookupPackage(name string) (importPath string, ok bool) { if isStdPkg(name) { return name, true } return "", false } // Parse parses the doc comment text and returns the *Doc form. // Comment markers (/* // and */) in the text must have already been removed. func (p *Parser) Parse(text string) *Doc { lines := unindent(strings.Split(text, "\n")) d := &parseDoc{ Parser: p, Doc: new(Doc), links: make(map[string]*LinkDef), lines: lines, lookupSym: func(recv, name string) bool { return false }, } if p.LookupSym != nil { d.lookupSym = p.LookupSym } // First pass: break into block structure and collect known links. // The text is all recorded as Plain for now. var prev span for _, s := range parseSpans(lines) { var b Block switch s.kind { default: panic("mvdan.cc/gofumpt/internal/govendor/go/doc/comment: internal error: unknown span kind") case spanList: b = d.list(lines[s.start:s.end], prev.end < s.start) case spanCode: b = d.code(lines[s.start:s.end]) case spanOldHeading: b = d.oldHeading(lines[s.start]) case spanHeading: b = d.heading(lines[s.start]) case spanPara: b = d.paragraph(lines[s.start:s.end]) } if b != nil { d.Content = append(d.Content, b) } prev = s } // Second pass: interpret all the Plain text now that we know the links. for _, b := range d.Content { switch b := b.(type) { case *Paragraph: b.Text = d.parseLinkedText(string(b.Text[0].(Plain))) case *List: for _, i := range b.Items { for _, c := range i.Content { p := c.(*Paragraph) p.Text = d.parseLinkedText(string(p.Text[0].(Plain))) } } } } return d.Doc } // A span represents a single span of comment lines (lines[start:end]) // of an identified kind (code, heading, paragraph, and so on). type span struct { start int end int kind spanKind } // A spanKind describes the kind of span. 
type spanKind int const ( _ spanKind = iota spanCode spanHeading spanList spanOldHeading spanPara ) func parseSpans(lines []string) []span { var spans []span // The loop may process a line twice: once as unindented // and again forced indented. So the maximum expected // number of iterations is 2*len(lines). The repeating logic // can be subtle, though, and to protect against introduction // of infinite loops in future changes, we watch to see that // we are not looping too much. A panic is better than a // quiet infinite loop. watchdog := 2 * len(lines) i := 0 forceIndent := 0 Spans: for { // Skip blank lines. for i < len(lines) && lines[i] == "" { i++ } if i >= len(lines) { break } if watchdog--; watchdog < 0 { panic("mvdan.cc/gofumpt/internal/govendor/go/doc/comment: internal error: not making progress") } var kind spanKind start := i end := i if i < forceIndent || indented(lines[i]) { // Indented (or force indented). // Ends before next unindented. (Blank lines are OK.) // If this is an unindented list that we are heuristically treating as indented, // then accept unindented list item lines up to the first blank lines. // The heuristic is disabled at blank lines to contain its effect // to non-gofmt'ed sections of the comment. unindentedListOK := isList(lines[i]) && i < forceIndent i++ for i < len(lines) && (lines[i] == "" || i < forceIndent || indented(lines[i]) || (unindentedListOK && isList(lines[i]))) { if lines[i] == "" { unindentedListOK = false } i++ } // Drop trailing blank lines. end = i for end > start && lines[end-1] == "" { end-- } // If indented lines are followed (without a blank line) // by an unindented line ending in a brace, // take that one line too. This fixes the common mistake // of pasting in something like // // func main() { // fmt.Println("hello, world") // } // // and forgetting to indent it. 
// The heuristic will never trigger on a gofmt'ed comment, // because any gofmt'ed code block or list would be // followed by a blank line or end of comment. if end < len(lines) && strings.HasPrefix(lines[end], "}") { end++ } if isList(lines[start]) { kind = spanList } else { kind = spanCode } } else { // Unindented. Ends at next blank or indented line. i++ for i < len(lines) && lines[i] != "" && !indented(lines[i]) { i++ } end = i // If unindented lines are followed (without a blank line) // by an indented line that would start a code block, // check whether the final unindented lines // should be left for the indented section. // This can happen for the common mistakes of // unindented code or unindented lists. // The heuristic will never trigger on a gofmt'ed comment, // because any gofmt'ed code block would have a blank line // preceding it after the unindented lines. if i < len(lines) && lines[i] != "" && !isList(lines[i]) { switch { case isList(lines[i-1]): // If the final unindented line looks like a list item, // this may be the first indented line wrap of // a mistakenly unindented list. // Leave all the unindented list items. forceIndent = end end-- for end > start && isList(lines[end-1]) { end-- } case strings.HasSuffix(lines[i-1], "{") || strings.HasSuffix(lines[i-1], `\`): // If the final unindented line ended in { or \ // it is probably the start of a misindented code block. // Give the user a single line fix. // Often that's enough; if not, the user can fix the others themselves. forceIndent = end end-- } if start == end && forceIndent > start { i = start continue Spans } } // Span is either paragraph or heading. 
if end-start == 1 && isHeading(lines[start]) { kind = spanHeading } else if end-start == 1 && isOldHeading(lines[start], lines, start) { kind = spanOldHeading } else { kind = spanPara } } spans = append(spans, span{start, end, kind}) i = end } return spans } // indented reports whether line is indented // (starts with a leading space or tab). func indented(line string) bool { return line != "" && (line[0] == ' ' || line[0] == '\t') } // unindent removes any common space/tab prefix // from each line in lines, returning a copy of lines in which // those prefixes have been trimmed from each line. // It also replaces any lines containing only spaces with blank lines (empty strings). func unindent(lines []string) []string { // Trim leading and trailing blank lines. for len(lines) > 0 && isBlank(lines[0]) { lines = lines[1:] } for len(lines) > 0 && isBlank(lines[len(lines)-1]) { lines = lines[:len(lines)-1] } if len(lines) == 0 { return nil } // Compute and remove common indentation. prefix := leadingSpace(lines[0]) for _, line := range lines[1:] { if !isBlank(line) { prefix = commonPrefix(prefix, leadingSpace(line)) } } out := make([]string, len(lines)) for i, line := range lines { line = strings.TrimPrefix(line, prefix) if strings.TrimSpace(line) == "" { line = "" } out[i] = line } for len(out) > 0 && out[0] == "" { out = out[1:] } for len(out) > 0 && out[len(out)-1] == "" { out = out[:len(out)-1] } return out } // isBlank reports whether s is a blank line. func isBlank(s string) bool { return len(s) == 0 || (len(s) == 1 && s[0] == '\n') } // commonPrefix returns the longest common prefix of a and b. func commonPrefix(a, b string) string { i := 0 for i < len(a) && i < len(b) && a[i] == b[i] { i++ } return a[0:i] } // leadingSpace returns the longest prefix of s consisting of spaces and tabs. 
func leadingSpace(s string) string { i := 0 for i < len(s) && (s[i] == ' ' || s[i] == '\t') { i++ } return s[:i] } // isOldHeading reports whether line is an old-style section heading. // line is all[off]. func isOldHeading(line string, all []string, off int) bool { if off <= 0 || all[off-1] != "" || off+2 >= len(all) || all[off+1] != "" || leadingSpace(all[off+2]) != "" { return false } line = strings.TrimSpace(line) // a heading must start with an uppercase letter r, _ := utf8.DecodeRuneInString(line) if !unicode.IsLetter(r) || !unicode.IsUpper(r) { return false } // it must end in a letter or digit: r, _ = utf8.DecodeLastRuneInString(line) if !unicode.IsLetter(r) && !unicode.IsDigit(r) { return false } // exclude lines with illegal characters. we allow "()," if strings.ContainsAny(line, ";:!?+*/=[]{}_^°&§~%#@<\">\\") { return false } // allow "'" for possessive "'s" only for b := line; ; { var ok bool if _, b, ok = strings.Cut(b, "'"); !ok { break } if b != "s" && !strings.HasPrefix(b, "s ") { return false // ' not followed by s and then end-of-word } } // allow "." when followed by non-space for b := line; ; { var ok bool if _, b, ok = strings.Cut(b, "."); !ok { break } if b == "" || strings.HasPrefix(b, " ") { return false // not followed by non-space } } return true } // oldHeading returns the *Heading for the given old-style section heading line. func (d *parseDoc) oldHeading(line string) Block { return &Heading{Text: []Text{Plain(strings.TrimSpace(line))}} } // isHeading reports whether line is a new-style section heading. func isHeading(line string) bool { return len(line) >= 2 && line[0] == '#' && (line[1] == ' ' || line[1] == '\t') && strings.TrimSpace(line) != "#" } // heading returns the *Heading for the given new-style section heading line. func (d *parseDoc) heading(line string) Block { return &Heading{Text: []Text{Plain(strings.TrimSpace(line[1:]))}} } // code returns a code block built from the lines. 
func (d *parseDoc) code(lines []string) *Code { body := unindent(lines) body = append(body, "") // to get final \n from Join return &Code{Text: strings.Join(body, "\n")} } // paragraph returns a paragraph block built from the lines. // If the lines are link definitions, paragraph adds them to d and returns nil. func (d *parseDoc) paragraph(lines []string) Block { // Is this a block of known links? Handle. var defs []*LinkDef for _, line := range lines { def, ok := parseLink(line) if !ok { goto NoDefs } defs = append(defs, def) } for _, def := range defs { d.Links = append(d.Links, def) if d.links[def.Text] == nil { d.links[def.Text] = def } } return nil NoDefs: return &Paragraph{Text: []Text{Plain(strings.Join(lines, "\n"))}} } // parseLink parses a single link definition line: // // [text]: url // // It returns the link definition and whether the line was well formed. func parseLink(line string) (*LinkDef, bool) { if line == "" || line[0] != '[' { return nil, false } i := strings.Index(line, "]:") if i < 0 || i+3 >= len(line) || (line[i+2] != ' ' && line[i+2] != '\t') { return nil, false } text := line[1:i] url := strings.TrimSpace(line[i+3:]) j := strings.Index(url, "://") if j < 0 || !isScheme(url[:j]) { return nil, false } // Line has right form and has valid scheme://. // That's good enough for us - we are not as picky // about the characters beyond the :// as we are // when extracting inline URLs from text. return &LinkDef{Text: text, URL: url}, true } // list returns a list built from the indented lines, // using forceBlankBefore as the value of the List's ForceBlankBefore field. 
func (d *parseDoc) list(lines []string, forceBlankBefore bool) *List { num, _, _ := listMarker(lines[0]) var ( list *List = &List{ForceBlankBefore: forceBlankBefore} item *ListItem text []string ) flush := func() { if item != nil { if para := d.paragraph(text); para != nil { item.Content = append(item.Content, para) } } text = nil } for _, line := range lines { if n, after, ok := listMarker(line); ok && (n != "") == (num != "") { // start new list item flush() item = &ListItem{Number: n} list.Items = append(list.Items, item) line = after } line = strings.TrimSpace(line) if line == "" { list.ForceBlankBetween = true flush() continue } text = append(text, strings.TrimSpace(line)) } flush() return list } // listMarker parses the line as beginning with a list marker. // If it can do that, it returns the numeric marker ("" for a bullet list), // the rest of the line, and ok == true. // Otherwise, it returns "", "", false. func listMarker(line string) (num, rest string, ok bool) { line = strings.TrimSpace(line) if line == "" { return "", "", false } // Can we find a marker? if r, n := utf8.DecodeRuneInString(line); r == '•' || r == '*' || r == '+' || r == '-' { num, rest = "", line[n:] } else if '0' <= line[0] && line[0] <= '9' { n := 1 for n < len(line) && '0' <= line[n] && line[n] <= '9' { n++ } if n >= len(line) || (line[n] != '.' && line[n] != ')') { return "", "", false } num, rest = line[:n], line[n+1:] } else { return "", "", false } if !indented(rest) || strings.TrimSpace(rest) == "" { return "", "", false } return num, rest, true } // isList reports whether the line is the first line of a list, // meaning starts with a list marker after any indentation. // (The caller is responsible for checking the line is indented, as appropriate.) func isList(line string) bool { _, _, ok := listMarker(line) return ok } // parseLinkedText parses text that is allowed to contain explicit links, // such as [math.Sin] or [Go home page], into a slice of Text items. 
// // A “pkg” is only assumed to be a full import path if it starts with // a domain name (a path element with a dot) or is one of the packages // from the standard library (“[os]”, “[encoding/json]”, and so on). // To avoid problems with maps, generics, and array types, doc links // must be both preceded and followed by punctuation, spaces, tabs, // or the start or end of a line. An example problem would be treating // map[ast.Expr]TypeAndValue as containing a link. func (d *parseDoc) parseLinkedText(text string) []Text { var out []Text wrote := 0 flush := func(i int) { if wrote < i { out = d.parseText(out, text[wrote:i], true) wrote = i } } start := -1 var buf []byte for i := 0; i < len(text); i++ { c := text[i] if c == '\n' || c == '\t' { c = ' ' } switch c { case '[': start = i case ']': if start >= 0 { if def, ok := d.links[string(buf)]; ok { def.Used = true flush(start) out = append(out, &Link{ Text: d.parseText(nil, text[start+1:i], false), URL: def.URL, }) wrote = i + 1 } else if link, ok := d.docLink(text[start+1:i], text[:start], text[i+1:]); ok { flush(start) link.Text = d.parseText(nil, text[start+1:i], false) out = append(out, link) wrote = i + 1 } } start = -1 buf = buf[:0] } if start >= 0 && i != start { buf = append(buf, c) } } flush(len(text)) return out } // docLink parses text, which was found inside [ ] brackets, // as a doc link if possible, returning the DocLink and ok == true // or else nil, false. // The before and after strings are the text before the [ and after the ] // on the same line. Doc links must be preceded and followed by // punctuation, spaces, tabs, or the start or end of a line. 
func (d *parseDoc) docLink(text, before, after string) (link *DocLink, ok bool) { if before != "" { r, _ := utf8.DecodeLastRuneInString(before) if !unicode.IsPunct(r) && r != ' ' && r != '\t' && r != '\n' { return nil, false } } if after != "" { r, _ := utf8.DecodeRuneInString(after) if !unicode.IsPunct(r) && r != ' ' && r != '\t' && r != '\n' { return nil, false } } text = strings.TrimPrefix(text, "*") pkg, name, ok := splitDocName(text) var recv string if ok { pkg, recv, _ = splitDocName(pkg) } if pkg != "" { if pkg, ok = d.lookupPkg(pkg); !ok { return nil, false } } else { if ok = d.lookupSym(recv, name); !ok { return nil, false } } link = &DocLink{ ImportPath: pkg, Recv: recv, Name: name, } return link, true } // If text is of the form before.Name, where Name is a capitalized Go identifier, // then splitDocName returns before, name, true. // Otherwise it returns text, "", false. func splitDocName(text string) (before, name string, foundDot bool) { i := strings.LastIndex(text, ".") name = text[i+1:] if !isName(name) { return text, "", false } if i >= 0 { before = text[:i] } return before, name, true } // parseText parses s as text and returns the result of appending // those parsed Text elements to out. // parseText does not handle explicit links like [math.Sin] or [Go home page]: // those are handled by parseLinkedText. // If autoLink is true, then parseText recognizes URLs and words from d.Words // and converts those to links as appropriate. func (d *parseDoc) parseText(out []Text, s string, autoLink bool) []Text { var w strings.Builder wrote := 0 writeUntil := func(i int) { w.WriteString(s[wrote:i]) wrote = i } flush := func(i int) { writeUntil(i) if w.Len() > 0 { out = append(out, Plain(w.String())) w.Reset() } } for i := 0; i < len(s); { t := s[i:] if autoLink { if url, ok := autoURL(t); ok { flush(i) // Note: The old comment parser would look up the URL in words // and replace the target with words[URL] if it was non-empty. 
// That would allow creating links that display as one URL but // when clicked go to a different URL. Not sure what the point // of that is, so we're not doing that lookup here. out = append(out, &Link{Auto: true, Text: []Text{Plain(url)}, URL: url}) i += len(url) wrote = i continue } if id, ok := ident(t); ok { url, italics := d.Words[id] if !italics { i += len(id) continue } flush(i) if url == "" { out = append(out, Italic(id)) } else { out = append(out, &Link{Auto: true, Text: []Text{Italic(id)}, URL: url}) } i += len(id) wrote = i continue } } switch { case strings.HasPrefix(t, "``"): if len(t) >= 3 && t[2] == '`' { // Do not convert `` inside ```, in case people are mistakenly writing Markdown. i += 3 for i < len(t) && t[i] == '`' { i++ } break } writeUntil(i) w.WriteRune('“') i += 2 wrote = i case strings.HasPrefix(t, "''"): writeUntil(i) w.WriteRune('”') i += 2 wrote = i default: i++ } } flush(len(s)) return out } // autoURL checks whether s begins with a URL that should be hyperlinked. // If so, it returns the URL, which is a prefix of s, and ok == true. // Otherwise it returns "", false. // The caller should skip over the first len(url) bytes of s // before further processing. func autoURL(s string) (url string, ok bool) { // Find the ://. Fast path to pick off non-URL, // since we call this at every position in the string. // The shortest possible URL is ftp://x, 7 bytes. var i int switch { case len(s) < 7: return "", false case s[3] == ':': i = 3 case s[4] == ':': i = 4 case s[5] == ':': i = 5 case s[6] == ':': i = 6 default: return "", false } if i+3 > len(s) || s[i:i+3] != "://" { return "", false } // Check valid scheme. if !isScheme(s[:i]) { return "", false } // Scan host part. Must have at least one byte, // and must start and end in non-punctuation. 
i += 3 if i >= len(s) || !isHost(s[i]) || isPunct(s[i]) { return "", false } i++ end := i for i < len(s) && isHost(s[i]) { if !isPunct(s[i]) { end = i + 1 } i++ } i = end // At this point we are definitely returning a URL (scheme://host). // We just have to find the longest path we can add to it. // Heuristics abound. // We allow parens, braces, and brackets, // but only if they match (#5043, #22285). // We allow .,:;?! in the path but not at the end, // to avoid end-of-sentence punctuation (#18139, #16565). stk := []byte{} end = i Path: for ; i < len(s); i++ { if isPunct(s[i]) { continue } if !isPath(s[i]) { break } switch s[i] { case '(': stk = append(stk, ')') case '{': stk = append(stk, '}') case '[': stk = append(stk, ']') case ')', '}', ']': if len(stk) == 0 || stk[len(stk)-1] != s[i] { break Path } stk = stk[:len(stk)-1] } if len(stk) == 0 { end = i + 1 } } return s[:end], true } // isScheme reports whether s is a recognized URL scheme. // Note that if strings of new length (beyond 3-7) // are added here, the fast path at the top of autoURL will need updating. func isScheme(s string) bool { switch s { case "file", "ftp", "gopher", "http", "https", "mailto", "nntp": return true } return false } // isHost reports whether c is a byte that can appear in a URL host, // like www.example.com or user@[::1]:8080 func isHost(c byte) bool { // mask is a 128-bit bitmap with 1s for allowed bytes, // so that the byte c can be tested with a shift and an and. // If c > 128, then 1<>64)) != 0 } // isPunct reports whether c is a punctuation byte that can appear // inside a path but not at the end. func isPunct(c byte) bool { // mask is a 128-bit bitmap with 1s for allowed bytes, // so that the byte c can be tested with a shift and an and. // If c > 128, then 1<>64)) != 0 } // isPath reports whether c is a (non-punctuation) path byte. func isPath(c byte) bool { // mask is a 128-bit bitmap with 1s for allowed bytes, // so that the byte c can be tested with a shift and an and. 
// If c > 128, then 1<>64)) != 0 } // isName reports whether s is a capitalized Go identifier (like Name). func isName(s string) bool { t, ok := ident(s) if !ok || t != s { return false } r, _ := utf8.DecodeRuneInString(s) return unicode.IsUpper(r) } // ident checks whether s begins with a Go identifier. // If so, it returns the identifier, which is a prefix of s, and ok == true. // Otherwise it returns "", false. // The caller should skip over the first len(id) bytes of s // before further processing. func ident(s string) (id string, ok bool) { // Scan [\pL_][\pL_0-9]* n := 0 for n < len(s) { if c := s[n]; c < utf8.RuneSelf { if isIdentASCII(c) && (n > 0 || c < '0' || c > '9') { n++ continue } break } r, nr := utf8.DecodeRuneInString(s[n:]) if unicode.IsLetter(r) { n += nr continue } break } return s[:n], n > 0 } // isIdentASCII reports whether c is an ASCII identifier byte. func isIdentASCII(c byte) bool { // mask is a 128-bit bitmap with 1s for allowed bytes, // so that the byte c can be tested with a shift and an and. // If c > 128, then 1<>64)) != 0 } // validImportPath reports whether path is a valid import path. // It is a lightly edited copy of golang.org/x/mod/module.CheckImportPath. func validImportPath(path string) bool { if !utf8.ValidString(path) { return false } if path == "" { return false } if path[0] == '-' { return false } if strings.Contains(path, "//") { return false } if path[len(path)-1] == '/' { return false } elemStart := 0 for i, r := range path { if r == '/' { if !validImportPathElem(path[elemStart:i]) { return false } elemStart = i + 1 } } return validImportPathElem(path[elemStart:]) } func validImportPathElem(elem string) bool { if elem == "" || elem[0] == '.' || elem[len(elem)-1] == '.' 
{ return false } for i := 0; i < len(elem); i++ { if !importPathOK(elem[i]) { return false } } return true } func importPathOK(c byte) bool { // mask is a 128-bit bitmap with 1s for allowed bytes, // so that the byte c can be tested with a shift and an and. // If c > 128, then 1<>64)) != 0 } gofumpt-0.6.0/internal/govendor/go/doc/comment/print.go000066400000000000000000000171131455555333400231530ustar00rootroot00000000000000// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package comment import ( "bytes" "fmt" "strings" ) // A Printer is a doc comment printer. // The fields in the struct can be filled in before calling // any of the printing methods // in order to customize the details of the printing process. type Printer struct { // HeadingLevel is the nesting level used for // HTML and Markdown headings. // If HeadingLevel is zero, it defaults to level 3, // meaning to use

and ###. HeadingLevel int // HeadingID is a function that computes the heading ID // (anchor tag) to use for the heading h when generating // HTML and Markdown. If HeadingID returns an empty string, // then the heading ID is omitted. // If HeadingID is nil, h.DefaultID is used. HeadingID func(h *Heading) string // DocLinkURL is a function that computes the URL for the given DocLink. // If DocLinkURL is nil, then link.DefaultURL(p.DocLinkBaseURL) is used. DocLinkURL func(link *DocLink) string // DocLinkBaseURL is used when DocLinkURL is nil, // passed to [DocLink.DefaultURL] to construct a DocLink's URL. // See that method's documentation for details. DocLinkBaseURL string // TextPrefix is a prefix to print at the start of every line // when generating text output using the Text method. TextPrefix string // TextCodePrefix is the prefix to print at the start of each // preformatted (code block) line when generating text output, // instead of (not in addition to) TextPrefix. // If TextCodePrefix is the empty string, it defaults to TextPrefix+"\t". TextCodePrefix string // TextWidth is the maximum width text line to generate, // measured in Unicode code points, // excluding TextPrefix and the newline character. // If TextWidth is zero, it defaults to 80 minus the number of code points in TextPrefix. // If TextWidth is negative, there is no limit. TextWidth int } func (p *Printer) headingLevel() int { if p.HeadingLevel <= 0 { return 3 } return p.HeadingLevel } func (p *Printer) headingID(h *Heading) string { if p.HeadingID == nil { return h.DefaultID() } return p.HeadingID(h) } func (p *Printer) docLinkURL(link *DocLink) string { if p.DocLinkURL != nil { return p.DocLinkURL(link) } return link.DefaultURL(p.DocLinkBaseURL) } // DefaultURL constructs and returns the documentation URL for l, // using baseURL as a prefix for links to other packages. 
// // The possible forms returned by DefaultURL are: // - baseURL/ImportPath, for a link to another package // - baseURL/ImportPath#Name, for a link to a const, func, type, or var in another package // - baseURL/ImportPath#Recv.Name, for a link to a method in another package // - #Name, for a link to a const, func, type, or var in this package // - #Recv.Name, for a link to a method in this package // // If baseURL ends in a trailing slash, then DefaultURL inserts // a slash between ImportPath and # in the anchored forms. // For example, here are some baseURL values and URLs they can generate: // // "/pkg/" → "/pkg/math/#Sqrt" // "/pkg" → "/pkg/math#Sqrt" // "/" → "/math/#Sqrt" // "" → "/math#Sqrt" func (l *DocLink) DefaultURL(baseURL string) string { if l.ImportPath != "" { slash := "" if strings.HasSuffix(baseURL, "/") { slash = "/" } else { baseURL += "/" } switch { case l.Name == "": return baseURL + l.ImportPath + slash case l.Recv != "": return baseURL + l.ImportPath + slash + "#" + l.Recv + "." + l.Name default: return baseURL + l.ImportPath + slash + "#" + l.Name } } if l.Recv != "" { return "#" + l.Recv + "." + l.Name } return "#" + l.Name } // DefaultID returns the default anchor ID for the heading h. // // The default anchor ID is constructed by converting every // rune that is not alphanumeric ASCII to an underscore // and then adding the prefix “hdr-”. // For example, if the heading text is “Go Doc Comments”, // the default ID is “hdr-Go_Doc_Comments”. func (h *Heading) DefaultID() string { // Note: The “hdr-” prefix is important to avoid DOM clobbering attacks. // See https://pkg.go.dev/github.com/google/safehtml#Identifier. 
var out strings.Builder var p textPrinter p.oneLongLine(&out, h.Text) s := strings.TrimSpace(out.String()) if s == "" { return "" } out.Reset() out.WriteString("hdr-") for _, r := range s { if r < 0x80 && isIdentASCII(byte(r)) { out.WriteByte(byte(r)) } else { out.WriteByte('_') } } return out.String() } type commentPrinter struct { *Printer } // Comment returns the standard Go formatting of the Doc, // without any comment markers. func (p *Printer) Comment(d *Doc) []byte { cp := &commentPrinter{Printer: p} var out bytes.Buffer for i, x := range d.Content { if i > 0 && blankBefore(x) { out.WriteString("\n") } cp.block(&out, x) } // Print one block containing all the link definitions that were used, // and then a second block containing all the unused ones. // This makes it easy to clean up the unused ones: gofmt and // delete the final block. And it's a nice visual signal without // affecting the way the comment formats for users. for i := 0; i < 2; i++ { used := i == 0 first := true for _, def := range d.Links { if def.Used == used { if first { out.WriteString("\n") first = false } out.WriteString("[") out.WriteString(def.Text) out.WriteString("]: ") out.WriteString(def.URL) out.WriteString("\n") } } } return out.Bytes() } // blankBefore reports whether the block x requires a blank line before it. // All blocks do, except for Lists that return false from x.BlankBefore(). func blankBefore(x Block) bool { if x, ok := x.(*List); ok { return x.BlankBefore() } return true } // block prints the block x to out. 
func (p *commentPrinter) block(out *bytes.Buffer, x Block) { switch x := x.(type) { default: fmt.Fprintf(out, "?%T", x) case *Paragraph: p.text(out, "", x.Text) out.WriteString("\n") case *Heading: out.WriteString("# ") p.text(out, "", x.Text) out.WriteString("\n") case *Code: md := x.Text for md != "" { var line string line, md, _ = strings.Cut(md, "\n") if line != "" { out.WriteString("\t") out.WriteString(line) } out.WriteString("\n") } case *List: loose := x.BlankBetween() for i, item := range x.Items { if i > 0 && loose { out.WriteString("\n") } out.WriteString(" ") if item.Number == "" { out.WriteString(" - ") } else { out.WriteString(item.Number) out.WriteString(". ") } for i, blk := range item.Content { const fourSpace = " " if i > 0 { out.WriteString("\n" + fourSpace) } p.text(out, fourSpace, blk.(*Paragraph).Text) out.WriteString("\n") } } } } // text prints the text sequence x to out. func (p *commentPrinter) text(out *bytes.Buffer, indent string, x []Text) { for _, t := range x { switch t := t.(type) { case Plain: p.indent(out, indent, string(t)) case Italic: p.indent(out, indent, string(t)) case *Link: if t.Auto { p.text(out, indent, t.Text) } else { out.WriteString("[") p.text(out, indent, t.Text) out.WriteString("]") } case *DocLink: out.WriteString("[") p.text(out, indent, t.Text) out.WriteString("]") } } } // indent prints s to out, indenting with the indent string // after each newline in s. func (p *commentPrinter) indent(out *bytes.Buffer, indent, s string) { for s != "" { line, rest, ok := strings.Cut(s, "\n") out.WriteString(line) if ok { out.WriteString("\n") out.WriteString(indent) } s = rest } } gofumpt-0.6.0/internal/govendor/go/doc/comment/std.go000066400000000000000000000012101455555333400226000ustar00rootroot00000000000000// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Code generated by 'go generate' DO NOT EDIT. 
//disabled go:generate ./mkstd.sh package comment var stdPkgs = []string{ "bufio", "bytes", "cmp", "context", "crypto", "embed", "encoding", "errors", "expvar", "flag", "fmt", "hash", "html", "image", "io", "log", "maps", "math", "mime", "net", "os", "path", "plugin", "reflect", "regexp", "runtime", "slices", "sort", "strconv", "strings", "sync", "syscall", "testing", "time", "unicode", "unsafe", } gofumpt-0.6.0/internal/govendor/go/doc/comment/text.go000066400000000000000000000214661455555333400230110ustar00rootroot00000000000000// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package comment import ( "bytes" "fmt" "sort" "strings" "unicode/utf8" ) // A textPrinter holds the state needed for printing a Doc as plain text. type textPrinter struct { *Printer long strings.Builder prefix string codePrefix string width int } // Text returns a textual formatting of the Doc. // See the [Printer] documentation for ways to customize the text output. func (p *Printer) Text(d *Doc) []byte { tp := &textPrinter{ Printer: p, prefix: p.TextPrefix, codePrefix: p.TextCodePrefix, width: p.TextWidth, } if tp.codePrefix == "" { tp.codePrefix = p.TextPrefix + "\t" } if tp.width == 0 { tp.width = 80 - utf8.RuneCountInString(tp.prefix) } var out bytes.Buffer for i, x := range d.Content { if i > 0 && blankBefore(x) { out.WriteString(tp.prefix) writeNL(&out) } tp.block(&out, x) } anyUsed := false for _, def := range d.Links { if def.Used { anyUsed = true break } } if anyUsed { writeNL(&out) for _, def := range d.Links { if def.Used { fmt.Fprintf(&out, "[%s]: %s\n", def.Text, def.URL) } } } return out.Bytes() } // writeNL calls out.WriteByte('\n') // but first trims trailing spaces on the previous line. func writeNL(out *bytes.Buffer) { // Trim trailing spaces. 
data := out.Bytes() n := 0 for n < len(data) && (data[len(data)-n-1] == ' ' || data[len(data)-n-1] == '\t') { n++ } if n > 0 { out.Truncate(len(data) - n) } out.WriteByte('\n') } // block prints the block x to out. func (p *textPrinter) block(out *bytes.Buffer, x Block) { switch x := x.(type) { default: fmt.Fprintf(out, "?%T\n", x) case *Paragraph: out.WriteString(p.prefix) p.text(out, "", x.Text) case *Heading: out.WriteString(p.prefix) out.WriteString("# ") p.text(out, "", x.Text) case *Code: text := x.Text for text != "" { var line string line, text, _ = strings.Cut(text, "\n") if line != "" { out.WriteString(p.codePrefix) out.WriteString(line) } writeNL(out) } case *List: loose := x.BlankBetween() for i, item := range x.Items { if i > 0 && loose { out.WriteString(p.prefix) writeNL(out) } out.WriteString(p.prefix) out.WriteString(" ") if item.Number == "" { out.WriteString(" - ") } else { out.WriteString(item.Number) out.WriteString(". ") } for i, blk := range item.Content { const fourSpace = " " if i > 0 { writeNL(out) out.WriteString(p.prefix) out.WriteString(fourSpace) } p.text(out, fourSpace, blk.(*Paragraph).Text) } } } } // text prints the text sequence x to out. func (p *textPrinter) text(out *bytes.Buffer, indent string, x []Text) { p.oneLongLine(&p.long, x) words := strings.Fields(p.long.String()) p.long.Reset() var seq []int if p.width < 0 || len(words) == 0 { seq = []int{0, len(words)} // one long line } else { seq = wrap(words, p.width-utf8.RuneCountInString(indent)) } for i := 0; i+1 < len(seq); i++ { if i > 0 { out.WriteString(p.prefix) out.WriteString(indent) } for j, w := range words[seq[i]:seq[i+1]] { if j > 0 { out.WriteString(" ") } out.WriteString(w) } writeNL(out) } } // oneLongLine prints the text sequence x to out as one long line, // without worrying about line wrapping. // Explicit links have the [ ] dropped to improve readability. 
func (p *textPrinter) oneLongLine(out *strings.Builder, x []Text) { for _, t := range x { switch t := t.(type) { case Plain: out.WriteString(string(t)) case Italic: out.WriteString(string(t)) case *Link: p.oneLongLine(out, t.Text) case *DocLink: p.oneLongLine(out, t.Text) } } } // wrap wraps words into lines of at most max runes, // minimizing the sum of the squares of the leftover lengths // at the end of each line (except the last, of course), // with a preference for ending lines at punctuation (.,:;). // // The returned slice gives the indexes of the first words // on each line in the wrapped text with a final entry of len(words). // Thus the lines are words[seq[0]:seq[1]], words[seq[1]:seq[2]], // ..., words[seq[len(seq)-2]:seq[len(seq)-1]]. // // The implementation runs in O(n log n) time, where n = len(words), // using the algorithm described in D. S. Hirschberg and L. L. Larmore, // “[The least weight subsequence problem],” FOCS 1985, pp. 137-143. // // [The least weight subsequence problem]: https://doi.org/10.1109/SFCS.1985.60 func wrap(words []string, max int) (seq []int) { // The algorithm requires that our scoring function be concave, // meaning that for all i₀ ≤ i₁ < j₀ ≤ j₁, // weight(i₀, j₀) + weight(i₁, j₁) ≤ weight(i₀, j₁) + weight(i₁, j₀). // // Our weights are two-element pairs [hi, lo] // ordered by elementwise comparison. // The hi entry counts the weight for lines that are longer than max, // and the lo entry counts the weight for lines that are not. // This forces the algorithm to first minimize the number of lines // that are longer than max, which correspond to lines with // single very long words. Having done that, it can move on to // minimizing the lo score, which is more interesting. // // The lo score is the sum for each line of the square of the // number of spaces remaining at the end of the line and a // penalty of 64 given out for not ending the line in a // punctuation character (.,:;). 
// The penalty is somewhat arbitrarily chosen by trying // different amounts and judging how nice the wrapped text looks. // Roughly speaking, using 64 means that we are willing to // end a line with eight blank spaces in order to end at a // punctuation character, even if the next word would fit in // those spaces. // // We care about ending in punctuation characters because // it makes the text easier to skim if not too many sentences // or phrases begin with a single word on the previous line. // A score is the score (also called weight) for a given line. // add and cmp add and compare scores. type score struct { hi int64 lo int64 } add := func(s, t score) score { return score{s.hi + t.hi, s.lo + t.lo} } cmp := func(s, t score) int { switch { case s.hi < t.hi: return -1 case s.hi > t.hi: return +1 case s.lo < t.lo: return -1 case s.lo > t.lo: return +1 } return 0 } // total[j] is the total number of runes // (including separating spaces) in words[:j]. total := make([]int, len(words)+1) total[0] = 0 for i, s := range words { total[1+i] = total[i] + utf8.RuneCountInString(s) + 1 } // weight returns weight(i, j). weight := func(i, j int) score { // On the last line, there is zero weight for being too short. n := total[j] - 1 - total[i] if j == len(words) && n <= max { return score{0, 0} } // Otherwise the weight is the penalty plus the square of the number of // characters remaining on the line or by which the line goes over. // In the latter case, that value goes in the hi part of the score. // (See note above.) p := wrapPenalty(words[j-1]) v := int64(max-n) * int64(max-n) if n > max { return score{v, p} } return score{0, v + p} } // The rest of this function is “The Basic Algorithm” from // Hirschberg and Larmore's conference paper, // using the same names as in the paper. 
f := []score{{0, 0}} g := func(i, j int) score { return add(f[i], weight(i, j)) } bridge := func(a, b, c int) bool { k := c + sort.Search(len(words)+1-c, func(k int) bool { k += c return cmp(g(a, k), g(b, k)) > 0 }) if k > len(words) { return true } return cmp(g(c, k), g(b, k)) <= 0 } // d is a one-ended deque implemented as a slice. d := make([]int, 1, len(words)) d[0] = 0 bestleft := make([]int, 1, len(words)) bestleft[0] = -1 for m := 1; m < len(words); m++ { f = append(f, g(d[0], m)) bestleft = append(bestleft, d[0]) for len(d) > 1 && cmp(g(d[1], m+1), g(d[0], m+1)) <= 0 { d = d[1:] // “Retire” } for len(d) > 1 && bridge(d[len(d)-2], d[len(d)-1], m) { d = d[:len(d)-1] // “Fire” } if cmp(g(m, len(words)), g(d[len(d)-1], len(words))) < 0 { d = append(d, m) // “Hire” // The next few lines are not in the paper but are necessary // to handle two-word inputs correctly. It appears to be // just a bug in the paper's pseudocode. if len(d) == 2 && cmp(g(d[1], m+1), g(d[0], m+1)) <= 0 { d = d[1:] } } } bestleft = append(bestleft, d[0]) // Recover least weight sequence from bestleft. n := 1 for m := len(words); m > 0; m = bestleft[m] { n++ } seq = make([]int, n) for m := len(words); m > 0; m = bestleft[m] { n-- seq[n] = m } return seq } // wrapPenalty is the penalty for inserting a line break after word s. func wrapPenalty(s string) int64 { switch s[len(s)-1] { case '.', ',', ':', ';': return 0 } return 64 } gofumpt-0.6.0/internal/govendor/go/format/000077500000000000000000000000001455555333400205465ustar00rootroot00000000000000gofumpt-0.6.0/internal/govendor/go/format/format.go000066400000000000000000000106221455555333400223660ustar00rootroot00000000000000// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package format implements standard formatting of Go source. 
// // Note that formatting of Go source code changes over time, so tools relying on // consistent formatting should execute a specific version of the gofmt binary // instead of using this package. That way, the formatting will be stable, and // the tools won't need to be recompiled each time gofmt changes. // // For example, pre-submit checks that use this package directly would behave // differently depending on what Go version each developer uses, causing the // check to be inherently fragile. package format import ( "bytes" "fmt" "go/ast" "go/parser" "go/token" "io" "mvdan.cc/gofumpt/internal/govendor/go/printer" ) // Keep these in sync with cmd/gofmt/gofmt.go. const ( tabWidth = 8 printerMode = printer.UseSpaces | printer.TabIndent | printerNormalizeNumbers // printerNormalizeNumbers means to canonicalize number literal prefixes // and exponents while printing. See https://golang.org/doc/go1.13#gofmt. // // This value is defined in mvdan.cc/gofumpt/internal/govendor/go/printer specifically for mvdan.cc/gofumpt/internal/govendor/go/format and cmd/gofmt. printerNormalizeNumbers = 1 << 30 ) var config = printer.Config{Mode: printerMode, Tabwidth: tabWidth} const parserMode = parser.ParseComments | parser.SkipObjectResolution // Node formats node in canonical gofmt style and writes the result to dst. // // The node type must be *ast.File, *printer.CommentedNode, []ast.Decl, // []ast.Stmt, or assignment-compatible to ast.Expr, ast.Decl, ast.Spec, // or ast.Stmt. Node does not modify node. Imports are not sorted for // nodes representing partial source files (for instance, if the node is // not an *ast.File or a *printer.CommentedNode not wrapping an *ast.File). // // The function may return early (before the entire result is written) // and return a formatting error, for instance due to an incorrect AST. func Node(dst io.Writer, fset *token.FileSet, node any) error { // Determine if we have a complete source file (file != nil). 
var file *ast.File var cnode *printer.CommentedNode switch n := node.(type) { case *ast.File: file = n case *printer.CommentedNode: if f, ok := n.Node.(*ast.File); ok { file = f cnode = n } } // Sort imports if necessary. if file != nil && hasUnsortedImports(file) { // Make a copy of the AST because ast.SortImports is destructive. // TODO(gri) Do this more efficiently. var buf bytes.Buffer err := config.Fprint(&buf, fset, file) if err != nil { return err } file, err = parser.ParseFile(fset, "", buf.Bytes(), parserMode) if err != nil { // We should never get here. If we do, provide good diagnostic. return fmt.Errorf("format.Node internal error (%s)", err) } ast.SortImports(fset, file) // Use new file with sorted imports. node = file if cnode != nil { node = &printer.CommentedNode{Node: file, Comments: cnode.Comments} } } return config.Fprint(dst, fset, node) } // Source formats src in canonical gofmt style and returns the result // or an (I/O or syntax) error. src is expected to be a syntactically // correct Go source file, or a list of Go declarations or statements. // // If src is a partial source file, the leading and trailing space of src // is applied to the result (such that it has the same leading and trailing // space as src), and the result is indented by the same amount as the first // line of src containing code. Imports are not sorted for partial source files. func Source(src []byte) ([]byte, error) { fset := token.NewFileSet() file, sourceAdj, indentAdj, err := parse(fset, "", src, true) if err != nil { return nil, err } if sourceAdj == nil { // Complete source file. // TODO(gri) consider doing this always. ast.SortImports(fset, file) } return format(fset, file, sourceAdj, indentAdj, src, config) } func hasUnsortedImports(file *ast.File) bool { for _, d := range file.Decls { d, ok := d.(*ast.GenDecl) if !ok || d.Tok != token.IMPORT { // Not an import declaration, so we're done. // Imports are always first. 
return false } if d.Lparen.IsValid() { // For now assume all grouped imports are unsorted. // TODO(gri) Should check if they are sorted already. return true } // Ungrouped imports are sorted by default. } return false } gofumpt-0.6.0/internal/govendor/go/format/internal.go000066400000000000000000000121241455555333400227110ustar00rootroot00000000000000// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // TODO(gri): This file and the file src/cmd/gofmt/internal.go are // the same (but for this comment and the package name). Do not modify // one without the other. Determine if we can factor out functionality // in a public API. See also #11844 for context. package format import ( "bytes" "go/ast" "go/parser" "go/token" "strings" "mvdan.cc/gofumpt/internal/govendor/go/printer" ) // parse parses src, which was read from the named file, // as a Go source file, declaration, or statement list. func parse(fset *token.FileSet, filename string, src []byte, fragmentOk bool) ( file *ast.File, sourceAdj func(src []byte, indent int) []byte, indentAdj int, err error, ) { // Try as whole source file. file, err = parser.ParseFile(fset, filename, src, parserMode) // If there's no error, return. If the error is that the source file didn't begin with a // package line and source fragments are ok, fall through to // try as a source fragment. Stop and return on any other error. if err == nil || !fragmentOk || !strings.Contains(err.Error(), "expected 'package'") { return } // If this is a declaration list, make it a source file // by inserting a package clause. // Insert using a ';', not a newline, so that the line numbers // in psrc match the ones in src. psrc := append([]byte("package p;"), src...) file, err = parser.ParseFile(fset, filename, psrc, parserMode) if err == nil { sourceAdj = func(src []byte, indent int) []byte { // Remove the package clause. 
// Gofmt has turned the ';' into a '\n'. src = src[indent+len("package p\n"):] return bytes.TrimSpace(src) } return } // If the error is that the source file didn't begin with a // declaration, fall through to try as a statement list. // Stop and return on any other error. if !strings.Contains(err.Error(), "expected declaration") { return } // If this is a statement list, make it a source file // by inserting a package clause and turning the list // into a function body. This handles expressions too. // Insert using a ';', not a newline, so that the line numbers // in fsrc match the ones in src. Add an extra '\n' before the '}' // to make sure comments are flushed before the '}'. fsrc := append(append([]byte("package p; func _() {"), src...), '\n', '\n', '}') file, err = parser.ParseFile(fset, filename, fsrc, parserMode) if err == nil { sourceAdj = func(src []byte, indent int) []byte { // Cap adjusted indent to zero. if indent < 0 { indent = 0 } // Remove the wrapping. // Gofmt has turned the "; " into a "\n\n". // There will be two non-blank lines with indent, hence 2*indent. src = src[2*indent+len("package p\n\nfunc _() {"):] // Remove only the "}\n" suffix: remaining whitespaces will be trimmed anyway src = src[:len(src)-len("}\n")] return bytes.TrimSpace(src) } // Gofmt has also indented the function body one level. // Adjust that with indentAdj. indentAdj = -1 } // Succeeded, or out of options. return } // format formats the given package file originally obtained from src // and adjusts the result based on the original source via sourceAdj // and indentAdj. func format( fset *token.FileSet, file *ast.File, sourceAdj func(src []byte, indent int) []byte, indentAdj int, src []byte, cfg printer.Config, ) ([]byte, error) { if sourceAdj == nil { // Complete source file. var buf bytes.Buffer err := cfg.Fprint(&buf, fset, file) if err != nil { return nil, err } return buf.Bytes(), nil } // Partial source file. // Determine and prepend leading space. 
i, j := 0, 0 for j < len(src) && isSpace(src[j]) { if src[j] == '\n' { i = j + 1 // byte offset of last line in leading space } j++ } var res []byte res = append(res, src[:i]...) // Determine and prepend indentation of first code line. // Spaces are ignored unless there are no tabs, // in which case spaces count as one tab. indent := 0 hasSpace := false for _, b := range src[i:j] { switch b { case ' ': hasSpace = true case '\t': indent++ } } if indent == 0 && hasSpace { indent = 1 } for i := 0; i < indent; i++ { res = append(res, '\t') } // Format the source. // Write it without any leading and trailing space. cfg.Indent = indent + indentAdj var buf bytes.Buffer err := cfg.Fprint(&buf, fset, file) if err != nil { return nil, err } out := sourceAdj(buf.Bytes(), cfg.Indent) // If the adjusted output is empty, the source // was empty but (possibly) for white space. // The result is the incoming source. if len(out) == 0 { return src, nil } // Otherwise, append output to leading space. res = append(res, out...) // Determine and append trailing space. i = len(src) for i > 0 && isSpace(src[i-1]) { i-- } return append(res, src[i:]...), nil } // isSpace reports whether the byte is a space character. // isSpace defines a space as being among the following bytes: ' ', '\t', '\n' and '\r'. func isSpace(b byte) bool { return b == ' ' || b == '\t' || b == '\n' || b == '\r' } gofumpt-0.6.0/internal/govendor/go/printer/000077500000000000000000000000001455555333400207415ustar00rootroot00000000000000gofumpt-0.6.0/internal/govendor/go/printer/comment.go000066400000000000000000000071021455555333400227320ustar00rootroot00000000000000// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package printer import ( "go/ast" "strings" "mvdan.cc/gofumpt/internal/govendor/go/doc/comment" ) // formatDocComment reformats the doc comment list, // returning the canonical formatting. 
func formatDocComment(list []*ast.Comment) []*ast.Comment { // Extract comment text (removing comment markers). var kind, text string var directives []*ast.Comment if len(list) == 1 && strings.HasPrefix(list[0].Text, "/*") { kind = "/*" text = list[0].Text if !strings.Contains(text, "\n") || allStars(text) { // Single-line /* .. */ comment in doc comment position, // or multiline old-style comment like // /* // * Comment // * text here. // */ // Should not happen, since it will not work well as a // doc comment, but if it does, just ignore: // reformatting it will only make the situation worse. return list } text = text[2 : len(text)-2] // cut /* and */ } else if strings.HasPrefix(list[0].Text, "//") { kind = "//" var b strings.Builder for _, c := range list { after, found := strings.CutPrefix(c.Text, "//") if !found { return list } // Accumulate //go:build etc lines separately. if isDirective(after) { directives = append(directives, c) continue } b.WriteString(strings.TrimPrefix(after, " ")) b.WriteString("\n") } text = b.String() } else { // Not sure what this is, so leave alone. return list } if text == "" { return list } // Parse comment and reformat as text. var p comment.Parser d := p.Parse(text) var pr comment.Printer text = string(pr.Comment(d)) // For /* */ comment, return one big comment with text inside. slash := list[0].Slash if kind == "/*" { c := &ast.Comment{ Slash: slash, Text: "/*\n" + text + "*/", } return []*ast.Comment{c} } // For // comment, return sequence of // lines. 
var out []*ast.Comment for text != "" { var line string line, text, _ = strings.Cut(text, "\n") if line == "" { line = "//" } else if strings.HasPrefix(line, "\t") { line = "//" + line } else { line = "// " + line } out = append(out, &ast.Comment{ Slash: slash, Text: line, }) } if len(directives) > 0 { out = append(out, &ast.Comment{ Slash: slash, Text: "//", }) for _, c := range directives { out = append(out, &ast.Comment{ Slash: slash, Text: c.Text, }) } } return out } // isDirective reports whether c is a comment directive. // See go.dev/issue/37974. // This code is also in go/ast. func isDirective(c string) bool { // "//line " is a line directive. // "//extern " is for gccgo. // "//export " is for cgo. // (The // has been removed.) if strings.HasPrefix(c, "line ") || strings.HasPrefix(c, "extern ") || strings.HasPrefix(c, "export ") { return true } // "//[a-z0-9]+:[a-z0-9]" // (The // has been removed.) colon := strings.Index(c, ":") if colon <= 0 || colon+1 >= len(c) { return false } for i := 0; i <= colon+1; i++ { if i == colon { continue } b := c[i] if !('a' <= b && b <= 'z' || '0' <= b && b <= '9') { return false } } return true } // allStars reports whether text is the interior of an // old-style /* */ comment with a star at the start of each line. func allStars(text string) bool { for i := 0; i < len(text); i++ { if text[i] == '\n' { j := i + 1 for j < len(text) && (text[j] == ' ' || text[j] == '\t') { j++ } if j < len(text) && text[j] != '*' { return false } } } return true } gofumpt-0.6.0/internal/govendor/go/printer/gobuild.go000066400000000000000000000111301455555333400227110ustar00rootroot00000000000000// Copyright 2020 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package printer import ( "go/build/constraint" "sort" "text/tabwriter" ) func (p *printer) fixGoBuildLines() { if len(p.goBuild)+len(p.plusBuild) == 0 { return } // Find latest possible placement of //go:build and // +build comments. // That's just after the last blank line before we find a non-comment. // (We'll add another blank line after our comment block.) // When we start dropping // +build comments, we can skip over /* */ comments too. // Note that we are processing tabwriter input, so every comment // begins and ends with a tabwriter.Escape byte. // And some newlines have turned into \f bytes. insert := 0 for pos := 0; ; { // Skip leading space at beginning of line. blank := true for pos < len(p.output) && (p.output[pos] == ' ' || p.output[pos] == '\t') { pos++ } // Skip over // comment if any. if pos+3 < len(p.output) && p.output[pos] == tabwriter.Escape && p.output[pos+1] == '/' && p.output[pos+2] == '/' { blank = false for pos < len(p.output) && !isNL(p.output[pos]) { pos++ } } // Skip over \n at end of line. if pos >= len(p.output) || !isNL(p.output[pos]) { break } pos++ if blank { insert = pos } } // If there is a //go:build comment before the place we identified, // use that point instead. (Earlier in the file is always fine.) if len(p.goBuild) > 0 && p.goBuild[0] < insert { insert = p.goBuild[0] } else if len(p.plusBuild) > 0 && p.plusBuild[0] < insert { insert = p.plusBuild[0] } var x constraint.Expr switch len(p.goBuild) { case 0: // Synthesize //go:build expression from // +build lines. for _, pos := range p.plusBuild { y, err := constraint.Parse(p.commentTextAt(pos)) if err != nil { x = nil break } if x == nil { x = y } else { x = &constraint.AndExpr{X: x, Y: y} } } case 1: // Parse //go:build expression. x, _ = constraint.Parse(p.commentTextAt(p.goBuild[0])) } var block []byte if x == nil { // Don't have a valid //go:build expression to treat as truth. // Bring all the lines together but leave them alone. 
// Note that these are already tabwriter-escaped. for _, pos := range p.goBuild { block = append(block, p.lineAt(pos)...) } for _, pos := range p.plusBuild { block = append(block, p.lineAt(pos)...) } } else { block = append(block, tabwriter.Escape) block = append(block, "//go:build "...) block = append(block, x.String()...) block = append(block, tabwriter.Escape, '\n') if len(p.plusBuild) > 0 { lines, err := constraint.PlusBuildLines(x) if err != nil { lines = []string{"// +build error: " + err.Error()} } for _, line := range lines { block = append(block, tabwriter.Escape) block = append(block, line...) block = append(block, tabwriter.Escape, '\n') } } } block = append(block, '\n') // Build sorted list of lines to delete from remainder of output. toDelete := append(p.goBuild, p.plusBuild...) sort.Ints(toDelete) // Collect output after insertion point, with lines deleted, into after. var after []byte start := insert for _, end := range toDelete { if end < start { continue } after = appendLines(after, p.output[start:end]) start = end + len(p.lineAt(end)) } after = appendLines(after, p.output[start:]) if n := len(after); n >= 2 && isNL(after[n-1]) && isNL(after[n-2]) { after = after[:n-1] } p.output = p.output[:insert] p.output = append(p.output, block...) p.output = append(p.output, after...) } // appendLines is like append(x, y...) // but it avoids creating doubled blank lines, // which would not be gofmt-standard output. // It assumes that only whole blocks of lines are being appended, // not line fragments. func appendLines(x, y []byte) []byte { if len(y) > 0 && isNL(y[0]) && // y starts in blank line (len(x) == 0 || len(x) >= 2 && isNL(x[len(x)-1]) && isNL(x[len(x)-2])) { // x is empty or ends in blank line y = y[1:] // delete y's leading blank line } return append(x, y...) 
} func (p *printer) lineAt(start int) []byte { pos := start for pos < len(p.output) && !isNL(p.output[pos]) { pos++ } if pos < len(p.output) { pos++ } return p.output[start:pos] } func (p *printer) commentTextAt(start int) string { if start < len(p.output) && p.output[start] == tabwriter.Escape { start++ } pos := start for pos < len(p.output) && p.output[pos] != tabwriter.Escape && !isNL(p.output[pos]) { pos++ } return string(p.output[start:pos]) } func isNL(b byte) bool { return b == '\n' || b == '\f' } gofumpt-0.6.0/internal/govendor/go/printer/nodes.go000066400000000000000000001511321455555333400224030ustar00rootroot00000000000000// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // This file implements printing of AST nodes; specifically // expressions, statements, declarations, and files. It uses // the print functionality implemented in printer.go. package printer import ( "go/ast" "go/token" "math" "strconv" "strings" "unicode" "unicode/utf8" ) // Formatting issues: // - better comment formatting for /*-style comments at the end of a line (e.g. a declaration) // when the comment spans multiple lines; if such a comment is just two lines, formatting is // not idempotent // - formatting of expression lists // - should use blank instead of tab to separate one-line function bodies from // the function header unless there is a group of consecutive one-liners // ---------------------------------------------------------------------------- // Common AST nodes. // Print as many newlines as necessary (but at least min newlines) to get to // the current line. ws is printed before the first line break. If newSection // is set, the first line break is printed as formfeed. Returns 0 if no line // breaks were printed, returns 1 if there was exactly one newline printed, // and returns a value > 1 if there was a formfeed or more than one newline // printed. 
// // TODO(gri): linebreak may add too many lines if the next statement at "line" // is preceded by comments because the computation of n assumes // the current position before the comment and the target position // after the comment. Thus, after interspersing such comments, the // space taken up by them is not considered to reduce the number of // linebreaks. At the moment there is no easy way to know about // future (not yet interspersed) comments in this function. func (p *printer) linebreak(line, min int, ws whiteSpace, newSection bool) (nbreaks int) { n := nlimit(line - p.pos.Line) if n < min { n = min } if n > 0 { p.print(ws) if newSection { p.print(formfeed) n-- nbreaks = 2 } nbreaks += n for ; n > 0; n-- { p.print(newline) } } return } // setComment sets g as the next comment if g != nil and if node comments // are enabled - this mode is used when printing source code fragments such // as exports only. It assumes that there is no pending comment in p.comments // and at most one pending comment in the p.comment cache. 
func (p *printer) setComment(g *ast.CommentGroup) { if g == nil || !p.useNodeComments { return } if p.comments == nil { // initialize p.comments lazily p.comments = make([]*ast.CommentGroup, 1) } else if p.cindex < len(p.comments) { // for some reason there are pending comments; this // should never happen - handle gracefully and flush // all comments up to g, ignore anything after that p.flush(p.posFor(g.List[0].Pos()), token.ILLEGAL) p.comments = p.comments[0:1] // in debug mode, report error p.internalError("setComment found pending comments") } p.comments[0] = g p.cindex = 0 // don't overwrite any pending comment in the p.comment cache // (there may be a pending comment when a line comment is // immediately followed by a lead comment with no other // tokens between) if p.commentOffset == infinity { p.nextComment() // get comment ready for use } } type exprListMode uint const ( commaTerm exprListMode = 1 << iota // list is optionally terminated by a comma noIndent // no extra indentation in multi-line lists ) // If indent is set, a multi-line identifier list is indented after the // first linebreak encountered. func (p *printer) identList(list []*ast.Ident, indent bool) { // convert into an expression list so we can re-use exprList formatting xlist := make([]ast.Expr, len(list)) for i, x := range list { xlist[i] = x } var mode exprListMode if !indent { mode = noIndent } p.exprList(token.NoPos, xlist, 1, mode, token.NoPos, false) } const filteredMsg = "contains filtered or unexported fields" // Print a list of expressions. If the list spans multiple // source lines, the original line breaks are respected between // expressions. // // TODO(gri) Consider rewriting this to be independent of []ast.Expr // so that we can use the algorithm for any kind of list // // (e.g., pass list via a channel over which to range). 
func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exprListMode, next0 token.Pos, isIncomplete bool) { if len(list) == 0 { if isIncomplete { prev := p.posFor(prev0) next := p.posFor(next0) if prev.IsValid() && prev.Line == next.Line { p.print("/* " + filteredMsg + " */") } else { p.print(newline) p.print(indent, "// "+filteredMsg, unindent, newline) } } return } prev := p.posFor(prev0) next := p.posFor(next0) line := p.lineFor(list[0].Pos()) endLine := p.lineFor(list[len(list)-1].End()) if prev.IsValid() && prev.Line == line && line == endLine { // all list entries on a single line for i, x := range list { if i > 0 { // use position of expression following the comma as // comma position for correct comment placement p.setPos(x.Pos()) p.print(token.COMMA, blank) } p.expr0(x, depth) } if isIncomplete { p.print(token.COMMA, blank, "/* "+filteredMsg+" */") } return } // list entries span multiple lines; // use source code positions to guide line breaks // Don't add extra indentation if noIndent is set; // i.e., pretend that the first line is already indented. ws := ignore if mode&noIndent == 0 { ws = indent } // The first linebreak is always a formfeed since this section must not // depend on any previous formatting. prevBreak := -1 // index of last expression that was followed by a linebreak if prev.IsValid() && prev.Line < line && p.linebreak(line, 0, ws, true) > 0 { ws = ignore prevBreak = 0 } // initialize expression/key size: a zero value indicates expr/key doesn't fit on a single line size := 0 // We use the ratio between the geometric mean of the previous key sizes and // the current size to determine if there should be a break in the alignment. // To compute the geometric mean we accumulate the ln(size) values (lnsum) // and the number of sizes included (count). 
lnsum := 0.0 count := 0 // print all list elements prevLine := prev.Line for i, x := range list { line = p.lineFor(x.Pos()) // Determine if the next linebreak, if any, needs to use formfeed: // in general, use the entire node size to make the decision; for // key:value expressions, use the key size. // TODO(gri) for a better result, should probably incorporate both // the key and the node size into the decision process useFF := true // Determine element size: All bets are off if we don't have // position information for the previous and next token (likely // generated code - simply ignore the size in this case by setting // it to 0). prevSize := size const infinity = 1e6 // larger than any source line size = p.nodeSize(x, infinity) pair, isPair := x.(*ast.KeyValueExpr) if size <= infinity && prev.IsValid() && next.IsValid() { // x fits on a single line if isPair { size = p.nodeSize(pair.Key, infinity) // size <= infinity } } else { // size too large or we don't have good layout information size = 0 } // If the previous line and the current line had single- // line-expressions and the key sizes are small or the // ratio between the current key and the geometric mean // if the previous key sizes does not exceed a threshold, // align columns and do not use formfeed. if prevSize > 0 && size > 0 { const smallSize = 40 if count == 0 || prevSize <= smallSize && size <= smallSize { useFF = false } else { const r = 2.5 // threshold geomean := math.Exp(lnsum / float64(count)) // count > 0 ratio := float64(size) / geomean useFF = r*ratio <= 1 || r <= ratio } } needsLinebreak := 0 < prevLine && prevLine < line if i > 0 { // Use position of expression following the comma as // comma position for correct comment placement, but // only if the expression is on the same line. 
if !needsLinebreak { p.setPos(x.Pos()) } p.print(token.COMMA) needsBlank := true if needsLinebreak { // Lines are broken using newlines so comments remain aligned // unless useFF is set or there are multiple expressions on // the same line in which case formfeed is used. nbreaks := p.linebreak(line, 0, ws, useFF || prevBreak+1 < i) if nbreaks > 0 { ws = ignore prevBreak = i needsBlank = false // we got a line break instead } // If there was a new section or more than one new line // (which means that the tabwriter will implicitly break // the section), reset the geomean variables since we are // starting a new group of elements with the next element. if nbreaks > 1 { lnsum = 0 count = 0 } } if needsBlank { p.print(blank) } } if len(list) > 1 && isPair && size > 0 && needsLinebreak { // We have a key:value expression that fits onto one line // and it's not on the same line as the prior expression: // Use a column for the key such that consecutive entries // can align if possible. // (needsLinebreak is set if we started a new line before) p.expr(pair.Key) p.setPos(pair.Colon) p.print(token.COLON, vtab) p.expr(pair.Value) } else { p.expr0(x, depth) } if size > 0 { lnsum += math.Log(float64(size)) count++ } prevLine = line } if mode&commaTerm != 0 && next.IsValid() && p.pos.Line < next.Line { // Print a terminating comma if the next token is on a new line. 
p.print(token.COMMA) if isIncomplete { p.print(newline) p.print("// " + filteredMsg) } if ws == ignore && mode&noIndent == 0 { // unindent if we indented p.print(unindent) } p.print(formfeed) // terminating comma needs a line break to look good return } if isIncomplete { p.print(token.COMMA, newline) p.print("// "+filteredMsg, newline) } if ws == ignore && mode&noIndent == 0 { // unindent if we indented p.print(unindent) } } type paramMode int const ( funcParam paramMode = iota funcTParam typeTParam ) func (p *printer) parameters(fields *ast.FieldList, mode paramMode) { openTok, closeTok := token.LPAREN, token.RPAREN if mode != funcParam { openTok, closeTok = token.LBRACK, token.RBRACK } p.setPos(fields.Opening) p.print(openTok) if len(fields.List) > 0 { prevLine := p.lineFor(fields.Opening) ws := indent for i, par := range fields.List { // determine par begin and end line (may be different // if there are multiple parameter names for this par // or the type is on a separate line) parLineBeg := p.lineFor(par.Pos()) parLineEnd := p.lineFor(par.End()) // separating "," if needed needsLinebreak := 0 < prevLine && prevLine < parLineBeg if i > 0 { // use position of parameter following the comma as // comma position for correct comma placement, but // only if the next parameter is on the same line if !needsLinebreak { p.setPos(par.Pos()) } p.print(token.COMMA) } // separator if needed (linebreak or blank) if needsLinebreak && p.linebreak(parLineBeg, 0, ws, true) > 0 { // break line if the opening "(" or previous parameter ended on a different line ws = ignore } else if i > 0 { p.print(blank) } // parameter names if len(par.Names) > 0 { // Very subtle: If we indented before (ws == ignore), identList // won't indent again. If we didn't (ws == indent), identList will // indent if the identList spans multiple lines, and it will outdent // again at the end (and still ws == indent). 
Thus, a subsequent indent // by a linebreak call after a type, or in the next multi-line identList // will do the right thing. p.identList(par.Names, ws == indent) p.print(blank) } // parameter type p.expr(stripParensAlways(par.Type)) prevLine = parLineEnd } // if the closing ")" is on a separate line from the last parameter, // print an additional "," and line break if closing := p.lineFor(fields.Closing); 0 < prevLine && prevLine < closing { p.print(token.COMMA) p.linebreak(closing, 0, ignore, true) } else if mode == typeTParam && fields.NumFields() == 1 && combinesWithName(fields.List[0].Type) { // A type parameter list [P T] where the name P and the type expression T syntactically // combine to another valid (value) expression requires a trailing comma, as in [P *T,] // (or an enclosing interface as in [P interface(*T)]), so that the type parameter list // is not parsed as an array length [P*T]. p.print(token.COMMA) } // unindent if we indented if ws == ignore { p.print(unindent) } } p.setPos(fields.Closing) p.print(closeTok) } // combinesWithName reports whether a name followed by the expression x // syntactically combines to another valid (value) expression. For instance // using *T for x, "name *T" syntactically appears as the expression x*T. // On the other hand, using P|Q or *P|~Q for x, "name P|Q" or name *P|~Q" // cannot be combined into a valid (value) expression. func combinesWithName(x ast.Expr) bool { switch x := x.(type) { case *ast.StarExpr: // name *x.X combines to name*x.X if x.X is not a type element return !isTypeElem(x.X) case *ast.BinaryExpr: return combinesWithName(x.X) && !isTypeElem(x.Y) case *ast.ParenExpr: // name(x) combines but we are making sure at // the call site that x is never parenthesized. panic("unexpected parenthesized expression") } return false } // isTypeElem reports whether x is a (possibly parenthesized) type element expression. // The result is false if x could be a type element OR an ordinary (value) expression. 
func isTypeElem(x ast.Expr) bool { switch x := x.(type) { case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType: return true case *ast.UnaryExpr: return x.Op == token.TILDE case *ast.BinaryExpr: return isTypeElem(x.X) || isTypeElem(x.Y) case *ast.ParenExpr: return isTypeElem(x.X) } return false } func (p *printer) signature(sig *ast.FuncType) { if sig.TypeParams != nil { p.parameters(sig.TypeParams, funcTParam) } if sig.Params != nil { p.parameters(sig.Params, funcParam) } else { p.print(token.LPAREN, token.RPAREN) } res := sig.Results n := res.NumFields() if n > 0 { // res != nil p.print(blank) if n == 1 && res.List[0].Names == nil { // single anonymous res; no ()'s p.expr(stripParensAlways(res.List[0].Type)) return } p.parameters(res, funcParam) } } func identListSize(list []*ast.Ident, maxSize int) (size int) { for i, x := range list { if i > 0 { size += len(", ") } size += utf8.RuneCountInString(x.Name) if size >= maxSize { break } } return } func (p *printer) isOneLineFieldList(list []*ast.Field) bool { if len(list) != 1 { return false // allow only one field } f := list[0] if f.Tag != nil || f.Comment != nil { return false // don't allow tags or comments } // only name(s) and type const maxSize = 30 // adjust as appropriate, this is an approximate value namesSize := identListSize(f.Names, maxSize) if namesSize > 0 { namesSize = 1 // blank between names and types } typeSize := p.nodeSize(f.Type, maxSize) return namesSize+typeSize <= maxSize } func (p *printer) setLineComment(text string) { p.setComment(&ast.CommentGroup{List: []*ast.Comment{{Slash: token.NoPos, Text: text}}}) } func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool) { lbrace := fields.Opening list := fields.List rbrace := fields.Closing hasComments := isIncomplete || p.commentBefore(p.posFor(rbrace)) srcIsOneLine := lbrace.IsValid() && rbrace.IsValid() && p.lineFor(lbrace) == p.lineFor(rbrace) if !hasComments && 
srcIsOneLine { // possibly a one-line struct/interface if len(list) == 0 { // no blank between keyword and {} in this case p.setPos(lbrace) p.print(token.LBRACE) p.setPos(rbrace) p.print(token.RBRACE) return } else if p.isOneLineFieldList(list) { // small enough - print on one line // (don't use identList and ignore source line breaks) p.setPos(lbrace) p.print(token.LBRACE, blank) f := list[0] if isStruct { for i, x := range f.Names { if i > 0 { // no comments so no need for comma position p.print(token.COMMA, blank) } p.expr(x) } if len(f.Names) > 0 { p.print(blank) } p.expr(f.Type) } else { // interface if len(f.Names) > 0 { name := f.Names[0] // method name p.expr(name) p.signature(f.Type.(*ast.FuncType)) // don't print "func" } else { // embedded interface p.expr(f.Type) } } p.print(blank) p.setPos(rbrace) p.print(token.RBRACE) return } } // hasComments || !srcIsOneLine p.print(blank) p.setPos(lbrace) p.print(token.LBRACE, indent) if hasComments || len(list) > 0 { p.print(formfeed) } if isStruct { sep := vtab if len(list) == 1 { sep = blank } var line int for i, f := range list { if i > 0 { p.linebreak(p.lineFor(f.Pos()), 1, ignore, p.linesFrom(line) > 0) } extraTabs := 0 p.setComment(f.Doc) p.recordLine(&line) if len(f.Names) > 0 { // named fields p.identList(f.Names, false) p.print(sep) p.expr(f.Type) extraTabs = 1 } else { // anonymous field p.expr(f.Type) extraTabs = 2 } if f.Tag != nil { if len(f.Names) > 0 && sep == vtab { p.print(sep) } p.print(sep) p.expr(f.Tag) extraTabs = 0 } if f.Comment != nil { for ; extraTabs > 0; extraTabs-- { p.print(sep) } p.setComment(f.Comment) } } if isIncomplete { if len(list) > 0 { p.print(formfeed) } p.flush(p.posFor(rbrace), token.RBRACE) // make sure we don't lose the last line comment p.setLineComment("// " + filteredMsg) } } else { // interface var line int var prev *ast.Ident // previous "type" identifier for i, f := range list { var name *ast.Ident // first name, or nil if len(f.Names) > 0 { name = f.Names[0] } if i 
> 0 { // don't do a line break (min == 0) if we are printing a list of types // TODO(gri) this doesn't work quite right if the list of types is // spread across multiple lines min := 1 if prev != nil && name == prev { min = 0 } p.linebreak(p.lineFor(f.Pos()), min, ignore, p.linesFrom(line) > 0) } p.setComment(f.Doc) p.recordLine(&line) if name != nil { // method p.expr(name) p.signature(f.Type.(*ast.FuncType)) // don't print "func" prev = nil } else { // embedded interface p.expr(f.Type) prev = nil } p.setComment(f.Comment) } if isIncomplete { if len(list) > 0 { p.print(formfeed) } p.flush(p.posFor(rbrace), token.RBRACE) // make sure we don't lose the last line comment p.setLineComment("// contains filtered or unexported methods") } } p.print(unindent, formfeed) p.setPos(rbrace) p.print(token.RBRACE) } // ---------------------------------------------------------------------------- // Expressions func walkBinary(e *ast.BinaryExpr) (has4, has5 bool, maxProblem int) { switch e.Op.Precedence() { case 4: has4 = true case 5: has5 = true } switch l := e.X.(type) { case *ast.BinaryExpr: if l.Op.Precedence() < e.Op.Precedence() { // parens will be inserted. // pretend this is an *ast.ParenExpr and do nothing. break } h4, h5, mp := walkBinary(l) has4 = has4 || h4 has5 = has5 || h5 if maxProblem < mp { maxProblem = mp } } switch r := e.Y.(type) { case *ast.BinaryExpr: if r.Op.Precedence() <= e.Op.Precedence() { // parens will be inserted. // pretend this is an *ast.ParenExpr and do nothing. 
			break
		}
		h4, h5, mp := walkBinary(r)
		has4 = has4 || h4
		has5 = has5 || h5
		if maxProblem < mp {
			maxProblem = mp
		}

	case *ast.StarExpr:
		if e.Op == token.QUO { // `*/`
			maxProblem = 5
		}

	case *ast.UnaryExpr:
		// A unary operand immediately following a binary operator may
		// clash with it if printed without a space (e.g. "/" + "*").
		switch e.Op.String() + r.Op.String() {
		case "/*", "&&", "&^":
			maxProblem = 5
		case "++", "--":
			if maxProblem < 4 {
				maxProblem = 4
			}
		}
	}
	return
}

// cutoff computes the operator precedence below which spaces are printed
// around operators of the binary expression e, given its nesting depth.
// See the "Format the binary expression" comment below for the rules.
func cutoff(e *ast.BinaryExpr, depth int) int {
	has4, has5, maxProblem := walkBinary(e)
	if maxProblem > 0 {
		return maxProblem + 1
	}
	if has4 && has5 {
		if depth == 1 {
			return 5 // Normal mode: use spaces to distinguish precedence
		}
		return 4 // Compact mode: never use spaces
	}
	if depth == 1 {
		return 6 // Normal mode: always use spaces
	}
	return 4
}

// diffPrec returns 1 if the precedence of (binary expression) expr differs
// from prec, and 0 otherwise; it is used to adjust the depth passed down
// when printing sub-expressions.
func diffPrec(expr ast.Expr, prec int) int {
	x, ok := expr.(*ast.BinaryExpr)
	if !ok || prec != x.Op.Precedence() {
		return 1
	}
	return 0
}

// reduceDepth decrements depth but never below 1; parentheses undo one
// level of expression depth.
func reduceDepth(depth int) int {
	depth--
	if depth < 1 {
		depth = 1
	}
	return depth
}

// Format the binary expression: decide the cutoff and then format.
// Let's call depth == 1 Normal mode, and depth > 1 Compact mode.
// (Algorithm suggestion by Russ Cox.)
//
// The precedences are:
//
//	5             *  /  %  <<  >>  &  &^
//	4             +  -  |  ^
//	3             ==  !=  <  <=  >  >=
//	2             &&
//	1             ||
//
// The only decision is whether there will be spaces around levels 4 and 5.
// There are never spaces at level 6 (unary), and always spaces at levels 3 and below.
//
// To choose the cutoff, look at the whole expression but excluding primary
// expressions (function calls, parenthesized exprs), and apply these rules:
//
//  1. If there is a binary operator with a right side unary operand
//     that would clash without a space, the cutoff must be (in order):
//
//	/*	6
//	&&	6
//	&^	6
//	++	5
//	--	5
//
//     (Comparison operators always have spaces around them.)
//
//  2. If there is a mix of level 5 and level 4 operators, then the cutoff
//     is 5 (use spaces to distinguish precedence) in Normal mode
//     and 4 (never use spaces) in Compact mode.
//
//  3. If there are no level 4 operators or no level 5 operators, then the
//     cutoff is 6 (always use spaces) in Normal mode
//     and 4 (never use spaces) in Compact mode.
func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int) { prec := x.Op.Precedence() if prec < prec1 { // parenthesis needed // Note: The parser inserts an ast.ParenExpr node; thus this case // can only occur if the AST is created in a different way. p.print(token.LPAREN) p.expr0(x, reduceDepth(depth)) // parentheses undo one level of depth p.print(token.RPAREN) return } printBlank := prec < cutoff ws := indent p.expr1(x.X, prec, depth+diffPrec(x.X, prec)) if printBlank { p.print(blank) } xline := p.pos.Line // before the operator (it may be on the next line!) yline := p.lineFor(x.Y.Pos()) p.setPos(x.OpPos) p.print(x.Op) if xline != yline && xline > 0 && yline > 0 { // at least one line break, but respect an extra empty line // in the source if p.linebreak(yline, 1, ws, true) > 0 { ws = ignore printBlank = false // no blank after line break } } if printBlank { p.print(blank) } p.expr1(x.Y, prec+1, depth+1) if ws == ignore { p.print(unindent) } } func isBinary(expr ast.Expr) bool { _, ok := expr.(*ast.BinaryExpr) return ok } func (p *printer) expr1(expr ast.Expr, prec1, depth int) { p.setPos(expr.Pos()) switch x := expr.(type) { case *ast.BadExpr: p.print("BadExpr") case *ast.Ident: p.print(x) case *ast.BinaryExpr: if depth < 1 { p.internalError("depth < 1:", depth) depth = 1 } p.binaryExpr(x, prec1, cutoff(x, depth), depth) case *ast.KeyValueExpr: p.expr(x.Key) p.setPos(x.Colon) p.print(token.COLON, blank) p.expr(x.Value) case *ast.StarExpr: const prec = token.UnaryPrec if prec < prec1 { // parenthesis needed p.print(token.LPAREN) p.print(token.MUL) p.expr(x.X) p.print(token.RPAREN) } else { // no parenthesis needed p.print(token.MUL) p.expr(x.X) } case *ast.UnaryExpr: const prec = token.UnaryPrec if prec < prec1 { // parenthesis needed p.print(token.LPAREN) p.expr(x) p.print(token.RPAREN) } else { // no parenthesis needed p.print(x.Op) if x.Op == token.RANGE { // TODO(gri) Remove this code if it cannot be reached. 
p.print(blank) } p.expr1(x.X, prec, depth) } case *ast.BasicLit: if p.Config.Mode&normalizeNumbers != 0 { x = normalizedNumber(x) } p.print(x) case *ast.FuncLit: p.setPos(x.Type.Pos()) p.print(token.FUNC) // See the comment in funcDecl about how the header size is computed. startCol := p.out.Column - len("func") p.signature(x.Type) p.funcBody(p.distanceFrom(x.Type.Pos(), startCol), blank, x.Body) case *ast.ParenExpr: if _, hasParens := x.X.(*ast.ParenExpr); hasParens { // don't print parentheses around an already parenthesized expression // TODO(gri) consider making this more general and incorporate precedence levels p.expr0(x.X, depth) } else { p.print(token.LPAREN) p.expr0(x.X, reduceDepth(depth)) // parentheses undo one level of depth p.setPos(x.Rparen) p.print(token.RPAREN) } case *ast.SelectorExpr: p.selectorExpr(x, depth, false) case *ast.TypeAssertExpr: p.expr1(x.X, token.HighestPrec, depth) p.print(token.PERIOD) p.setPos(x.Lparen) p.print(token.LPAREN) if x.Type != nil { p.expr(x.Type) } else { p.print(token.TYPE) } p.setPos(x.Rparen) p.print(token.RPAREN) case *ast.IndexExpr: // TODO(gri): should treat[] like parentheses and undo one level of depth p.expr1(x.X, token.HighestPrec, 1) p.setPos(x.Lbrack) p.print(token.LBRACK) p.expr0(x.Index, depth+1) p.setPos(x.Rbrack) p.print(token.RBRACK) case *ast.IndexListExpr: // TODO(gri): as for IndexExpr, should treat [] like parentheses and undo // one level of depth p.expr1(x.X, token.HighestPrec, 1) p.setPos(x.Lbrack) p.print(token.LBRACK) p.exprList(x.Lbrack, x.Indices, depth+1, commaTerm, x.Rbrack, false) p.setPos(x.Rbrack) p.print(token.RBRACK) case *ast.SliceExpr: // TODO(gri): should treat[] like parentheses and undo one level of depth p.expr1(x.X, token.HighestPrec, 1) p.setPos(x.Lbrack) p.print(token.LBRACK) indices := []ast.Expr{x.Low, x.High} if x.Max != nil { indices = append(indices, x.Max) } // determine if we need extra blanks around ':' var needsBlanks bool if depth <= 1 { var indexCount int var 
hasBinaries bool for _, x := range indices { if x != nil { indexCount++ if isBinary(x) { hasBinaries = true } } } if indexCount > 1 && hasBinaries { needsBlanks = true } } for i, x := range indices { if i > 0 { if indices[i-1] != nil && needsBlanks { p.print(blank) } p.print(token.COLON) if x != nil && needsBlanks { p.print(blank) } } if x != nil { p.expr0(x, depth+1) } } p.setPos(x.Rbrack) p.print(token.RBRACK) case *ast.CallExpr: if len(x.Args) > 1 { depth++ } var wasIndented bool if _, ok := x.Fun.(*ast.FuncType); ok { // conversions to literal function types require parentheses around the type p.print(token.LPAREN) wasIndented = p.possibleSelectorExpr(x.Fun, token.HighestPrec, depth) p.print(token.RPAREN) } else { wasIndented = p.possibleSelectorExpr(x.Fun, token.HighestPrec, depth) } p.setPos(x.Lparen) p.print(token.LPAREN) if x.Ellipsis.IsValid() { p.exprList(x.Lparen, x.Args, depth, 0, x.Ellipsis, false) p.setPos(x.Ellipsis) p.print(token.ELLIPSIS) if x.Rparen.IsValid() && p.lineFor(x.Ellipsis) < p.lineFor(x.Rparen) { p.print(token.COMMA, formfeed) } } else { p.exprList(x.Lparen, x.Args, depth, commaTerm, x.Rparen, false) } p.setPos(x.Rparen) p.print(token.RPAREN) if wasIndented { p.print(unindent) } case *ast.CompositeLit: // composite literal elements that are composite literals themselves may have the type omitted if x.Type != nil { p.expr1(x.Type, token.HighestPrec, depth) } p.level++ p.setPos(x.Lbrace) p.print(token.LBRACE) p.exprList(x.Lbrace, x.Elts, 1, commaTerm, x.Rbrace, x.Incomplete) // do not insert extra line break following a /*-style comment // before the closing '}' as it might break the code if there // is no trailing ',' mode := noExtraLinebreak // do not insert extra blank following a /*-style comment // before the closing '}' unless the literal is empty if len(x.Elts) > 0 { mode |= noExtraBlank } // need the initial indent to print lone comments with // the proper level of indentation p.print(indent, unindent, mode) p.setPos(x.Rbrace) 
p.print(token.RBRACE, mode) p.level-- case *ast.Ellipsis: p.print(token.ELLIPSIS) if x.Elt != nil { p.expr(x.Elt) } case *ast.ArrayType: p.print(token.LBRACK) if x.Len != nil { p.expr(x.Len) } p.print(token.RBRACK) p.expr(x.Elt) case *ast.StructType: p.print(token.STRUCT) p.fieldList(x.Fields, true, x.Incomplete) case *ast.FuncType: p.print(token.FUNC) p.signature(x) case *ast.InterfaceType: p.print(token.INTERFACE) p.fieldList(x.Methods, false, x.Incomplete) case *ast.MapType: p.print(token.MAP, token.LBRACK) p.expr(x.Key) p.print(token.RBRACK) p.expr(x.Value) case *ast.ChanType: switch x.Dir { case ast.SEND | ast.RECV: p.print(token.CHAN) case ast.RECV: p.print(token.ARROW, token.CHAN) // x.Arrow and x.Pos() are the same case ast.SEND: p.print(token.CHAN) p.setPos(x.Arrow) p.print(token.ARROW) } p.print(blank) p.expr(x.Value) default: panic("unreachable") } } // normalizedNumber rewrites base prefixes and exponents // of numbers to use lower-case letters (0X123 to 0x123 and 1.2E3 to 1.2e3), // and removes leading 0's from integer imaginary literals (0765i to 765i). // It leaves hexadecimal digits alone. // // normalizedNumber doesn't modify the ast.BasicLit value lit points to. // If lit is not a number or a number in canonical format already, // lit is returned as is. Otherwise a new ast.BasicLit is created. func normalizedNumber(lit *ast.BasicLit) *ast.BasicLit { if lit.Kind != token.INT && lit.Kind != token.FLOAT && lit.Kind != token.IMAG { return lit // not a number - nothing to do } if len(lit.Value) < 2 { return lit // only one digit (common case) - nothing to do } // len(lit.Value) >= 2 // We ignore lit.Kind because for lit.Kind == token.IMAG the literal may be an integer // or floating-point value, decimal or not. Instead, just consider the literal pattern. 
x := lit.Value switch x[:2] { default: // 0-prefix octal, decimal int, or float (possibly with 'i' suffix) if i := strings.LastIndexByte(x, 'E'); i >= 0 { x = x[:i] + "e" + x[i+1:] break } // remove leading 0's from integer (but not floating-point) imaginary literals if x[len(x)-1] == 'i' && !strings.ContainsAny(x, ".e") { x = strings.TrimLeft(x, "0_") if x == "i" { x = "0i" } } case "0X": x = "0x" + x[2:] // possibly a hexadecimal float if i := strings.LastIndexByte(x, 'P'); i >= 0 { x = x[:i] + "p" + x[i+1:] } case "0x": // possibly a hexadecimal float i := strings.LastIndexByte(x, 'P') if i == -1 { return lit // nothing to do } x = x[:i] + "p" + x[i+1:] case "0O": x = "0o" + x[2:] case "0o": return lit // nothing to do case "0B": x = "0b" + x[2:] case "0b": return lit // nothing to do } return &ast.BasicLit{ValuePos: lit.ValuePos, Kind: lit.Kind, Value: x} } func (p *printer) possibleSelectorExpr(expr ast.Expr, prec1, depth int) bool { if x, ok := expr.(*ast.SelectorExpr); ok { return p.selectorExpr(x, depth, true) } p.expr1(expr, prec1, depth) return false } // selectorExpr handles an *ast.SelectorExpr node and reports whether x spans // multiple lines. func (p *printer) selectorExpr(x *ast.SelectorExpr, depth int, isMethod bool) bool { p.expr1(x.X, token.HighestPrec, depth) p.print(token.PERIOD) if line := p.lineFor(x.Sel.Pos()); p.pos.IsValid() && p.pos.Line < line { p.print(indent, newline) p.setPos(x.Sel.Pos()) p.print(x.Sel) if !isMethod { p.print(unindent) } return true } p.setPos(x.Sel.Pos()) p.print(x.Sel) return false } func (p *printer) expr0(x ast.Expr, depth int) { p.expr1(x, token.LowestPrec, depth) } func (p *printer) expr(x ast.Expr) { const depth = 1 p.expr1(x, token.LowestPrec, depth) } // ---------------------------------------------------------------------------- // Statements // Print the statement list indented, but without a newline after the last statement. 
// Extra line breaks between statements in the source are respected but at most one
// empty line is printed between statements.
func (p *printer) stmtList(list []ast.Stmt, nindent int, nextIsRBrace bool) {
	if nindent > 0 {
		p.print(indent)
	}
	var line int
	i := 0 // number of non-empty statements printed so far
	for _, s := range list {
		// ignore empty statements (was issue 3466)
		if _, isEmpty := s.(*ast.EmptyStmt); !isEmpty {
			// nindent == 0 only for lists of switch/select case clauses;
			// in those cases each clause is a new section
			if len(p.output) > 0 {
				// only print line break if we are not at the beginning of the output
				// (i.e., we are not printing only a partial program)
				p.linebreak(p.lineFor(s.Pos()), 1, ignore, i == 0 || nindent == 0 || p.linesFrom(line) > 0)
			}
			p.recordLine(&line)
			p.stmt(s, nextIsRBrace && i == len(list)-1)
			// labeled statements put labels on a separate line, but here
			// we only care about the start line of the actual statement
			// without label - correct line for each label
			for t := s; ; {
				lt, _ := t.(*ast.LabeledStmt)
				if lt == nil {
					break
				}
				line++
				t = lt.Stmt
			}
			i++
		}
	}
	if nindent > 0 {
		p.print(unindent)
	}
}

// block prints an *ast.BlockStmt; it always spans at least two lines.
func (p *printer) block(b *ast.BlockStmt, nindent int) {
	p.setPos(b.Lbrace)
	p.print(token.LBRACE)
	p.stmtList(b.List, nindent, true)
	p.linebreak(p.lineFor(b.Rbrace), 1, ignore, true)
	p.setPos(b.Rbrace)
	p.print(token.RBRACE)
}

// isTypeName reports whether x is a (possibly qualified) identifier,
// i.e. syntactically a type name.
func isTypeName(x ast.Expr) bool {
	switch t := x.(type) {
	case *ast.Ident:
		return true
	case *ast.SelectorExpr:
		return isTypeName(t.X)
	}
	return false
}

// stripParens removes redundant outer parentheses from x, but keeps them
// when stripping would change how the result is parsed in statement context.
func stripParens(x ast.Expr) ast.Expr {
	if px, strip := x.(*ast.ParenExpr); strip {
		// parentheses must not be stripped if there are any
		// unparenthesized composite literals starting with
		// a type name
		ast.Inspect(px.X, func(node ast.Node) bool {
			switch x := node.(type) {
			case *ast.ParenExpr:
				// parentheses protect enclosed composite literals
				return false
			case *ast.CompositeLit:
				if isTypeName(x.Type) {
					strip = false // do not strip parentheses
				}
				return false
			}
			// in all other cases, keep inspecting
			return true
		})
		if strip {
			return stripParens(px.X)
		}
	}
	return x
}

// stripParensAlways removes all outer parentheses from x unconditionally.
func stripParensAlways(x ast.Expr) ast.Expr {
	if x, ok := x.(*ast.ParenExpr); ok {
		return stripParensAlways(x.X)
	}
	return x
}

// controlClause prints the init/condition/post clause of an if, switch, or
// for statement. init and post may be nil; for a for statement (isForStmt)
// both semicolons are printed whenever any of init/post is present.
func (p *printer) controlClause(isForStmt bool, init ast.Stmt, expr ast.Expr, post ast.Stmt) {
	p.print(blank)
	needsBlank := false
	if init == nil && post == nil {
		// no semicolons required
		if expr != nil {
			p.expr(stripParens(expr))
			needsBlank = true
		}
	} else {
		// all semicolons required
		// (they are not separators, print them explicitly)
		if init != nil {
			p.stmt(init, false)
		}
		p.print(token.SEMICOLON, blank)
		if expr != nil {
			p.expr(stripParens(expr))
			needsBlank = true
		}
		if isForStmt {
			p.print(token.SEMICOLON, blank)
			needsBlank = false
			if post != nil {
				p.stmt(post, false)
				needsBlank = true
			}
		}
	}
	if needsBlank {
		p.print(blank)
	}
}

// indentList reports whether an expression list would look better if it
// were indented wholesale (starting with the very first element, rather
// than starting at the first line break).
func (p *printer) indentList(list []ast.Expr) bool { // Heuristic: indentList reports whether there are more than one multi- // line element in the list, or if there is any element that is not // starting on the same line as the previous one ends. if len(list) >= 2 { b := p.lineFor(list[0].Pos()) e := p.lineFor(list[len(list)-1].End()) if 0 < b && b < e { // list spans multiple lines n := 0 // multi-line element count line := b for _, x := range list { xb := p.lineFor(x.Pos()) xe := p.lineFor(x.End()) if line < xb { // x is not starting on the same // line as the previous one ended return true } if xb < xe { // x is a multi-line element n++ } line = xe } return n > 1 } } return false } func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool) { p.setPos(stmt.Pos()) switch s := stmt.(type) { case *ast.BadStmt: p.print("BadStmt") case *ast.DeclStmt: p.decl(s.Decl) case *ast.EmptyStmt: // nothing to do case *ast.LabeledStmt: // a "correcting" unindent immediately following a line break // is applied before the line break if there is no comment // between (see writeWhitespace) p.print(unindent) p.expr(s.Label) p.setPos(s.Colon) p.print(token.COLON, indent) if e, isEmpty := s.Stmt.(*ast.EmptyStmt); isEmpty { if !nextIsRBrace { p.print(newline) p.setPos(e.Pos()) p.print(token.SEMICOLON) break } } else { p.linebreak(p.lineFor(s.Stmt.Pos()), 1, ignore, true) } p.stmt(s.Stmt, nextIsRBrace) case *ast.ExprStmt: const depth = 1 p.expr0(s.X, depth) case *ast.SendStmt: const depth = 1 p.expr0(s.Chan, depth) p.print(blank) p.setPos(s.Arrow) p.print(token.ARROW, blank) p.expr0(s.Value, depth) case *ast.IncDecStmt: const depth = 1 p.expr0(s.X, depth+1) p.setPos(s.TokPos) p.print(s.Tok) case *ast.AssignStmt: depth := 1 if len(s.Lhs) > 1 && len(s.Rhs) > 1 { depth++ } p.exprList(s.Pos(), s.Lhs, depth, 0, s.TokPos, false) p.print(blank) p.setPos(s.TokPos) p.print(s.Tok, blank) p.exprList(s.TokPos, s.Rhs, depth, 0, token.NoPos, false) case *ast.GoStmt: p.print(token.GO, blank) 
p.expr(s.Call) case *ast.DeferStmt: p.print(token.DEFER, blank) p.expr(s.Call) case *ast.ReturnStmt: p.print(token.RETURN) if s.Results != nil { p.print(blank) // Use indentList heuristic to make corner cases look // better (issue 1207). A more systematic approach would // always indent, but this would cause significant // reformatting of the code base and not necessarily // lead to more nicely formatted code in general. if p.indentList(s.Results) { p.print(indent) // Use NoPos so that a newline never goes before // the results (see issue #32854). p.exprList(token.NoPos, s.Results, 1, noIndent, token.NoPos, false) p.print(unindent) } else { p.exprList(token.NoPos, s.Results, 1, 0, token.NoPos, false) } } case *ast.BranchStmt: p.print(s.Tok) if s.Label != nil { p.print(blank) p.expr(s.Label) } case *ast.BlockStmt: p.block(s, 1) case *ast.IfStmt: p.print(token.IF) p.controlClause(false, s.Init, s.Cond, nil) p.block(s.Body, 1) if s.Else != nil { p.print(blank, token.ELSE, blank) switch s.Else.(type) { case *ast.BlockStmt, *ast.IfStmt: p.stmt(s.Else, nextIsRBrace) default: // This can only happen with an incorrectly // constructed AST. Permit it but print so // that it can be parsed without errors. 
p.print(token.LBRACE, indent, formfeed) p.stmt(s.Else, true) p.print(unindent, formfeed, token.RBRACE) } } case *ast.CaseClause: if s.List != nil { p.print(token.CASE, blank) p.exprList(s.Pos(), s.List, 1, 0, s.Colon, false) } else { p.print(token.DEFAULT) } p.setPos(s.Colon) p.print(token.COLON) p.stmtList(s.Body, 1, nextIsRBrace) case *ast.SwitchStmt: p.print(token.SWITCH) p.controlClause(false, s.Init, s.Tag, nil) p.block(s.Body, 0) case *ast.TypeSwitchStmt: p.print(token.SWITCH) if s.Init != nil { p.print(blank) p.stmt(s.Init, false) p.print(token.SEMICOLON) } p.print(blank) p.stmt(s.Assign, false) p.print(blank) p.block(s.Body, 0) case *ast.CommClause: if s.Comm != nil { p.print(token.CASE, blank) p.stmt(s.Comm, false) } else { p.print(token.DEFAULT) } p.setPos(s.Colon) p.print(token.COLON) p.stmtList(s.Body, 1, nextIsRBrace) case *ast.SelectStmt: p.print(token.SELECT, blank) body := s.Body if len(body.List) == 0 && !p.commentBefore(p.posFor(body.Rbrace)) { // print empty select statement w/o comments on one line p.setPos(body.Lbrace) p.print(token.LBRACE) p.setPos(body.Rbrace) p.print(token.RBRACE) } else { p.block(body, 0) } case *ast.ForStmt: p.print(token.FOR) p.controlClause(true, s.Init, s.Cond, s.Post) p.block(s.Body, 1) case *ast.RangeStmt: p.print(token.FOR, blank) if s.Key != nil { p.expr(s.Key) if s.Value != nil { // use position of value following the comma as // comma position for correct comment placement p.setPos(s.Value.Pos()) p.print(token.COMMA, blank) p.expr(s.Value) } p.print(blank) p.setPos(s.TokPos) p.print(s.Tok, blank) } p.print(token.RANGE, blank) p.expr(stripParens(s.X)) p.print(blank) p.block(s.Body, 1) default: panic("unreachable") } } // ---------------------------------------------------------------------------- // Declarations // The keepTypeColumn function determines if the type column of a series of // consecutive const or var declarations must be kept, or if initialization // values (V) can be placed in the type column (T) 
instead. The i'th entry // in the result slice is true if the type column in spec[i] must be kept. // // For example, the declaration: // // const ( // foobar int = 42 // comment // x = 7 // comment // foo // bar = 991 // ) // // leads to the type/values matrix below. A run of value columns (V) can // be moved into the type column if there is no type for any of the values // in that column (we only move entire columns so that they align properly). // // matrix formatted result // matrix // T V -> T V -> true there is a T and so the type // - V - V true column must be kept // - - - - false // - V V - false V is moved into T column func keepTypeColumn(specs []ast.Spec) []bool { m := make([]bool, len(specs)) populate := func(i, j int, keepType bool) { if keepType { for ; i < j; i++ { m[i] = true } } } i0 := -1 // if i0 >= 0 we are in a run and i0 is the start of the run var keepType bool for i, s := range specs { t := s.(*ast.ValueSpec) if t.Values != nil { if i0 < 0 { // start of a run of ValueSpecs with non-nil Values i0 = i keepType = false } } else { if i0 >= 0 { // end of a run populate(i0, i, keepType) i0 = -1 } } if t.Type != nil { keepType = true } } if i0 >= 0 { // end of a run populate(i0, len(specs), keepType) } return m } func (p *printer) valueSpec(s *ast.ValueSpec, keepType bool) { p.setComment(s.Doc) p.identList(s.Names, false) // always present extraTabs := 3 if s.Type != nil || keepType { p.print(vtab) extraTabs-- } if s.Type != nil { p.expr(s.Type) } if s.Values != nil { p.print(vtab, token.ASSIGN, blank) p.exprList(token.NoPos, s.Values, 1, 0, token.NoPos, false) extraTabs-- } if s.Comment != nil { for ; extraTabs > 0; extraTabs-- { p.print(vtab) } p.setComment(s.Comment) } } func sanitizeImportPath(lit *ast.BasicLit) *ast.BasicLit { // Note: An unmodified AST generated by go/parser will already // contain a backward- or double-quoted path string that does // not contain any invalid characters, and most of the work // here is not needed. 
However, a modified or generated AST // may possibly contain non-canonical paths. Do the work in // all cases since it's not too hard and not speed-critical. // if we don't have a proper string, be conservative and return whatever we have if lit.Kind != token.STRING { return lit } s, err := strconv.Unquote(lit.Value) if err != nil { return lit } // if the string is an invalid path, return whatever we have // // spec: "Implementation restriction: A compiler may restrict // ImportPaths to non-empty strings using only characters belonging // to Unicode's L, M, N, P, and S general categories (the Graphic // characters without spaces) and may also exclude the characters // !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character // U+FFFD." if s == "" { return lit } const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD" for _, r := range s { if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) { return lit } } // otherwise, return the double-quoted path s = strconv.Quote(s) if s == lit.Value { return lit // nothing wrong with lit } return &ast.BasicLit{ValuePos: lit.ValuePos, Kind: token.STRING, Value: s} } // The parameter n is the number of specs in the group. If doIndent is set, // multi-line identifier lists in the spec are indented when the first // linebreak is encountered. 
func (p *printer) spec(spec ast.Spec, n int, doIndent bool) { switch s := spec.(type) { case *ast.ImportSpec: p.setComment(s.Doc) if s.Name != nil { p.expr(s.Name) p.print(blank) } p.expr(sanitizeImportPath(s.Path)) p.setComment(s.Comment) p.setPos(s.EndPos) case *ast.ValueSpec: if n != 1 { p.internalError("expected n = 1; got", n) } p.setComment(s.Doc) p.identList(s.Names, doIndent) // always present if s.Type != nil { p.print(blank) p.expr(s.Type) } if s.Values != nil { p.print(blank, token.ASSIGN, blank) p.exprList(token.NoPos, s.Values, 1, 0, token.NoPos, false) } p.setComment(s.Comment) case *ast.TypeSpec: p.setComment(s.Doc) p.expr(s.Name) if s.TypeParams != nil { p.parameters(s.TypeParams, typeTParam) } if n == 1 { p.print(blank) } else { p.print(vtab) } if s.Assign.IsValid() { p.print(token.ASSIGN, blank) } p.expr(s.Type) p.setComment(s.Comment) default: panic("unreachable") } } func (p *printer) genDecl(d *ast.GenDecl) { p.setComment(d.Doc) p.setPos(d.Pos()) p.print(d.Tok, blank) if d.Lparen.IsValid() || len(d.Specs) > 1 { // group of parenthesized declarations p.setPos(d.Lparen) p.print(token.LPAREN) if n := len(d.Specs); n > 0 { p.print(indent, formfeed) if n > 1 && (d.Tok == token.CONST || d.Tok == token.VAR) { // two or more grouped const/var declarations: // determine if the type column must be kept keepType := keepTypeColumn(d.Specs) var line int for i, s := range d.Specs { if i > 0 { p.linebreak(p.lineFor(s.Pos()), 1, ignore, p.linesFrom(line) > 0) } p.recordLine(&line) p.valueSpec(s.(*ast.ValueSpec), keepType[i]) } } else { var line int for i, s := range d.Specs { if i > 0 { p.linebreak(p.lineFor(s.Pos()), 1, ignore, p.linesFrom(line) > 0) } p.recordLine(&line) p.spec(s, n, false) } } p.print(unindent, formfeed) } p.setPos(d.Rparen) p.print(token.RPAREN) } else if len(d.Specs) > 0 { // single declaration p.spec(d.Specs[0], 1, true) } } // sizeCounter is an io.Writer which counts the number of bytes written, // as well as whether a newline character 
// was seen.
type sizeCounter struct {
	hasNewline bool // whether a '\n' or '\f' byte was written
	size       int  // total number of bytes written
}

// Write implements io.Writer, counting the bytes written and recording
// whether a newline or formfeed was seen.
func (c *sizeCounter) Write(p []byte) (int, error) {
	if !c.hasNewline {
		for _, b := range p {
			if b == '\n' || b == '\f' {
				c.hasNewline = true
				break
			}
		}
	}
	c.size += len(p)
	return len(p), nil
}

// nodeSize determines the size of n in chars after formatting.
// The result is <= maxSize if the node fits on one line with at
// most maxSize chars and the formatted output doesn't contain
// any control chars. Otherwise, the result is > maxSize.
func (p *printer) nodeSize(n ast.Node, maxSize int) (size int) {
	// nodeSize invokes the printer, which may invoke nodeSize
	// recursively. For deep composite literal nests, this can
	// lead to an exponential algorithm. Remember previous
	// results to prune the recursion (was issue 1628).
	if size, found := p.nodeSizes[n]; found {
		return size
	}

	size = maxSize + 1 // assume n doesn't fit
	p.nodeSizes[n] = size

	// nodeSize computation must be independent of particular
	// style so that we always get the same decision; print
	// in RawFormat
	cfg := Config{Mode: RawFormat}
	var counter sizeCounter
	if err := cfg.fprint(&counter, p.fset, n, p.nodeSizes); err != nil {
		return
	}
	if counter.size <= maxSize && !counter.hasNewline {
		// n fits in a single line
		size = counter.size
		p.nodeSizes[n] = size
	}
	return
}

// numLines returns the number of lines spanned by node n in the original source.
func (p *printer) numLines(n ast.Node) int {
	if from := n.Pos(); from.IsValid() {
		if to := n.End(); to.IsValid() {
			return p.lineFor(to) - p.lineFor(from) + 1
		}
	}
	return infinity
}

// bodySize is like nodeSize but it is specialized for *ast.BlockStmt's.
func (p *printer) bodySize(b *ast.BlockStmt, maxSize int) int {
	pos1 := b.Pos()
	pos2 := b.Rbrace
	if pos1.IsValid() && pos2.IsValid() && p.lineFor(pos1) != p.lineFor(pos2) {
		// opening and closing brace are on different lines - don't make it a one-liner
		return maxSize + 1
	}
	if len(b.List) > 5 {
		// too many statements - don't make it a one-liner
		return maxSize + 1
	}
	// otherwise, estimate body size
	// (comments preceding the closing brace count toward the size too)
	bodySize := p.commentSizeBefore(p.posFor(pos2))
	for i, s := range b.List {
		if bodySize > maxSize {
			break // no need to continue
		}
		if i > 0 {
			bodySize += 2 // space for a semicolon and blank
		}
		bodySize += p.nodeSize(s, maxSize)
	}
	return bodySize
}

// funcBody prints a function body following a function header of given headerSize.
// If the header's and block's size are "small enough" and the block is "simple enough",
// the block is printed on the current line, without line breaks, spaced from the header
// by sep. Otherwise the block's opening "{" is printed on the current line, followed by
// lines for the block's statements and its closing "}".
func (p *printer) funcBody(headerSize int, sep whiteSpace, b *ast.BlockStmt) {
	if b == nil {
		return
	}

	// save/restore composite literal nesting level
	defer func(level int) {
		p.level = level
	}(p.level)
	p.level = 0

	// max. combined size of header and body for a one-line rendering
	const maxSize = 100
	if headerSize+p.bodySize(b, maxSize) <= maxSize {
		p.print(sep)
		p.setPos(b.Lbrace)
		p.print(token.LBRACE)
		if len(b.List) > 0 {
			p.print(blank)
			for i, s := range b.List {
				if i > 0 {
					p.print(token.SEMICOLON, blank)
				}
				p.stmt(s, i == len(b.List)-1)
			}
			p.print(blank)
		}
		p.print(noExtraLinebreak)
		p.setPos(b.Rbrace)
		p.print(token.RBRACE, noExtraLinebreak)
		return
	}

	if sep != ignore {
		p.print(blank) // always use blank
	}
	p.block(b, 1)
}

// distanceFrom returns the column difference between p.out (the current output
// position) and startOutCol. If the start position is on a different line from
// the current position (or either is unknown), the result is infinity.
func (p *printer) distanceFrom(startPos token.Pos, startOutCol int) int {
	if startPos.IsValid() && p.pos.IsValid() && p.posFor(startPos).Line == p.pos.Line {
		return p.out.Column - startOutCol
	}
	return infinity
}

// funcDecl prints a function or method declaration, including its
// receiver, signature, and (possibly one-line) body.
func (p *printer) funcDecl(d *ast.FuncDecl) {
	p.setComment(d.Doc)
	p.setPos(d.Pos())
	p.print(token.FUNC, blank)
	// We have to save startCol only after emitting FUNC; otherwise it can be on a
	// different line (all whitespace preceding the FUNC is emitted only when the
	// FUNC is emitted).
	startCol := p.out.Column - len("func ")
	if d.Recv != nil {
		p.parameters(d.Recv, funcParam) // method: print receiver
		p.print(blank)
	}
	p.expr(d.Name)
	p.signature(d.Type)
	p.funcBody(p.distanceFrom(d.Pos(), startCol), vtab, d.Body)
}

// decl dispatches to the appropriate printer for a single declaration.
func (p *printer) decl(decl ast.Decl) {
	switch d := decl.(type) {
	case *ast.BadDecl:
		p.setPos(d.Pos())
		p.print("BadDecl")
	case *ast.GenDecl:
		p.genDecl(d)
	case *ast.FuncDecl:
		p.funcDecl(d)
	default:
		panic("unreachable")
	}
}

// ----------------------------------------------------------------------------
// Files

// declToken returns the leading token of a declaration: the GenDecl's
// own token (IMPORT, CONST, TYPE, VAR), FUNC for function declarations,
// or ILLEGAL otherwise.
func declToken(decl ast.Decl) (tok token.Token) {
	tok = token.ILLEGAL
	switch d := decl.(type) {
	case *ast.GenDecl:
		tok = d.Tok
	case *ast.FuncDecl:
		tok = token.FUNC
	}
	return
}

// declList prints a list of top-level declarations, inserting blank
// lines between declarations of different kinds or before documented
// declarations.
func (p *printer) declList(list []ast.Decl) {
	tok := token.ILLEGAL
	for _, d := range list {
		prev := tok
		tok = declToken(d)
		// If the declaration token changed (e.g., from CONST to TYPE)
		// or the next declaration has documentation associated with it,
		// print an empty line between top-level declarations.
		// (because p.linebreak is called with the position of d, which
		// is past any documentation, the minimum requirement is satisfied
		// even w/o the extra getDoc(d) nil-check - leave it in case the
		// linebreak logic improves - there's already a TODO).
		if len(p.output) > 0 {
			// only print line break if we are not at the beginning of the output
			// (i.e., we are not printing only a partial program)
			min := 1
			if prev != tok || getDoc(d) != nil {
				min = 2
			}
			// start a new section if the next declaration is a function
			// that spans multiple lines (see also issue #19544)
			p.linebreak(p.lineFor(d.Pos()), min, ignore, tok == token.FUNC && p.numLines(d) > 1)
		}
		p.decl(d)
	}
}

// file prints an entire *ast.File: the package clause, the declaration
// list, and a final newline.
func (p *printer) file(src *ast.File) {
	p.setComment(src.Doc)
	p.setPos(src.Pos())
	p.print(token.PACKAGE, blank)
	p.expr(src.Name)
	p.declList(src.Decls)
	p.print(newline)
}
gofumpt-0.6.0/internal/govendor/go/printer/printer.go000066400000000000000000001235031455555333400227570ustar00rootroot00000000000000// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package printer implements printing of AST nodes.
package printer

import (
	"fmt"
	"go/ast"
	"go/build/constraint"
	"go/token"
	"io"
	"os"
	"strings"
	"sync"
	"text/tabwriter"
	"unicode"
)

const (
	maxNewlines = 2     // max. number of newlines between source text
	debug       = false // enable for debugging
	infinity    = 1 << 30
)

// whiteSpace encodes a pending formatting action: a literal space
// character or an indentation change.
type whiteSpace byte

const (
	ignore   = whiteSpace(0)
	blank    = whiteSpace(' ')
	vtab     = whiteSpace('\v')
	newline  = whiteSpace('\n')
	formfeed = whiteSpace('\f')
	indent   = whiteSpace('>')
	unindent = whiteSpace('<')
)

// A pmode value represents the current printer mode.
type pmode int

const (
	noExtraBlank     pmode = 1 << iota // disables extra blank after /*-style comment
	noExtraLinebreak                   // disables extra line break after /*-style comment
)

// commentInfo caches information about the current comment group.
type commentInfo struct {
	cindex         int               // current comment index
	comment        *ast.CommentGroup // = printer.comments[cindex]; or nil
	commentOffset  int               // = printer.posFor(printer.comments[cindex].List[0].Pos()).Offset; or infinity
	commentNewline bool              // true if the comment group contains newlines
}

type printer struct {
	// Configuration (does not change after initialization)
	Config
	fset *token.FileSet

	// Current state
	output       []byte       // raw printer result
	indent       int          // current indentation
	level        int          // level == 0: outside composite literal; level > 0: inside composite literal
	mode         pmode        // current printer mode
	endAlignment bool         // if set, terminate alignment immediately
	impliedSemi  bool         // if set, a linebreak implies a semicolon
	lastTok      token.Token  // last token printed (token.ILLEGAL if it's whitespace)
	prevOpen     token.Token  // previous non-brace "open" token (, [, or token.ILLEGAL
	wsbuf        []whiteSpace // delayed white space
	goBuild      []int        // start index of all //go:build comments in output
	plusBuild    []int        // start index of all // +build comments in output

	// Positions
	// The out position differs from the pos position when the result
	// formatting differs from the source formatting (in the amount of
	// white space). If there's a difference and SourcePos is set in
	// ConfigMode, //line directives are used in the output to restore
	// original source positions for a reader.
	pos          token.Position // current position in AST (source) space
	out          token.Position // current position in output space
	last         token.Position // value of pos after calling writeString
	linePtr      *int           // if set, record out.Line for the next token in *linePtr
	sourcePosErr error          // if non-nil, the first error emitting a //line directive

	// The list of all source comments, in order of appearance.
	comments        []*ast.CommentGroup // may be nil
	useNodeComments bool                // if not set, ignore lead and line comments of nodes

	// Information about p.comments[p.cindex]; set up by nextComment.
	commentInfo

	// Cache of already computed node sizes.
	nodeSizes map[ast.Node]int

	// Cache of most recently computed line position.
	cachedPos  token.Pos
	cachedLine int // line corresponding to cachedPos
}

// internalError reports an internal inconsistency. It prints and panics
// only when the package-level debug flag is enabled; otherwise it is a no-op.
func (p *printer) internalError(msg ...any) {
	if debug {
		fmt.Print(p.pos.String() + ": ")
		fmt.Println(msg...)
		panic("mvdan.cc/gofumpt/internal/govendor/go/printer")
	}
}

// commentsHaveNewline reports whether a list of comments belonging to
// an *ast.CommentGroup contains newlines. Because the position information
// may only be partially correct, we also have to read the comment text.
func (p *printer) commentsHaveNewline(list []*ast.Comment) bool {
	// len(list) > 0
	line := p.lineFor(list[0].Pos())
	for i, c := range list {
		if i > 0 && p.lineFor(list[i].Pos()) != line {
			// not all comments on the same line
			return true
		}
		if t := c.Text; len(t) >= 2 && (t[1] == '/' || strings.Contains(t, "\n")) {
			return true
		}
	}
	_ = line
	return false
}

// nextComment advances to the next non-empty comment group and refreshes
// the commentInfo cache; commentOffset becomes infinity when no comments remain.
func (p *printer) nextComment() {
	for p.cindex < len(p.comments) {
		c := p.comments[p.cindex]
		p.cindex++
		if list := c.List; len(list) > 0 {
			p.comment = c
			p.commentOffset = p.posFor(list[0].Pos()).Offset
			p.commentNewline = p.commentsHaveNewline(list)
			return
		}
		// we should not reach here (correct ASTs don't have empty
		// ast.CommentGroup nodes), but be conservative and try again
	}
	// no more comments
	p.commentOffset = infinity
}

// commentBefore reports whether the current comment group occurs
// before the next position in the source code and printing it does
// not introduce implicit semicolons.
func (p *printer) commentBefore(next token.Position) bool {
	return p.commentOffset < next.Offset && (!p.impliedSemi || !p.commentNewline)
}

// commentSizeBefore returns the estimated size of the
// comments on the same line before the next position.
func (p *printer) commentSizeBefore(next token.Position) int {
	// save/restore current p.commentInfo (p.nextComment() modifies it)
	defer func(info commentInfo) {
		p.commentInfo = info
	}(p.commentInfo)

	size := 0
	for p.commentBefore(next) {
		for _, c := range p.comment.List {
			size += len(c.Text)
		}
		p.nextComment()
	}
	return size
}

// recordLine records the output line number for the next non-whitespace
// token in *linePtr. It is used to compute an accurate line number for a
// formatted construct, independent of pending (not yet emitted) whitespace
// or comments.
func (p *printer) recordLine(linePtr *int) {
	p.linePtr = linePtr
}

// linesFrom returns the number of output lines between the current
// output line and the line argument, ignoring any pending (not yet
// emitted) whitespace or comments. It is used to compute an accurate
// size (in number of lines) for a formatted construct.
func (p *printer) linesFrom(line int) int {
	return p.out.Line - line
}

func (p *printer) posFor(pos token.Pos) token.Position {
	// not used frequently enough to cache entire token.Position
	return p.fset.PositionFor(pos, false /* absolute position */)
}

// lineFor returns the source line for pos, caching the most recent lookup.
func (p *printer) lineFor(pos token.Pos) int {
	if pos != p.cachedPos {
		p.cachedPos = pos
		p.cachedLine = p.fset.PositionFor(pos, false /* absolute position */).Line
	}
	return p.cachedLine
}

// writeLineDirective writes a //line directive if necessary.
func (p *printer) writeLineDirective(pos token.Position) {
	if pos.IsValid() && (p.out.Line != pos.Line || p.out.Filename != pos.Filename) {
		if strings.ContainsAny(pos.Filename, "\r\n") {
			if p.sourcePosErr == nil {
				p.sourcePosErr = fmt.Errorf("mvdan.cc/gofumpt/internal/govendor/go/printer: source filename contains unexpected newline character: %q", pos.Filename)
			}
			return
		}

		p.output = append(p.output, tabwriter.Escape) // protect '\n' in //line from tabwriter interpretation
		p.output = append(p.output, fmt.Sprintf("//line %s:%d\n", pos.Filename, pos.Line)...)
		p.output = append(p.output, tabwriter.Escape)
		// p.out must match the //line directive
		p.out.Filename = pos.Filename
		p.out.Line = pos.Line
	}
}

// writeIndent writes indentation.
func (p *printer) writeIndent() {
	// use "hard" htabs - indentation columns
	// must not be discarded by the tabwriter
	n := p.Config.Indent + p.indent // include base indentation
	for i := 0; i < n; i++ {
		p.output = append(p.output, '\t')
	}

	// update positions
	p.pos.Offset += n
	p.pos.Column += n
	p.out.Column += n
}

// writeByte writes ch n times to p.output and updates p.pos.
// Only used to write formatting (white space) characters.
// Indentation is emitted lazily, only when the first byte of a
// line is about to be written (p.out.Column == 1).
func (p *printer) writeByte(ch byte, n int) {
	if p.endAlignment {
		// Ignore any alignment control character;
		// and at the end of the line, break with
		// a formfeed to indicate termination of
		// existing columns.
		switch ch {
		case '\t', '\v':
			ch = ' '
		case '\n', '\f':
			ch = '\f'
			p.endAlignment = false
		}
	}

	if p.out.Column == 1 {
		// no need to write line directives before white space
		p.writeIndent()
	}

	for i := 0; i < n; i++ {
		p.output = append(p.output, ch)
	}

	// update positions
	p.pos.Offset += n
	if ch == '\n' || ch == '\f' {
		p.pos.Line += n
		p.out.Line += n
		p.pos.Column = 1
		p.out.Column = 1
		return
	}
	p.pos.Column += n
	p.out.Column += n
}

// writeString writes the string s to p.output and updates p.pos, p.out,
// and p.last. If isLit is set, s is escaped w/ tabwriter.Escape characters
// to protect s from being interpreted by the tabwriter.
//
// Note: writeString is only used to write Go tokens, literals, and
// comments, all of which must be written literally. Thus, it is correct
// to always set isLit = true. However, setting it explicitly only when
// needed (i.e., when we don't know that s contains no tabs or line breaks)
// avoids processing extra escape characters and reduces run time of the
// printer benchmark by up to 10%.
func (p *printer) writeString(pos token.Position, s string, isLit bool) {
	if p.out.Column == 1 {
		// write a //line directive (if requested) before the first
		// non-whitespace byte of a line, then the line's indentation
		if p.Config.Mode&SourcePos != 0 {
			p.writeLineDirective(pos)
		}
		p.writeIndent()
	}

	if pos.IsValid() {
		// update p.pos (if pos is invalid, continue with existing p.pos)
		// Note: Must do this after handling line beginnings because
		// writeIndent updates p.pos if there's indentation, but p.pos
		// is the position of s.
		p.pos = pos
	}

	if isLit {
		// Protect s such that is passes through the tabwriter
		// unchanged. Note that valid Go programs cannot contain
		// tabwriter.Escape bytes since they do not appear in legal
		// UTF-8 sequences.
		p.output = append(p.output, tabwriter.Escape)
	}

	if debug {
		p.output = append(p.output, fmt.Sprintf("/*%s*/", pos)...) // do not update p.pos!
	}
	p.output = append(p.output, s...)

	// update positions
	nlines := 0
	var li int // index of last newline; valid if nlines > 0
	for i := 0; i < len(s); i++ {
		// Raw string literals may contain any character except back quote (`).
		if ch := s[i]; ch == '\n' || ch == '\f' {
			// account for line break
			nlines++
			li = i
			// A line break inside a literal will break whatever column
			// formatting is in place; ignore any further alignment through
			// the end of the line.
			p.endAlignment = true
		}
	}
	p.pos.Offset += len(s)
	if nlines > 0 {
		p.pos.Line += nlines
		p.out.Line += nlines
		c := len(s) - li
		p.pos.Column = c
		p.out.Column = c
	} else {
		p.pos.Column += len(s)
		p.out.Column += len(s)
	}

	if isLit {
		p.output = append(p.output, tabwriter.Escape)
	}

	p.last = p.pos
}

// writeCommentPrefix writes the whitespace before a comment.
// If there is any pending whitespace, it consumes as much of
// it as is likely to help position the comment nicely.
// pos is the comment position, next the position of the item
// after all pending comments, prev is the previous comment in
// a group of comments (or nil), and tok is the next token.
func (p *printer) writeCommentPrefix(pos, next token.Position, prev *ast.Comment, tok token.Token) {
	if len(p.output) == 0 {
		// the comment is the first item to be printed - don't write any whitespace
		return
	}

	if pos.IsValid() && pos.Filename != p.last.Filename {
		// comment in a different file - separate with newlines
		p.writeByte('\f', maxNewlines)
		return
	}

	if pos.Line == p.last.Line && (prev == nil || prev.Text[1] != '/') {
		// comment on the same line as last item:
		// separate with at least one separator
		hasSep := false
		if prev == nil {
			// first comment of a comment group
			j := 0
			for i, ch := range p.wsbuf {
				switch ch {
				case blank:
					// ignore any blanks before a comment
					p.wsbuf[i] = ignore
					continue
				case vtab:
					// respect existing tabs - important
					// for proper formatting of commented structs
					hasSep = true
					continue
				case indent:
					// apply pending indentation
					continue
				}
				j = i
				break
			}
			p.writeWhitespace(j)
		}
		// make sure there is at least one separator
		if !hasSep {
			sep := byte('\t')
			if pos.Line == next.Line {
				// next item is on the same line as the comment
				// (which must be a /*-style comment): separate
				// with a blank instead of a tab
				sep = ' '
			}
			p.writeByte(sep, 1)
		}
	} else {
		// comment on a different line:
		// separate with at least one line break
		droppedLinebreak := false
		j := 0
		for i, ch := range p.wsbuf {
			switch ch {
			case blank, vtab:
				// ignore any horizontal whitespace before line breaks
				p.wsbuf[i] = ignore
				continue
			case indent:
				// apply pending indentation
				continue
			case unindent:
				// if this is not the last unindent, apply it
				// as it is (likely) belonging to the last
				// construct (e.g., a multi-line expression list)
				// and is not part of closing a block
				if i+1 < len(p.wsbuf) && p.wsbuf[i+1] == unindent {
					continue
				}
				// if the next token is not a closing }, apply the unindent
				// if it appears that the comment is aligned with the
				// token; otherwise assume the unindent is part of a
				// closing block and stop (this scenario appears with
				// comments before a case label where the comments
				// apply to the next case instead of the current one)
				if tok != token.RBRACE && pos.Column == next.Column {
					continue
				}
			case newline, formfeed:
				p.wsbuf[i] = ignore
				droppedLinebreak = prev == nil // record only if first comment of a group
			}
			j = i
			break
		}
		p.writeWhitespace(j)

		// determine number of linebreaks before the comment
		n := 0
		if pos.IsValid() && p.last.IsValid() {
			n = pos.Line - p.last.Line
			if n < 0 { // should never happen
				n = 0
			}
		}

		// at the package scope level only (p.indent == 0),
		// add an extra newline if we dropped one before:
		// this preserves a blank line before documentation
		// comments at the package scope level (issue 2570)
		if p.indent == 0 && droppedLinebreak {
			n++
		}

		// make sure there is at least one line break
		// if the previous comment was a line comment
		if n == 0 && prev != nil && prev.Text[1] == '/' {
			n = 1
		}

		if n > 0 {
			// use formfeeds to break columns before a comment;
			// this is analogous to using formfeeds to separate
			// individual lines of /*-style comments
			p.writeByte('\f', nlimit(n))
		}
	}
}

// isBlank reports whether s contains only white space
// (only tabs and blanks can appear in the printer's context).
func isBlank(s string) bool {
	for i := 0; i < len(s); i++ {
		if s[i] > ' ' {
			return false
		}
	}
	return true
}

// commonPrefix returns the common prefix of a and b.
func commonPrefix(a, b string) string {
	i := 0
	for i < len(a) && i < len(b) && a[i] == b[i] && (a[i] <= ' ' || a[i] == '*') {
		i++
	}
	return a[0:i]
}

// trimRight returns s with trailing whitespace removed.
func trimRight(s string) string {
	return strings.TrimRightFunc(s, unicode.IsSpace)
}

// stripCommonPrefix removes a common prefix from /*-style comment lines (unless no
// comment line is indented, all but the first line have some form of space prefix).
// The prefix is computed using heuristics such that is likely that the comment
// contents are nicely laid out after re-printing each line using the printer's
// current indentation.
func stripCommonPrefix(lines []string) {
	if len(lines) <= 1 {
		return // at most one line - nothing to do
	}
	// len(lines) > 1

	// The heuristic in this function tries to handle a few
	// common patterns of /*-style comments: Comments where
	// the opening /* and closing */ are aligned and the
	// rest of the comment text is aligned and indented with
	// blanks or tabs, cases with a vertical "line of stars"
	// on the left, and cases where the closing */ is on the
	// same line as the last comment text.

	// Compute maximum common white prefix of all but the first,
	// last, and blank lines, and replace blank lines with empty
	// lines (the first line starts with /* and has no prefix).
	// In cases where only the first and last lines are not blank,
	// such as two-line comments, or comments where all inner lines
	// are blank, consider the last line for the prefix computation
	// since otherwise the prefix would be empty.
	//
	// Note that the first and last line are never empty (they
	// contain the opening /* and closing */ respectively) and
	// thus they can be ignored by the blank line check.
	prefix := ""
	prefixSet := false
	if len(lines) > 2 {
		for i, line := range lines[1 : len(lines)-1] {
			if isBlank(line) {
				lines[1+i] = "" // range starts with lines[1]
			} else {
				if !prefixSet {
					prefix = line
					prefixSet = true
				}
				prefix = commonPrefix(prefix, line)
			}
		}
	}
	// If we don't have a prefix yet, consider the last line.
	if !prefixSet {
		line := lines[len(lines)-1]
		prefix = commonPrefix(line, line)
	}

	/*
	 * Check for vertical "line of stars" and correct prefix accordingly.
	 */
	lineOfStars := false
	if p, _, ok := strings.Cut(prefix, "*"); ok {
		// remove trailing blank from prefix so stars remain aligned
		prefix = strings.TrimSuffix(p, " ")
		lineOfStars = true
	} else {
		// No line of stars present.
		// Determine the white space on the first line after the /*
		// and before the beginning of the comment text, assume two
		// blanks instead of the /* unless the first character after
		// the /* is a tab. If the first comment line is empty but
		// for the opening /*, assume up to 3 blanks or a tab. This
		// whitespace may be found as suffix in the common prefix.
		first := lines[0]
		if isBlank(first[2:]) {
			// no comment text on the first line:
			// reduce prefix by up to 3 blanks or a tab
			// if present - this keeps comment text indented
			// relative to the /* and */'s if it was indented
			// in the first place
			i := len(prefix)
			for n := 0; n < 3 && i > 0 && prefix[i-1] == ' '; n++ {
				i--
			}
			if i == len(prefix) && i > 0 && prefix[i-1] == '\t' {
				i--
			}
			prefix = prefix[0:i]
		} else {
			// comment text on the first line
			suffix := make([]byte, len(first))
			n := 2 // start after opening /*
			for n < len(first) && first[n] <= ' ' {
				suffix[n] = first[n]
				n++
			}
			if n > 2 && suffix[2] == '\t' {
				// assume the '\t' compensates for the /*
				suffix = suffix[2:n]
			} else {
				// otherwise assume two blanks
				suffix[0], suffix[1] = ' ', ' '
				suffix = suffix[0:n]
			}
			// Shorten the computed common prefix by the length of
			// suffix, if it is found as suffix of the prefix.
			prefix = strings.TrimSuffix(prefix, string(suffix))
		}
	}

	// Handle last line: If it only contains a closing */, align it
	// with the opening /*, otherwise align the text with the other
	// lines.
	last := lines[len(lines)-1]
	closing := "*/"
	before, _, _ := strings.Cut(last, closing) // closing always present
	if isBlank(before) {
		// last line only contains closing */
		if lineOfStars {
			closing = " */" // add blank to align final star
		}
		lines[len(lines)-1] = prefix + closing
	} else {
		// last line contains more comment text - assume
		// it is aligned like the other lines and include
		// in prefix computation
		prefix = commonPrefix(prefix, last)
	}

	// Remove the common prefix from all but the first and empty lines.
	// (empty lines were blanked out above and have nothing to strip)
	for i, line := range lines {
		if i > 0 && line != "" {
			lines[i] = line[len(prefix):]
		}
	}
}

// writeComment prints a single comment, re-indenting /*-style comments
// line by line and preserving //-style line directives and build tags.
func (p *printer) writeComment(comment *ast.Comment) {
	text := comment.Text
	pos := p.posFor(comment.Pos())

	const linePrefix = "//line "
	if strings.HasPrefix(text, linePrefix) && (!pos.IsValid() || pos.Column == 1) {
		// Possibly a //-style line directive.
		// Suspend indentation temporarily to keep line directive valid.
		defer func(indent int) { p.indent = indent }(p.indent)
		p.indent = 0
	}

	// shortcut common case of //-style comments
	if text[1] == '/' {
		if constraint.IsGoBuild(text) {
			p.goBuild = append(p.goBuild, len(p.output))
		} else if constraint.IsPlusBuild(text) {
			p.plusBuild = append(p.plusBuild, len(p.output))
		}
		p.writeString(pos, trimRight(text), true)
		return
	}

	// for /*-style comments, print line by line and let the
	// write function take care of the proper indentation
	lines := strings.Split(text, "\n")

	// The comment started in the first column but is going
	// to be indented. For an idempotent result, add indentation
	// to all lines such that they look like they were indented
	// before - this will make sure the common prefix computation
	// is the same independent of how many times formatting is
	// applied (was issue 1835).
	if pos.IsValid() && pos.Column == 1 && p.indent > 0 {
		for i, line := range lines[1:] {
			lines[1+i] = "   " + line
		}
	}

	stripCommonPrefix(lines)

	// write comment lines, separated by formfeed,
	// without a line break after the last line
	for i, line := range lines {
		if i > 0 {
			p.writeByte('\f', 1)
			pos = p.pos
		}
		if len(line) > 0 {
			p.writeString(pos, trimRight(line), true)
		}
	}
}

// writeCommentSuffix writes a line break after a comment if indicated
// and processes any leftover indentation information. If a line break
// is needed, the kind of break (newline vs formfeed) depends on the
// pending whitespace. The writeCommentSuffix result indicates if a
// newline was written or if a formfeed was dropped from the whitespace
// buffer.
func (p *printer) writeCommentSuffix(needsLinebreak bool) (wroteNewline, droppedFF bool) {
	for i, ch := range p.wsbuf {
		switch ch {
		case blank, vtab:
			// ignore trailing whitespace
			p.wsbuf[i] = ignore
		case indent, unindent:
			// don't lose indentation information
		case newline, formfeed:
			// if we need a line break, keep exactly one
			// but remember if we dropped any formfeeds
			if needsLinebreak {
				needsLinebreak = false
				wroteNewline = true
			} else {
				if ch == formfeed {
					droppedFF = true
				}
				p.wsbuf[i] = ignore
			}
		}
	}
	p.writeWhitespace(len(p.wsbuf))

	// make sure we have a line break
	if needsLinebreak {
		p.writeByte('\n', 1)
		wroteNewline = true
	}

	return
}

// containsLinebreak reports whether the whitespace buffer contains any line breaks.
func (p *printer) containsLinebreak() bool {
	for _, ch := range p.wsbuf {
		if ch == newline || ch == formfeed {
			return true
		}
	}
	return false
}

// intersperseComments consumes all comments that appear before the next token
// tok and prints it together with the buffered whitespace (i.e., the whitespace
// that needs to be written before the next token). A heuristic is used to mix
// the comments and whitespace. The intersperseComments result indicates if a
// newline was written or if a formfeed was dropped from the whitespace buffer.
func (p *printer) intersperseComments(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) {
	var last *ast.Comment
	for p.commentBefore(next) {
		list := p.comment.List
		changed := false
		if p.lastTok != token.IMPORT && // do not rewrite cgo's import "C" comments
			p.posFor(p.comment.Pos()).Column == 1 &&
			p.posFor(p.comment.End()+1) == next {
			// Unindented comment abutting next token position:
			// a top-level doc comment.
			list = formatDocComment(list)
			changed = true

			if len(p.comment.List) > 0 && len(list) == 0 {
				// The doc comment was removed entirely.
				// Keep preceding whitespace.
				p.writeCommentPrefix(p.posFor(p.comment.Pos()), next, last, tok)
				// Change print state to continue at next.
				p.pos = next
				p.last = next
				// There can't be any more comments.
				p.nextComment()
				return p.writeCommentSuffix(false)
			}
		}
		for _, c := range list {
			p.writeCommentPrefix(p.posFor(c.Pos()), next, last, tok)
			p.writeComment(c)
			last = c
		}
		// In case list was rewritten, change print state to where
		// the original list would have ended.
		if len(p.comment.List) > 0 && changed {
			last = p.comment.List[len(p.comment.List)-1]
			p.pos = p.posFor(last.End())
			p.last = p.pos
		}
		p.nextComment()
	}

	if last != nil {
		// If the last comment is a /*-style comment and the next item
		// follows on the same line but is not a comma, and not a "closing"
		// token immediately following its corresponding "opening" token,
		// add an extra separator unless explicitly disabled. Use a blank
		// as separator unless we have pending linebreaks, they are not
		// disabled, and we are outside a composite literal, in which case
		// we want a linebreak (issue 15137).
		// TODO(gri) This has become overly complicated. We should be able
		// to track whether we're inside an expression or statement and
		// use that information to decide more directly.
		needsLinebreak := false
		if p.mode&noExtraBlank == 0 &&
			last.Text[1] == '*' && p.lineFor(last.Pos()) == next.Line &&
			tok != token.COMMA &&
			(tok != token.RPAREN || p.prevOpen == token.LPAREN) &&
			(tok != token.RBRACK || p.prevOpen == token.LBRACK) {
			if p.containsLinebreak() && p.mode&noExtraLinebreak == 0 && p.level == 0 {
				needsLinebreak = true
			} else {
				p.writeByte(' ', 1)
			}
		}
		// Ensure that there is a line break after a //-style comment,
		// before EOF, and before a closing '}' unless explicitly disabled.
		if last.Text[1] == '/' ||
			tok == token.EOF ||
			tok == token.RBRACE && p.mode&noExtraLinebreak == 0 {
			needsLinebreak = true
		}
		return p.writeCommentSuffix(needsLinebreak)
	}

	// no comment was written - we should never reach here since
	// intersperseComments should not be called in that case
	p.internalError("intersperseComments called without pending comments")
	return
}

// writeWhitespace writes the first n whitespace entries.
func (p *printer) writeWhitespace(n int) {
	// write entries
	for i := 0; i < n; i++ {
		switch ch := p.wsbuf[i]; ch {
		case ignore:
			// ignore!
		case indent:
			p.indent++
		case unindent:
			p.indent--
			if p.indent < 0 {
				p.internalError("negative indentation:", p.indent)
				p.indent = 0
			}
		case newline, formfeed:
			// A line break immediately followed by a "correcting"
			// unindent is swapped with the unindent - this permits
			// proper label positioning. If a comment is between
			// the line break and the label, the unindent is not
			// part of the comment whitespace prefix and the comment
			// will be positioned correctly indented.
			if i+1 < n && p.wsbuf[i+1] == unindent {
				// Use a formfeed to terminate the current section.
				// Otherwise, a long label name on the next line leading
				// to a wide column may increase the indentation column
				// of lines before the label; effectively leading to wrong
				// indentation.
				p.wsbuf[i], p.wsbuf[i+1] = unindent, formfeed
				i-- // do it again
				continue
			}
			fallthrough
		default:
			p.writeByte(byte(ch), 1)
		}
	}

	// shift remaining entries down
	l := copy(p.wsbuf, p.wsbuf[n:])
	p.wsbuf = p.wsbuf[:l]
}

// ----------------------------------------------------------------------------
// Printing interface

// nlimit limits n to maxNewlines.
func nlimit(n int) int {
	if n > maxNewlines {
		n = maxNewlines
	}
	return n
}

// mayCombine reports whether printing prev immediately followed by a token
// starting with next could merge them into a different token sequence,
// in which case a separating blank is required.
func mayCombine(prev token.Token, next byte) (b bool) {
	switch prev {
	case token.INT:
		b = next == '.' // 1.
	case token.ADD:
		b = next == '+' // ++
	case token.SUB:
		b = next == '-' // --
	case token.QUO:
		b = next == '*' // /*
	case token.LSS:
		b = next == '-' || next == '<' // <- or <<
	case token.AND:
		b = next == '&' || next == '^' // && or &^
	}
	return
}

// setPos records pos (if valid) as the source position of the next item.
func (p *printer) setPos(pos token.Pos) {
	if pos.IsValid() {
		p.pos = p.posFor(pos) // accurate position of next item
	}
}

// print prints a list of "items" (roughly corresponding to syntactic
// tokens, but also including whitespace and formatting information).
// It is the only print function that should be called directly from
// any of the AST printing functions in nodes.go.
//
// Whitespace is accumulated until a non-whitespace token appears. Any
// comments that need to appear before that token are printed first,
// taking into account the amount and structure of any pending white-
// space for best comment placement. Then, any leftover whitespace is
// printed, followed by the actual token.
func (p *printer) print(args ...any) {
	for _, arg := range args {
		// information about the current arg
		var data string
		var isLit bool
		var impliedSemi bool // value for p.impliedSemi after this arg

		// record previous opening token, if any
		switch p.lastTok {
		case token.ILLEGAL:
			// ignore (white space)
		case token.LPAREN, token.LBRACK:
			p.prevOpen = p.lastTok
		default:
			// other tokens followed any opening token
			p.prevOpen = token.ILLEGAL
		}

		switch x := arg.(type) {
		case pmode:
			// toggle printer mode
			p.mode ^= x
			continue

		case whiteSpace:
			if x == ignore {
				// don't add ignore's to the buffer; they
				// may screw up "correcting" unindents (see
				// LabeledStmt)
				continue
			}
			i := len(p.wsbuf)
			if i == cap(p.wsbuf) {
				// Whitespace sequences are very short so this should
				// never happen. Handle gracefully (but possibly with
				// bad comment placement) if it does happen.
				p.writeWhitespace(i)
				i = 0
			}
			p.wsbuf = p.wsbuf[0 : i+1]
			p.wsbuf[i] = x
			if x == newline || x == formfeed {
				// newlines affect the current state (p.impliedSemi)
				// and not the state after printing arg (impliedSemi)
				// because comments can be interspersed before the arg
				// in this case
				p.impliedSemi = false
			}
			p.lastTok = token.ILLEGAL
			continue

		case *ast.Ident:
			data = x.Name
			impliedSemi = true
			p.lastTok = token.IDENT

		case *ast.BasicLit:
			data = x.Value
			isLit = true
			impliedSemi = true
			p.lastTok = x.Kind

		case token.Token:
			s := x.String()
			if mayCombine(p.lastTok, s[0]) {
				// the previous and the current token must be
				// separated by a blank otherwise they combine
				// into a different incorrect token sequence
				// (except for token.INT followed by a '.' this
				// should never happen because it is taken care
				// of via binary expression formatting)
				if len(p.wsbuf) != 0 {
					p.internalError("whitespace buffer not empty")
				}
				p.wsbuf = p.wsbuf[0:1]
				p.wsbuf[0] = ' '
			}
			data = s
			// some keywords followed by a newline imply a semicolon
			switch x {
			case token.BREAK, token.CONTINUE, token.FALLTHROUGH, token.RETURN,
				token.INC, token.DEC, token.RPAREN, token.RBRACK, token.RBRACE:
				impliedSemi = true
			}
			p.lastTok = x

		case string:
			// incorrect AST - print error message
			data = x
			isLit = true
			impliedSemi = true
			p.lastTok = token.STRING

		default:
			fmt.Fprintf(os.Stderr, "print: unsupported argument %v (%T)\n", arg, arg)
			panic("mvdan.cc/gofumpt/internal/govendor/go/printer type")
		}
		// data != ""

		next := p.pos // estimated/accurate position of next item
		wroteNewline, droppedFF := p.flush(next, p.lastTok)

		// intersperse extra newlines if present in the source and
		// if they don't cause extra semicolons (don't do this in
		// flush as it will cause extra newlines at the end of a file)
		if !p.impliedSemi {
			n := nlimit(next.Line - p.pos.Line)
			// don't exceed maxNewlines if we already wrote one
			if wroteNewline && n == maxNewlines {
				n = maxNewlines - 1
			}
			if n > 0 {
				ch := byte('\n')
				if droppedFF {
					ch = '\f' // use formfeed since we dropped one before
				}
				p.writeByte(ch, n)
				impliedSemi = false
			}
		}

		// the next token starts now - record its line number if requested
		if p.linePtr != nil {
			*p.linePtr = p.out.Line
			p.linePtr = nil
		}

		p.writeString(next, data, isLit)
		p.impliedSemi = impliedSemi
	}
}

// flush prints any pending comments and whitespace occurring textually
// before the position of the next token tok. The flush result indicates
// if a newline was written or if a formfeed was dropped from the whitespace
// buffer.
func (p *printer) flush(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) {
	if p.commentBefore(next) {
		// if there are comments before the next item, intersperse them
		wroteNewline, droppedFF = p.intersperseComments(next, tok)
	} else {
		// otherwise, write any leftover whitespace
		p.writeWhitespace(len(p.wsbuf))
	}
	return
}

// getDoc returns the ast.CommentGroup associated with n, if any.
func getDoc(n ast.Node) *ast.CommentGroup {
	switch n := n.(type) {
	case *ast.Field:
		return n.Doc
	case *ast.ImportSpec:
		return n.Doc
	case *ast.ValueSpec:
		return n.Doc
	case *ast.TypeSpec:
		return n.Doc
	case *ast.GenDecl:
		return n.Doc
	case *ast.FuncDecl:
		return n.Doc
	case *ast.File:
		return n.Doc
	}
	return nil
}

// getLastComment returns the last trailing ast.CommentGroup associated
// with n, if any.
func getLastComment(n ast.Node) *ast.CommentGroup {
	switch n := n.(type) {
	case *ast.Field:
		return n.Comment
	case *ast.ImportSpec:
		return n.Comment
	case *ast.ValueSpec:
		return n.Comment
	case *ast.TypeSpec:
		return n.Comment
	case *ast.GenDecl:
		if len(n.Specs) > 0 {
			return getLastComment(n.Specs[len(n.Specs)-1])
		}
	case *ast.File:
		if len(n.Comments) > 0 {
			return n.Comments[len(n.Comments)-1]
		}
	}
	return nil
}

// printNode formats the given AST node (or *CommentedNode), restricting
// the comment list to the node's source range, and returns the first
// //line-directive error recorded, if any.
func (p *printer) printNode(node any) error {
	// unpack *CommentedNode, if any
	var comments []*ast.CommentGroup
	if cnode, ok := node.(*CommentedNode); ok {
		node = cnode.Node
		comments = cnode.Comments
	}

	if comments != nil {
		// commented node - restrict comment list to relevant range
		n, ok := node.(ast.Node)
		if !ok {
			goto unsupported
		}
		beg := n.Pos()
		end := n.End()
		// if the node has associated documentation,
		// include that commentgroup in the range
		// (the comment list is sorted in the order
		// of the comment appearance in the source code)
		if doc := getDoc(n); doc != nil {
			beg = doc.Pos()
		}
		if com := getLastComment(n); com != nil {
			if e := com.End(); e > end {
				end = e
			}
		}

		// token.Pos values are global offsets, we can
		// compare them directly
		i := 0
		for i < len(comments) && comments[i].End() < beg {
			i++
		}
		j := i
		for j < len(comments) && comments[j].Pos() < end {
			j++
		}
		if i < j {
			p.comments = comments[i:j]
		}
	} else if n, ok := node.(*ast.File); ok {
		// use ast.File comments, if any
		p.comments = n.Comments
	}

	// if there are no comments, use node comments
	p.useNodeComments = p.comments == nil

	// get comments ready for use
	p.nextComment()

	p.print(pmode(0))

	// format node
	switch n := node.(type) {
	case ast.Expr:
		p.expr(n)
	case ast.Stmt:
		// A labeled statement will un-indent to position the label.
		// Set p.indent to 1 so we don't get indent "underflow".
		if _, ok := n.(*ast.LabeledStmt); ok {
			p.indent = 1
		}
		p.stmt(n, false)
	case ast.Decl:
		p.decl(n)
	case ast.Spec:
		p.spec(n, 1, false)
	case []ast.Stmt:
		// A labeled statement will un-indent to position the label.
		// Set p.indent to 1 so we don't get indent "underflow".
		for _, s := range n {
			if _, ok := s.(*ast.LabeledStmt); ok {
				p.indent = 1
			}
		}
		p.stmtList(n, 0, false)
	case []ast.Decl:
		p.declList(n)
	case *ast.File:
		p.file(n)
	default:
		goto unsupported
	}

	return p.sourcePosErr

unsupported:
	return fmt.Errorf("mvdan.cc/gofumpt/internal/govendor/go/printer: unsupported node type %T", node)
}

// ----------------------------------------------------------------------------
// Trimmer

// A trimmer is an io.Writer filter for stripping tabwriter.Escape
// characters, trailing blanks and tabs, and for converting formfeed
// and vtab characters into newlines and htabs (in case no tabwriter
// is used). Text bracketed by tabwriter.Escape characters is passed
// through unchanged.
type trimmer struct { output io.Writer state int space []byte } // trimmer is implemented as a state machine. // It can be in one of the following states: const ( inSpace = iota // inside space inEscape // inside text bracketed by tabwriter.Escapes inText // inside text ) func (p *trimmer) resetSpace() { p.state = inSpace p.space = p.space[0:0] } // Design note: It is tempting to eliminate extra blanks occurring in // whitespace in this function as it could simplify some // of the blanks logic in the node printing functions. // However, this would mess up any formatting done by // the tabwriter. var aNewline = []byte("\n") func (p *trimmer) Write(data []byte) (n int, err error) { // invariants: // p.state == inSpace: // p.space is unwritten // p.state == inEscape, inText: // data[m:n] is unwritten m := 0 var b byte for n, b = range data { if b == '\v' { b = '\t' // convert to htab } switch p.state { case inSpace: switch b { case '\t', ' ': p.space = append(p.space, b) case '\n', '\f': p.resetSpace() // discard trailing space _, err = p.output.Write(aNewline) case tabwriter.Escape: _, err = p.output.Write(p.space) p.state = inEscape m = n + 1 // +1: skip tabwriter.Escape default: _, err = p.output.Write(p.space) p.state = inText m = n } case inEscape: if b == tabwriter.Escape { _, err = p.output.Write(data[m:n]) p.resetSpace() } case inText: switch b { case '\t', ' ': _, err = p.output.Write(data[m:n]) p.resetSpace() p.space = append(p.space, b) case '\n', '\f': _, err = p.output.Write(data[m:n]) p.resetSpace() if err == nil { _, err = p.output.Write(aNewline) } case tabwriter.Escape: _, err = p.output.Write(data[m:n]) p.state = inEscape m = n + 1 // +1: skip tabwriter.Escape } default: panic("unreachable") } if err != nil { return } } n = len(data) switch p.state { case inEscape, inText: _, err = p.output.Write(data[m:n]) p.resetSpace() } return } // ---------------------------------------------------------------------------- // Public interface // A Mode value is 
a set of flags (or 0). They control printing. type Mode uint const ( RawFormat Mode = 1 << iota // do not use a tabwriter; if set, UseSpaces is ignored TabIndent // use tabs for indentation independent of UseSpaces UseSpaces // use spaces instead of tabs for alignment SourcePos // emit //line directives to preserve original source positions ) // The mode below is not included in printer's public API because // editing code text is deemed out of scope. Because this mode is // unexported, it's also possible to modify or remove it based on // the evolving needs of mvdan.cc/gofumpt/internal/govendor/go/format and cmd/gofmt without breaking // users. See discussion in CL 240683. const ( // normalizeNumbers means to canonicalize number // literal prefixes and exponents while printing. // // This value is known in and used by mvdan.cc/gofumpt/internal/govendor/go/format and cmd/gofmt. // It is currently more convenient and performant for those // packages to apply number normalization during printing, // rather than by modifying the AST in advance. normalizeNumbers Mode = 1 << 30 ) // A Config node controls the output of Fprint. type Config struct { Mode Mode // default: 0 Tabwidth int // default: 8 Indent int // default: 0 (all code is indented at least by this much) } var printerPool = sync.Pool{ New: func() any { return &printer{ // Whitespace sequences are short. wsbuf: make([]whiteSpace, 0, 16), // We start the printer with a 16K output buffer, which is currently // larger than about 80% of Go files in the standard library. 
output: make([]byte, 0, 16<<10), } }, } func newPrinter(cfg *Config, fset *token.FileSet, nodeSizes map[ast.Node]int) *printer { p := printerPool.Get().(*printer) *p = printer{ Config: *cfg, fset: fset, pos: token.Position{Line: 1, Column: 1}, out: token.Position{Line: 1, Column: 1}, wsbuf: p.wsbuf[:0], nodeSizes: nodeSizes, cachedPos: -1, output: p.output[:0], } return p } func (p *printer) free() { // Hard limit on buffer size; see https://golang.org/issue/23199. if cap(p.output) > 64<<10 { return } printerPool.Put(p) } // fprint implements Fprint and takes a nodesSizes map for setting up the printer state. func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node any, nodeSizes map[ast.Node]int) (err error) { // print node p := newPrinter(cfg, fset, nodeSizes) defer p.free() if err = p.printNode(node); err != nil { return } // print outstanding comments p.impliedSemi = false // EOF acts like a newline p.flush(token.Position{Offset: infinity, Line: infinity}, token.EOF) // output is buffered in p.output now. // fix //go:build and // +build comments if needed. p.fixGoBuildLines() // redirect output through a trimmer to eliminate trailing whitespace // (Input to a tabwriter must be untrimmed since trailing tabs provide // formatting information. The tabwriter could provide trimming // functionality but no tabwriter is used when RawFormat is set.) 
output = &trimmer{output: output} // redirect output through a tabwriter if necessary if cfg.Mode&RawFormat == 0 { minwidth := cfg.Tabwidth padchar := byte('\t') if cfg.Mode&UseSpaces != 0 { padchar = ' ' } twmode := tabwriter.DiscardEmptyColumns if cfg.Mode&TabIndent != 0 { minwidth = 0 twmode |= tabwriter.TabIndent } output = tabwriter.NewWriter(output, minwidth, cfg.Tabwidth, 1, padchar, twmode) } // write printer result via tabwriter/trimmer to output if _, err = output.Write(p.output); err != nil { return } // flush tabwriter, if any if tw, _ := output.(*tabwriter.Writer); tw != nil { err = tw.Flush() } return } // A CommentedNode bundles an AST node and corresponding comments. // It may be provided as argument to any of the Fprint functions. type CommentedNode struct { Node any // *ast.File, or ast.Expr, ast.Decl, ast.Spec, or ast.Stmt Comments []*ast.CommentGroup } // Fprint "pretty-prints" an AST node to output for a given configuration cfg. // Position information is interpreted relative to the file set fset. // The node type must be *ast.File, *CommentedNode, []ast.Decl, []ast.Stmt, // or assignment-compatible to ast.Expr, ast.Decl, ast.Spec, or ast.Stmt. func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node any) error { return cfg.fprint(output, fset, node, make(map[ast.Node]int)) } // Fprint "pretty-prints" an AST node to output. // It calls Config.Fprint with default settings. // Note that gofmt uses tabs for indentation but spaces for alignment; // use format.Node (package mvdan.cc/gofumpt/internal/govendor/go/format) for output that matches gofmt. 
func Fprint(output io.Writer, fset *token.FileSet, node any) error { return (&Config{Tabwidth: 8}).Fprint(output, fset, node) } gofumpt-0.6.0/internal/govendor/version.txt000066400000000000000000000000111455555333400210670ustar00rootroot00000000000000go1.21.0 gofumpt-0.6.0/internal/version/000077500000000000000000000000001455555333400165135ustar00rootroot00000000000000gofumpt-0.6.0/internal/version/version.go000066400000000000000000000053361455555333400205360ustar00rootroot00000000000000// Copyright (c) 2020, Daniel Martí // See LICENSE for licensing information package version import ( "encoding/json" "fmt" "os" "runtime" "runtime/debug" "time" "golang.org/x/mod/module" ) // Note that this is not a main package, so a "var version" will not work with // our go-cross script which uses -ldflags=main.version=xxx. const ourModulePath = "mvdan.cc/gofumpt" const fallbackVersion = "(devel)" // to match the default from runtime/debug func findModule(info *debug.BuildInfo, modulePath string) *debug.Module { if info.Main.Path == modulePath { return &info.Main } for _, dep := range info.Deps { if dep.Path == modulePath { return dep } } return nil } func gofumptVersion() string { info, ok := debug.ReadBuildInfo() if !ok { return fallbackVersion // no build info available } // Note that gofumpt may be used as a library via the format package, // so we cannot assume it is the main module in the build. mod := findModule(info, ourModulePath) if mod == nil { return fallbackVersion // not found? } if mod.Replace != nil { mod = mod.Replace } // If we found a meaningful version, we are done. // If gofumpt is not the main module, stop as well, // as VCS info is only for the main module. if mod.Version != "(devel)" || mod != &info.Main { return mod.Version } // Fall back to trying to use VCS information. // Until https://github.com/golang/go/issues/50603 is implemented, // manually construct something like a pseudo-version. // TODO: remove when this code is dead, hopefully in Go 1.20. 
// For the tests, as we don't want the VCS information to change over time. if v := os.Getenv("GARBLE_TEST_BUILDSETTINGS"); v != "" { var extra []debug.BuildSetting if err := json.Unmarshal([]byte(v), &extra); err != nil { panic(err) } info.Settings = append(info.Settings, extra...) } var vcsTime time.Time var vcsRevision string for _, setting := range info.Settings { switch setting.Key { case "vcs.time": // If the format is invalid, we'll print a zero timestamp. vcsTime, _ = time.Parse(time.RFC3339Nano, setting.Value) case "vcs.revision": vcsRevision = setting.Value if len(vcsRevision) > 12 { vcsRevision = vcsRevision[:12] } } } if vcsRevision != "" { return module.PseudoVersion("", "", vcsTime, vcsRevision) } return fallbackVersion } func goVersion() string { // For the tests, as we don't want the Go version to change over time. if testVersion := os.Getenv("GO_VERSION_TEST"); testVersion != "" { return testVersion } return runtime.Version() } func String(injected string) string { if injected != "" { return fmt.Sprintf("%s (%s)", injected, goVersion()) } return fmt.Sprintf("%s (%s)", gofumptVersion(), goVersion()) } gofumpt-0.6.0/main_test.go000066400000000000000000000023261455555333400155270ustar00rootroot00000000000000// Copyright (c) 2019, Daniel Martí // See LICENSE for licensing information package main import ( "encoding/json" "flag" "os" "os/exec" "path/filepath" "testing" qt "github.com/frankban/quicktest" "github.com/rogpeppe/go-internal/gotooltest" "github.com/rogpeppe/go-internal/testscript" ) func TestMain(m *testing.M) { os.Exit(testscript.RunMain(m, map[string]func() int{ "gofumpt": main1, })) } var update = flag.Bool("u", false, "update testscript output files") func TestScript(t *testing.T) { t.Parallel() var goEnv struct { GOCACHE string GOMODCACHE string GOMOD string } out, err := exec.Command("go", "env", "-json").CombinedOutput() if err != nil { t.Fatal(err) } if err := json.Unmarshal(out, &goEnv); err != nil { t.Fatal(err) } p := 
testscript.Params{ Dir: filepath.Join("testdata", "script"), UpdateScripts: *update, RequireExplicitExec: true, Setup: func(env *testscript.Env) error { env.Setenv("GOCACHE", goEnv.GOCACHE) env.Setenv("GOMODCACHE", goEnv.GOMODCACHE) env.Setenv("GOMOD_DIR", filepath.Dir(goEnv.GOMOD)) return nil }, } err = gotooltest.Setup(&p) qt.Assert(t, err, qt.IsNil) testscript.Run(t, p) } gofumpt-0.6.0/testdata/000077500000000000000000000000001455555333400150235ustar00rootroot00000000000000gofumpt-0.6.0/testdata/gofumpt-external/000077500000000000000000000000001455555333400203245ustar00rootroot00000000000000gofumpt-0.6.0/testdata/gofumpt-external/go.mod000066400000000000000000000004161455555333400214330ustar00rootroot00000000000000module test/gofumpt-external go 1.19 require mvdan.cc/gofumpt v0.3.2-0.20220627183521-8dda8068d9f3 require ( github.com/google/go-cmp v0.5.8 // indirect golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect golang.org/x/tools v0.1.11 // indirect ) gofumpt-0.6.0/testdata/gofumpt-external/go.sum000066400000000000000000000022671455555333400214660ustar00rootroot00000000000000github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/rogpeppe/go-internal v1.8.2-0.20220624104257-af73bbc5c731 h1:LkP6LNQyXrQoVVXMpb+sbb0iibcaOZH97feeh778heA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/sys v0.0.0-20220624220833-87e55d714810 h1:rHZQSjJdAI4Xf5Qzeh2bBc5YJIkPFVM6oDtMFYmgws0= golang.org/x/tools v0.1.11 
h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY= golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= mvdan.cc/gofumpt v0.3.2-0.20220627183521-8dda8068d9f3 h1:Kk6yeaipDobigAr+NZqq37LjLN1q3LHSOpcaDLDwyoI= mvdan.cc/gofumpt v0.3.2-0.20220627183521-8dda8068d9f3/go.mod h1:TSc7K1qXnyCCK7LUmDAWp4UMntOys3CzN8ksMKaxFrE= gofumpt-0.6.0/testdata/gofumpt-external/main.go000066400000000000000000000004461455555333400216030ustar00rootroot00000000000000package main import ( "io" "os" "mvdan.cc/gofumpt/format" ) func main() { orig, err := io.ReadAll(os.Stdin) if err != nil { panic(err) } formatted, err := format.Source(orig, format.Options{ LangVersion: "v1.16", }) if err != nil { panic(err) } os.Stdout.Write(formatted) } gofumpt-0.6.0/testdata/script/000077500000000000000000000000001455555333400163275ustar00rootroot00000000000000gofumpt-0.6.0/testdata/script/assignment-newlines.txtar000066400000000000000000000007631455555333400234130ustar00rootroot00000000000000exec gofumpt -w foo.go cmp foo.go foo.go.golden exec gofumpt -d foo.go.golden ! stdout . -- foo.go -- package p func f() { foo := "bar" foo := "bar" _, _ = 0, 1 _, _ = 0, 1 _ = ` foo ` _ = /* inline */ "foo" _ = // inline "foo" } -- foo.go.golden -- package p func f() { foo := "bar" foo := "bar" _, _ = 0, 1 _, _ = 0, 1 _ = ` foo ` _ = /* inline */ "foo" _ = // inline "foo" } gofumpt-0.6.0/testdata/script/block-empty.txtar000066400000000000000000000012011455555333400216330ustar00rootroot00000000000000exec gofumpt -w foo.go cmp foo.go foo.go.golden exec gofumpt -d foo.go.golden ! stdout . 
-- foo.go -- package p func f() { if true { // lone comment } { } { // lone comment } type S struct { // lone comment } type I interface { // lone comment } } type SOut struct { // lone comment } type IOut interface { // lone comment } -- foo.go.golden -- package p func f() { if true { // lone comment } { } { // lone comment } type S struct { // lone comment } type I interface { // lone comment } } type SOut struct { // lone comment } type IOut interface { // lone comment } gofumpt-0.6.0/testdata/script/block-single.txtar000066400000000000000000000021601455555333400217630ustar00rootroot00000000000000exec gofumpt -w foo.go cmp foo.go foo.go.golden exec gofumpt -d foo.go.golden ! stdout . -- foo.go -- package p func f() { if true { println() } for true { println() } { println(1, 2, 3, 4, `foo bar`) } { // comment directly before println() // comment after } { // comment before println() // comment directly after } // For readability; the empty line helps separate the multi-line // condition from the body. if true && true { println() } for true && true { println() } if true && true { // documented single statement println() } } -- foo.go.golden -- package p func f() { if true { println() } for true { println() } { println(1, 2, 3, 4, `foo bar`) } { // comment directly before println() // comment after } { // comment before println() // comment directly after } // For readability; the empty line helps separate the multi-line // condition from the body. if true && true { println() } for true && true { println() } if true && true { // documented single statement println() } } gofumpt-0.6.0/testdata/script/cgo.txtar000066400000000000000000000006051455555333400201640ustar00rootroot00000000000000exec gofumpt -w foo.go cmp foo.go foo.go.golden exec gofumpt -d foo.go.golden ! stdout . 
-- foo.go -- package p import "C" import "os" import `C` import "os" import "C" import ( "io" "utf8" ) import `C` import ( "io" "utf8" ) -- foo.go.golden -- package p import "C" import "os" import "C" import "os" import "C" import ( "io" "utf8" ) import "C" import ( "io" "utf8" ) gofumpt-0.6.0/testdata/script/comment-spaced.txtar000066400000000000000000000031431455555333400223130ustar00rootroot00000000000000exec gofumpt -w foo.go cmp foo.go foo.go.golden exec gofumpt -d foo.go.golden ! stdout . -- foo.go -- //go:build tag // +build tag package p //go:generate some command //go:unknowndirective //lint:disablefoo //go-sumtype:decl Foo //nolint //nolint // explanation //nolint:somelinter // explanation //NOSONAR //NOSONAR // explanation //noinspection ALL //noinspection foo,bar //not actually: a directive //https://just.one/url //TODO: do something //export CgoFunc //extern open func c_open(name *byte, mode int, perm int) int //line 123 //sys Unlink(path string) (err error) //sysnb Getpid() (pid int) //foo is foo. type foo int // comment with a tab. // comment with many spaces //comment group //123 numbers too // comment group //123 numbers too //{ //this is probably code //} //////////// // ascii art //---------- // -- foo.go.golden -- //go:build tag // +build tag package p //go:generate some command //go:unknowndirective //lint:disablefoo //go-sumtype:decl Foo //nolint //nolint // explanation //nolint:somelinter // explanation //NOSONAR //NOSONAR // explanation //noinspection ALL //noinspection foo,bar // not actually: a directive // https://just.one/url // TODO: do something //export CgoFunc //extern open func c_open(name *byte, mode int, perm int) int //line 123 //sys Unlink(path string) (err error) //sysnb Getpid() (pid int) // foo is foo. type foo int // comment with a tab. 
// comment with many spaces // comment group // 123 numbers too // comment group // 123 numbers too //{ //this is probably code //} //////////// // ascii art //---------- // gofumpt-0.6.0/testdata/script/composite-literals-leading-lines.txtar000066400000000000000000000020231455555333400257400ustar00rootroot00000000000000exec gofumpt -w foo.go cmp foo.go foo.go.golden exec gofumpt -d foo.go.golden ! stdout . -- foo.go -- package p var _ = []string{ "foo", } var _ = []string{ "foo", } var _ = []string{ // joint comment "foo", } var _ = []string{ // separate comment "foo", } var _ = map[string]string{ "foo": "bar", } var _ = map[string]string{ "foo": "bar", } var _ = map[string]string{ // joint comment "foo": "bar", } var _ = map[string]string{ // separate comment "foo": "bar", } var _ = map[string]string{ /* joint comment */ "foo": "bar", } -- foo.go.golden -- package p var _ = []string{ "foo", } var _ = []string{ "foo", } var _ = []string{ // joint comment "foo", } var _ = []string{ // separate comment "foo", } var _ = map[string]string{ "foo": "bar", } var _ = map[string]string{ "foo": "bar", } var _ = map[string]string{ // joint comment "foo": "bar", } var _ = map[string]string{ // separate comment "foo": "bar", } var _ = map[string]string{ /* joint comment */ "foo": "bar", } gofumpt-0.6.0/testdata/script/composite-multiline.txtar000066400000000000000000000025011455555333400234130ustar00rootroot00000000000000exec gofumpt -w foo.go cmp foo.go foo.go.golden exec gofumpt -d foo.go.golden ! stdout . 
-- foo.go -- package p var _ = []int{} var _ = []int{ } var _ = []int{1, 2, 3, 4} var _ = []int{ 1, 2, 3, 4} var _ = [][]string{{ "no need for more newlines", "if wrapping a single expression", }} var _ = []string{` no need for newlines `, ` if no elements are surrounded by newlines `} var _ = []struct{ a int }{ { // consistent a: 1, }, { a: 2, }, { // inconsistent a: 3, }, } var _ = []struct{ a int }{{ a: 1, }, { a: 2, }, { a: 3, }} var _ interface{ } func _(struct{ }) var _ = []interface { }{1, 2, 3} func _( ) type T struct { Foo // comment Bar struct { // comment } } -- foo.go.golden -- package p var _ = []int{} var _ = []int{} var _ = []int{ 1, 2, 3, 4, } var _ = []int{ 1, 2, 3, 4, } var _ = [][]string{{ "no need for more newlines", "if wrapping a single expression", }} var _ = []string{` no need for newlines `, ` if no elements are surrounded by newlines `} var _ = []struct{ a int }{ { // consistent a: 1, }, { a: 2, }, { // inconsistent a: 3, }, } var _ = []struct{ a int }{{ a: 1, }, { a: 2, }, { a: 3, }} var _ interface{} func _(struct{}) var _ = []interface{}{1, 2, 3} func _() type T struct { Foo // comment Bar struct { // comment } } gofumpt-0.6.0/testdata/script/decl-group-many.txtar000066400000000000000000000021201455555333400224110ustar00rootroot00000000000000exec gofumpt -w foo.go cmp foo.go foo.go.golden exec gofumpt -d foo.go.golden ! stdout . 
-- foo.go -- package p var single = "foo" var another = "bar" const one = 'q' const two = 'w' const three = 'e' const four = 'r' var not = 'a' var v1 = 's' //go:embed hello.txt var v2 = 'd' var v1 = 's' // comment line 1 // comment line 2 var v2 = 'd' var v1 = "mixed" const c1 = "mixed" //go:embed hello.txt var v1 = 's' var v2 = 'd' var v3 = 'd' // comment var v1 = 's' var v2 = 'd' /* comment */ var v3 = 'd' const inline1 = "s1" // c1 const inline2 = "s2" // c2 const inline3 = "s3" // c3 -- foo.go.golden -- package p var ( single = "foo" another = "bar" ) const ( one = 'q' two = 'w' three = 'e' four = 'r' ) var not = 'a' var v1 = 's' //go:embed hello.txt var v2 = 'd' var ( v1 = 's' // comment line 1 // comment line 2 v2 = 'd' ) var v1 = "mixed" const c1 = "mixed" //go:embed hello.txt var v1 = 's' var ( v2 = 'd' v3 = 'd' ) // comment var ( v1 = 's' v2 = 'd' /* comment */ v3 = 'd' ) const ( inline1 = "s1" // c1 inline2 = "s2" // c2 inline3 = "s3" // c3 ) gofumpt-0.6.0/testdata/script/decl-group-single.txtar000066400000000000000000000017321455555333400227360ustar00rootroot00000000000000exec gofumpt -w f1.go f2.go cmp f1.go f1.go.golden cmp f2.go f2.go.golden exec gofumpt -d f1.go.golden f2.go.golden ! stdout . -- f1.go -- package p import "non-grouped" import ( "grouped" ) var single = "foo" var ( // verbose is verbose. verbose = "bar" ) // This entire block has a comment. var ( groupComment = "bar" ) var ( multiple1 string multiple2 string ) const ( first = iota ) var ( multiline = []string{ "foo", "bar", } ) var ( foo = "foo" // bar = "bar" // baz = "baz" ) -- f1.go.golden -- package p import "non-grouped" import ( "grouped" ) var single = "foo" // verbose is verbose. var verbose = "bar" // This entire block has a comment. 
var ( groupComment = "bar" ) var ( multiple1 string multiple2 string ) const ( first = iota ) var multiline = []string{ "foo", "bar", } var foo = "foo" // bar = "bar" // baz = "baz" -- f2.go -- package p func _() { var ( _ int ) } -- f2.go.golden -- package p func _() { var _ int } gofumpt-0.6.0/testdata/script/decls-separated.txtar000066400000000000000000000015201455555333400224510ustar00rootroot00000000000000exec gofumpt -w foo.go cmp foo.go foo.go.golden exec gofumpt -d foo.go.golden ! stdout . -- foo.go -- package p func f1() { println("single line") } func f2() { println("single line") } func f3() { println("multiline") } func f4() { println("multiline") } // l1 is a var. var l1 = []int{ 1, 2, } // l2 is a var. var l2 = []int{ 3, 4, } var ( s3 = ` ok if grouped together ` s4 = ` ok if grouped together ` ) var _ = "ok if either isn't multiline" -- foo.go.golden -- package p func f1() { println("single line") } func f2() { println("single line") } func f3() { println("multiline") } func f4() { println("multiline") } // l1 is a var. var l1 = []int{ 1, 2, } // l2 is a var. var l2 = []int{ 3, 4, } var ( s3 = ` ok if grouped together ` s4 = ` ok if grouped together ` ) var _ = "ok if either isn't multiline" gofumpt-0.6.0/testdata/script/deprecated-flags.txtar000066400000000000000000000006541455555333400226120ustar00rootroot00000000000000cp foo.orig.go foo.go ! exec gofumpt -w -r foo foo.go stderr 'the rewrite flag is no longer available; use "gofmt -r" instead\n' cmp foo.orig.go foo.go exec gofumpt -w -s foo.go stderr 'warning: -s is deprecated as it is always enabled\n' cmp foo.go foo.go.golden exec gofumpt -d foo.go.golden ! stdout . -- foo.orig.go -- package p func f() { println("foo") } -- foo.go.golden -- package p func f() { println("foo") } gofumpt-0.6.0/testdata/script/diagnose.txtar000066400000000000000000000061701455555333400212100ustar00rootroot00000000000000env GO_VERSION_TEST=go1.18.29 # First, test a local build of gofumpt resulting from 'git clone'. 
# Its version will be inferred from VCS, but since we want a stable test, # we mock the VCS information. Note that test binaries do not have VCS info. # Data obtained from a real build while developing. env GARBLE_TEST_BUILDSETTINGS='[{"Key":"vcs","Value":"git"},{"Key":"vcs.revision","Value":"8dda8068d9f339047fc1777b688afb66a0a0db17"},{"Key":"vcs.time","Value":"2022-07-27T15:58:40Z"},{"Key":"vcs.modified","Value":"true"}]' exec gofumpt foo.go cmp stdout foo.go.golden exec gofumpt outdated.go cmp stdout foo.go.golden exec gofumpt -extra foo.go cmp stdout foo.go.golden-extra exec gofumpt -lang=1.0 foo.go cmp stdout foo.go.golden-lang exec gofumpt -d nochange.go ! stdout . exec gofumpt -d foo.go.golden ! stdout . exec gofumpt -d -extra foo.go.golden-extra ! stdout . # A local build without VCS information will result in a missing version. env GARBLE_TEST_BUILDSETTINGS='[]' exec gofumpt foo.go cmp stdout foo.go.golden-devel [short] stop 'the rest of this test builds gofumpt binaries' # We want a published version of gofumpt on the public module proxies, # because that's the only way that its module version will be included. # Using a directory replace directive will not work. # This means that any change in how gofumpt reports its own version # will require two pull requests, the second one updating the test script. # We could consider using go-internal/goproxytest, but then we would need to # manually run something like go-internal/cmd/txtar-addmod reguarly. # Or teach goproxytest to serve a mock version of gofumpt from its local checkout. # Either way, both are relatively overkill for now. env GOBIN=${WORK}/bin env GOFUMPT_PUBLISHED_VERSION=v0.3.2-0.20220627183521-8dda8068d9f3 # TODO: update these once the library fix hits master # gofumpt as the main binary with a real module version. go install mvdan.cc/gofumpt@${GOFUMPT_PUBLISHED_VERSION} exec ${GOBIN}/gofumpt foo.go cmp stdout foo.go.golden-released # gofumpt as a library with a real module version. 
cd ${GOMOD_DIR}/testdata/gofumpt-external go install . cd ${WORK} stdin foo.go exec ${GOBIN}/gofumpt-external cmp stdout foo.go.golden-external -- go.mod -- module test go 1.16 -- foo.go -- package p //gofumpt:diagnose -- outdated.go -- package p //gofumpt:diagnose v0.1.0 -- nochange.go -- package p //gofumpt:diagnosefoobar -- foo.go.golden -- package p //gofumpt:diagnose version: v0.0.0-20220727155840-8dda8068d9f3 (go1.18.29) flags: -lang=v1.16 -modpath=test -- foo.go.golden-devel -- package p //gofumpt:diagnose version: (devel) (go1.18.29) flags: -lang=v1.16 -modpath=test -- foo.go.golden-extra -- package p //gofumpt:diagnose version: v0.0.0-20220727155840-8dda8068d9f3 (go1.18.29) flags: -lang=v1.16 -modpath=test -extra -- foo.go.golden-lang -- package p //gofumpt:diagnose version: v0.0.0-20220727155840-8dda8068d9f3 (go1.18.29) flags: -lang=v1.0 -modpath=test -- foo.go.golden-released -- package p //gofumpt:diagnose v0.3.2-0.20220627183521-8dda8068d9f3 -lang=v1.16 -modpath=test -- foo.go.golden-external -- package p //gofumpt:diagnose (devel) -lang=v1.16 -modpath= gofumpt-0.6.0/testdata/script/func-merge-parameters.txtar000066400000000000000000000024061455555333400236060ustar00rootroot00000000000000# By default, this rule isn't enabled. exec gofumpt foo.go cmp stdout foo.go # It's run with -extra. exec gofumpt -extra foo.go cmp stdout foo.go.golden exec gofumpt -d foo.go.golden ! stdout . 
-- foo.go -- package p type f func(x int, y int) int type i interface { add(x int, y int) } type s struct { x int y int } func mergeAdjacent(x int, y int) {} func mergeThreeAdjacent(x int, y int, z int) {} func mergeOneWithTwo(x, y int, z int) {} func mergeTwoWithOne(x int, y, z int) {} func mergeWithComment( x int, y int, // comment ) func mergeAllSyntax(x chan []*foo.Bar, y chan []*foo.Bar) {} func dontMergeAnonymousParams(int, int) {} func dontMergeMultipleLines( x int, y int, ) { } func dontMergeMultipleLines2( x, y int, z int, ) { } -- foo.go.golden -- package p type f func(x, y int) int type i interface { add(x, y int) } type s struct { x int y int } func mergeAdjacent(x, y int) {} func mergeThreeAdjacent(x, y, z int) {} func mergeOneWithTwo(x, y, z int) {} func mergeTwoWithOne(x, y, z int) {} func mergeWithComment( x, y int, // comment ) func mergeAllSyntax(x, y chan []*foo.Bar) {} func dontMergeAnonymousParams(int, int) {} func dontMergeMultipleLines( x int, y int, ) { } func dontMergeMultipleLines2( x, y int, z int, ) { } gofumpt-0.6.0/testdata/script/func-newlines.txtar000066400000000000000000000137741455555333400222040ustar00rootroot00000000000000exec gofumpt -w foo.go cmp foo.go foo.go.golden exec gofumpt -d foo.go.golden ! stdout . 
-- foo.go -- package p func f1() { println("multiple") println("statements") } func f2() { // comment directly before println() // comment after } func _() { f3 := func() { println() } } func multilineParams(p1 string, p2 string) { println("body") } func multilineParamsUnambiguous(p1 string, p2 string, ) { println("body") } func multilineParamsListNoReturn( p1 string, p2 string, ) { println("body") } func multilineParamsListReturningNamedSingleValue( p1 string, p2 string, ) (err error) { println("body") return err } func multilineParamsListReturningSingleValue( p1 string, p2 string, ) error { println("body") return nil } func multilineParamsListReturningNamedMultiValues( p1 string, p2 string, ) (s string, err error) { println("body") return s, err } func multilineParamsListReturningMultiValues( p1 string, p2 string, ) (string, error) { println("body") return "", nil } func multilineParamsListReturningNamedMultiLineValuesList( p1 string, p2 string, ) ( s string, err error, ) { println("body") return s, err } func multilineParamsListReturningMultiLineValues( p1 string, p2 string, ) ( string, error, ) { println("body") return "", nil } func multilineParamsOneParamNoReturn( p1 string, ) { println("body") } func multilineParamsOneParamReturningNamedSingleValue( p1 string, ) (err error) { println("body") return err } func multilineParamsOneParamReturningSingleValue( p1 string, ) error { println("body") return nil } func multilineParamsOneParamReturningNamedMultiValues( p1 string, ) (s string, err error) { println("body") return s, err } func multilineParamsOneParamReturningMultiValues( p1 string, ) (string, error) { println("body") return "", nil } func multilineParamsOneParamReturningNamedMultiLineValuesList( p1 string, ) ( s string, err error, ) { println("body") return s, err } func multilineParamsOneParamReturningMultiLineValues( p1 string, ) ( string, error, ) { println("body") return "", nil } func multilineResults() (p1 string, p2 string) { println("body") } func 
multilineResultsUnambiguous() (p1 string, p2 string, ) { println("body") } func multilineNoFields( ) { println("body") } func f( foo int, bar string, /* baz */) { body() } func f2( foo int, bar string, ) ( string, error, /* baz */) { return "", nil } func multilineResultsMultipleEmptyLines() (p1 string, p2 string) { println("body") } func multilineParamsWithoutEmptyLine(p1 string, p2 string) { println("body") } func multilineParamsWithoutEmptyLineWithComment(p1 string, p2 string) { // comment println("body") } // Same as the others above, but with a single result parameter without // parentheses. This used to cause token.File.Offset crashes. func f(p1 string, p2 string) int { println("body") return 0 } func a() { f := func(s string, b bool, ) { // foo } } func f(p1 string, p2 string) (int, string, /* baz */) { println("body") return 0, "" } -- foo.go.golden -- package p func f1() { println("multiple") println("statements") } func f2() { // comment directly before println() // comment after } func _() { f3 := func() { println() } } func multilineParams(p1 string, p2 string, ) { println("body") } func multilineParamsUnambiguous(p1 string, p2 string, ) { println("body") } func multilineParamsListNoReturn( p1 string, p2 string, ) { println("body") } func multilineParamsListReturningNamedSingleValue( p1 string, p2 string, ) (err error) { println("body") return err } func multilineParamsListReturningSingleValue( p1 string, p2 string, ) error { println("body") return nil } func multilineParamsListReturningNamedMultiValues( p1 string, p2 string, ) (s string, err error) { println("body") return s, err } func multilineParamsListReturningMultiValues( p1 string, p2 string, ) (string, error) { println("body") return "", nil } func multilineParamsListReturningNamedMultiLineValuesList( p1 string, p2 string, ) ( s string, err error, ) { println("body") return s, err } func multilineParamsListReturningMultiLineValues( p1 string, p2 string, ) ( string, error, ) { println("body") 
return "", nil } func multilineParamsOneParamNoReturn( p1 string, ) { println("body") } func multilineParamsOneParamReturningNamedSingleValue( p1 string, ) (err error) { println("body") return err } func multilineParamsOneParamReturningSingleValue( p1 string, ) error { println("body") return nil } func multilineParamsOneParamReturningNamedMultiValues( p1 string, ) (s string, err error) { println("body") return s, err } func multilineParamsOneParamReturningMultiValues( p1 string, ) (string, error) { println("body") return "", nil } func multilineParamsOneParamReturningNamedMultiLineValuesList( p1 string, ) ( s string, err error, ) { println("body") return s, err } func multilineParamsOneParamReturningMultiLineValues( p1 string, ) ( string, error, ) { println("body") return "", nil } func multilineResults() (p1 string, p2 string, ) { println("body") } func multilineResultsUnambiguous() (p1 string, p2 string, ) { println("body") } func multilineNoFields() { println("body") } func f( foo int, bar string, /* baz */ ) { body() } func f2( foo int, bar string, ) ( string, error, /* baz */ ) { return "", nil } func multilineResultsMultipleEmptyLines() (p1 string, p2 string, ) { println("body") } func multilineParamsWithoutEmptyLine(p1 string, p2 string, ) { println("body") } func multilineParamsWithoutEmptyLineWithComment(p1 string, p2 string, ) { // comment println("body") } // Same as the others above, but with a single result parameter without // parentheses. This used to cause token.File.Offset crashes. func f(p1 string, p2 string, ) int { println("body") return 0 } func a() { f := func(s string, b bool, ) { // foo } } func f(p1 string, p2 string) (int, string, /* baz */) { println("body") return 0, "" } gofumpt-0.6.0/testdata/script/generated.txtar000066400000000000000000000013331455555333400213510ustar00rootroot00000000000000# Explicitly given generated files are formatted with our rules. 
exec gofumpt foo.go cmp stdout foo.go.golden # stdin is still considered an explicit file. stdin foo.go exec gofumpt cmp stdout foo.go.golden # Implicitly walked generated files get formatted without the added rules. exec gofumpt -l . stdout -count=1 '^badgofmt.go$' ! stdout '^foo.go$' ! stderr . -- badgofmt.go -- // Code generated by foo. DO NOT EDIT. package foo func f() { println("body") } -- foo.go -- // foo is a package about bar. // Code generated by foo. DO NOT EDIT. package foo func f() { println("body") } -- foo.go.golden -- // foo is a package about bar. // Code generated by foo. DO NOT EDIT. package foo func f() { println("body") } gofumpt-0.6.0/testdata/script/gomod.txtar000066400000000000000000000007551455555333400205270ustar00rootroot00000000000000# Test various edge cases with go.mod files. [!go1.21] skip 'Go 1.20 or older cannot parse these go.mod files' exec gofumpt toolchain-stable/a.go stdout '//gofumpt:diagnose.* -lang=v1.21' exec gofumpt toolchain-unstable/a.go stdout '//gofumpt:diagnose.* -lang=v1.21' -- toolchain-stable/go.mod -- module a go 1.21.2 -- toolchain-stable/a.go -- package a //gofumpt:diagnose -- toolchain-unstable/go.mod -- module a go 1.21rc3 -- toolchain-unstable/a.go -- package a //gofumpt:diagnose gofumpt-0.6.0/testdata/script/ignore-dirs.txtar000066400000000000000000000022461455555333400216410ustar00rootroot00000000000000exec gofumpt orig.go.golden cp stdout formatted.go.golden mkdir -p vendor/foo testdata/foo cp orig.go.golden vendor/foo/foo.go cp orig.go.golden testdata/foo/foo.go # format explicit dirs exec gofumpt -l vendor testdata stdout -count=1 'vendor[/\\]foo[/\\]foo.go' stdout -count=1 'testdata[/\\]foo[/\\]foo.go' ! stderr . # format explicit files exec gofumpt -l vendor/foo/foo.go testdata/foo/foo.go stdout -count=1 'vendor[/\\]foo[/\\]foo.go' stdout -count=1 'testdata[/\\]foo[/\\]foo.go' ! stderr . # ignore implicit dirs via fs walking exec gofumpt -l . ! stdout . ! stderr . 
# format explicit pkg while ignoring rest mkdir vendor/ignore testdata/ignore cp orig.go.golden vendor/ignore/ignore.go cp orig.go.golden testdata/ignore/ignore.go exec gofumpt -l vendor/foo testdata/foo . stdout -count=1 'vendor[/\\]foo[/\\]foo.go' stdout -count=1 'testdata[/\\]foo[/\\]foo.go' ! stderr . # format explicit dirs without clean paths exec gofumpt -l $WORK//vendor ./testdata/./ stdout -count=1 'vendor[/\\]foo[/\\]foo.go' stdout -count=1 'testdata[/\\]foo[/\\]foo.go' ! stderr . -- orig.go.golden -- package p func f() { if true { // lone comment } { } { // lone comment } } gofumpt-0.6.0/testdata/script/interface.txtar000066400000000000000000000037121455555333400213560ustar00rootroot00000000000000exec gofumpt -w foo.go cmp foo.go foo.go.golden exec gofumpt -d foo.go.golden ! stdout . -- foo.go -- package p type i1 interface { a(x int) int b(x int) int c(x int) int D() E() f() } type i2 interface { // comment for a a(x int) int // comment between a and b // comment for b b(x int) int // comment between b and c c(x int) int d(x int) int // comment for e e(x int) int } type i3 interface { a(x int) int // standalone comment b(x int) int } type leadingLine1 interface { a(x int) int } type leadingLine2 interface { a(x int) int } type leadingLine3 interface { // comment a(x int) int } type leadingLine4 interface { // comment a(x int) int } type leadingLine5 interface { // comment // comment for a a(x int) int } type leadingLine6 interface { // comment // comment for a a(x int) int } type leadingLine7 interface { // comment // comment for a a(x int) int } type leadingLine8 interface { // comment } type ii1 interface { DoA() DoB() UndoA() UndoB() } -- foo.go.golden -- package p type i1 interface { a(x int) int b(x int) int c(x int) int D() E() f() } type i2 interface { // comment for a a(x int) int // comment between a and b // comment for b b(x int) int // comment between b and c c(x int) int d(x int) int // comment for e e(x int) int } type i3 interface { a(x 
int) int // standalone comment b(x int) int } type leadingLine1 interface { a(x int) int } type leadingLine2 interface { a(x int) int } type leadingLine3 interface { // comment a(x int) int } type leadingLine4 interface { // comment a(x int) int } type leadingLine5 interface { // comment // comment for a a(x int) int } type leadingLine6 interface { // comment // comment for a a(x int) int } type leadingLine7 interface { // comment // comment for a a(x int) int } type leadingLine8 interface { // comment } type ii1 interface { DoA() DoB() UndoA() UndoB() } gofumpt-0.6.0/testdata/script/linedirectives.txtar000066400000000000000000000063071455555333400224320ustar00rootroot00000000000000# Line directives can throw off our use of MergeLines. # We should ignore them entirely when calculating line numbers. # The file below is borrowed from Go's test/dwarf/linedirectives.go. exec gofumpt -w foo.go cmp foo.go foo.go.golden -- foo.go -- // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
//line foo/bar.y:4 package main //line foo/bar.y:60 func main() { //line foo/bar.y:297 f, l := 0, 0 //line yacctab:1 f, l = 1, 1 //line yaccpar:1 f, l = 2, 1 //line foo/bar.y:82 f, l = 3, 82 //line foo/bar.y:90 f, l = 3, 90 //line foo/bar.y:92 f, l = 3, 92 //line foo/bar.y:100 f, l = 3, 100 //line foo/bar.y:104 l = 104 //line foo/bar.y:112 l = 112 //line foo/bar.y:117 l = 117 //line foo/bar.y:121 l = 121 //line foo/bar.y:125 l = 125 //line foo/bar.y:133 l = 133 //line foo/bar.y:146 l = 146 //line foo/bar.y:148 //line foo/bar.y:153 //line foo/bar.y:155 l = 155 //line foo/bar.y:160 //line foo/bar.y:164 //line foo/bar.y:173 //line foo/bar.y:178 //line foo/bar.y:180 //line foo/bar.y:185 //line foo/bar.y:195 //line foo/bar.y:197 //line foo/bar.y:202 //line foo/bar.y:204 //line foo/bar.y:208 //line foo/bar.y:211 //line foo/bar.y:213 //line foo/bar.y:215 //line foo/bar.y:217 //line foo/bar.y:221 //line foo/bar.y:229 //line foo/bar.y:236 //line foo/bar.y:238 //line foo/bar.y:240 //line foo/bar.y:244 //line foo/bar.y:249 //line foo/bar.y:253 //line foo/bar.y:257 //line foo/bar.y:262 //line foo/bar.y:267 //line foo/bar.y:272 if l == f { //line foo/bar.y:277 panic("aie!") //line foo/bar.y:281 } //line foo/bar.y:285 return //line foo/bar.y:288 //line foo/bar.y:290 } //line foo/bar.y:293 //line foo/bar.y:295 -- foo.go.golden -- // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
//line foo/bar.y:4 package main //line foo/bar.y:60 func main() { //line foo/bar.y:297 f, l := 0, 0 //line yacctab:1 f, l = 1, 1 //line yaccpar:1 f, l = 2, 1 //line foo/bar.y:82 f, l = 3, 82 //line foo/bar.y:90 f, l = 3, 90 //line foo/bar.y:92 f, l = 3, 92 //line foo/bar.y:100 f, l = 3, 100 //line foo/bar.y:104 l = 104 //line foo/bar.y:112 l = 112 //line foo/bar.y:117 l = 117 //line foo/bar.y:121 l = 121 //line foo/bar.y:125 l = 125 //line foo/bar.y:133 l = 133 //line foo/bar.y:146 l = 146 //line foo/bar.y:148 //line foo/bar.y:153 //line foo/bar.y:155 l = 155 //line foo/bar.y:160 //line foo/bar.y:164 //line foo/bar.y:173 //line foo/bar.y:178 //line foo/bar.y:180 //line foo/bar.y:185 //line foo/bar.y:195 //line foo/bar.y:197 //line foo/bar.y:202 //line foo/bar.y:204 //line foo/bar.y:208 //line foo/bar.y:211 //line foo/bar.y:213 //line foo/bar.y:215 //line foo/bar.y:217 //line foo/bar.y:221 //line foo/bar.y:229 //line foo/bar.y:236 //line foo/bar.y:238 //line foo/bar.y:240 //line foo/bar.y:244 //line foo/bar.y:249 //line foo/bar.y:253 //line foo/bar.y:257 //line foo/bar.y:262 //line foo/bar.y:267 //line foo/bar.y:272 if l == f { //line foo/bar.y:277 panic("aie!") //line foo/bar.y:281 } //line foo/bar.y:285 return //line foo/bar.y:288 //line foo/bar.y:290 } //line foo/bar.y:293 //line foo/bar.y:295 gofumpt-0.6.0/testdata/script/long-lines.txtar000066400000000000000000000140031455555333400214600ustar00rootroot00000000000000cp foo.go foo.go.orig exec gofumpt -w foo.go cmp foo.go foo.go.orig env GOFUMPT_SPLIT_LONG_LINES=on exec gofumpt -w foo.go cmp foo.go foo.go.golden exec gofumpt -d foo.go.golden ! stdout . -- foo.go -- package p func _() { if err := f(argument1, argument2, argument3, argument4, argument5, argument6, argument7, argument8, argument9, argument10); err != nil { panic(err) } // Tiny arguments to ensure the length calculation is right. 
if err := f(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10); err != nil { panic(err) } // These wouldn't take significantly less horizontal space if split. f(x, "one single very very very very very very very very very very very very very very very very long literal") if err := f(x, "one single very very very very very very very very very very very very very very very very long literal"); err != nil { panic(err) } { { { { println("first", "one single very very very very very very very very very very very very very long literal") } } } } // Allow splitting at the start of sub-lists too. if err := f(argument1, argument2, argument3, argument4, someComplex{argument5, argument6, argument7, argument8, argument9, argument10}); err != nil { panic(err) } if err := f(argument1, argument2, argument3, argument4, &someComplex{argument5, argument6, argument7, argument8, argument9, argument10}); err != nil { panic(err) } if err := f(argument1, argument2, argument3, argument4, []someSlice{argument5, argument6, argument7, argument8, argument9, argument10}); err != nil { panic(err) } // Allow splitting "lists" of binary expressions. if boolean1 && boolean2 && boolean3 && boolean4 && boolean5 && boolean6 && boolean7 && boolean8 && boolean9 && boolean10 && boolean11 { } // Over 100, and we split in a way that doesn't break "len(" off. if boolean1 || boolean2 || boolean3 || boolean4 || len(someVeryLongVarName.SomeVeryLongSelector) > 0 { } } // Note that function declarations have a higher limit of 120. // This line goes beyond the limit of 120, but splitting it would leave the // following line with just 20 non-indentation characters. Not worth it. func LongButNotWorthSplitting(argument1, argument2, argument3, argument4, argument5, argument6, argument7, argument8, argument9 int) bool { } // This line goes well past the limit and it should be split. // Note that it has a nested func type in a parameter. 
func TooLongWithFuncParam(fn func(int) (int, error), argument1, argument2, argument3, argument4, argument5, argument6, argument7, argument8, argument9, argument10 int) bool { } // This is like LongButNotWorthSplitting, but with a func parameter. func LongButNotWorthSplitting2(fn func(int) (int, error), argument3, argument4, argument5, argument6, argument7, argument8, argument9 int) bool { } // Never split result parameter lists, as that could easily add confusion with // extra input parameters. func NeverSplitResults(argument1, argument2, argument3, argument4, argument5 int) (result1 int, result2, result3, result4, result5, result6, result7, result8 bool) { } -- foo.go.golden -- package p func _() { if err := f(argument1, argument2, argument3, argument4, argument5, argument6, argument7, argument8, argument9, argument10); err != nil { panic(err) } // Tiny arguments to ensure the length calculation is right. if err := f(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10); err != nil { panic(err) } // These wouldn't take significantly less horizontal space if split. f(x, "one single very very very very very very very very very very very very very very very very long literal") if err := f(x, "one single very very very very very very very very very very very very very very very very long literal"); err != nil { panic(err) } { { { { println("first", "one single very very very very very very very very very very very very very long literal") } } } } // Allow splitting at the start of sub-lists too. 
if err := f(argument1, argument2, argument3, argument4, someComplex{ argument5, argument6, argument7, argument8, argument9, argument10, }); err != nil { panic(err) } if err := f(argument1, argument2, argument3, argument4, &someComplex{ argument5, argument6, argument7, argument8, argument9, argument10, }); err != nil { panic(err) } if err := f(argument1, argument2, argument3, argument4, []someSlice{ argument5, argument6, argument7, argument8, argument9, argument10, }); err != nil { panic(err) } // Allow splitting "lists" of binary expressions. if boolean1 && boolean2 && boolean3 && boolean4 && boolean5 && boolean6 && boolean7 && boolean8 && boolean9 && boolean10 && boolean11 { } // Over 100, and we split in a way that doesn't break "len(" off. if boolean1 || boolean2 || boolean3 || boolean4 || len(someVeryLongVarName.SomeVeryLongSelector) > 0 { } } // Note that function declarations have a higher limit of 120. // This line goes beyond the limit of 120, but splitting it would leave the // following line with just 20 non-indentation characters. Not worth it. func LongButNotWorthSplitting(argument1, argument2, argument3, argument4, argument5, argument6, argument7, argument8, argument9 int) bool { } // This line goes well past the limit and it should be split. // Note that it has a nested func type in a parameter. func TooLongWithFuncParam(fn func(int) (int, error), argument1, argument2, argument3, argument4, argument5, argument6, argument7, argument8, argument9, argument10 int) bool { } // This is like LongButNotWorthSplitting, but with a func parameter. func LongButNotWorthSplitting2(fn func(int) (int, error), argument3, argument4, argument5, argument6, argument7, argument8, argument9 int) bool { } // Never split result parameter lists, as that could easily add confusion with // extra input parameters. 
func NeverSplitResults(argument1, argument2, argument3, argument4, argument5 int) (result1 int, result2, result3, result4, result5, result6, result7, result8 bool) { } gofumpt-0.6.0/testdata/script/missing-import.txtar000066400000000000000000000002171455555333400223740ustar00rootroot00000000000000# A missing import shouldn't matter nor be fixed by gofumpt. exec gofumpt foo.go cmp stdout foo.go -- foo.go -- package p var _ bytes.Buffer gofumpt-0.6.0/testdata/script/newline-errcheck.txtar000066400000000000000000000022671455555333400226470ustar00rootroot00000000000000exec gofumpt -w foo.go cmp foo.go foo.go.golden exec gofumpt -d foo.go.golden ! stdout . -- foo.go -- package p var Do1 func() error var Do2 func() (int, error) func f() { n1, err := Do2() if err != nil { panic(err) } if n2, err := Do2(); err != nil { panic(err) } n3, err := Do2() if err != nil { panic(err) } select { default: err := Do1() if err != nil { panic(err) } } n4, err := Do2() if err != nil && err.Error() == "complex condition" { panic(err) } err1 := Do1() if err != nil { panic(err) } { if err != nil { panic(err) } } n5, err = Do2() if err != nil { panic(err) } } -- foo.go.golden -- package p var Do1 func() error var Do2 func() (int, error) func f() { n1, err := Do2() if err != nil { panic(err) } if n2, err := Do2(); err != nil { panic(err) } n3, err := Do2() if err != nil { panic(err) } select { default: err := Do1() if err != nil { panic(err) } } n4, err := Do2() if err != nil && err.Error() == "complex condition" { panic(err) } err1 := Do1() if err != nil { panic(err) } { if err != nil { panic(err) } } n5, err = Do2() if err != nil { panic(err) } } gofumpt-0.6.0/testdata/script/octal-literals.txtar000066400000000000000000000016321455555333400223340ustar00rootroot00000000000000# Initially, the Go language version is too low. exec gofumpt -l . ! stdout . # We can give an explicitly newer version. exec gofumpt -lang=1.13 -l . 
stdout -count=1 'foo\.go' stdout -count=1 'nested[/\\]nested\.go' # If we bump the version in go.mod, it should be picked up. exec go mod edit -go=1.13 exec gofumpt -l . stdout -count=1 'foo\.go' ! stdout 'nested' # Ensure we produce the output we expect, and that it's stable. exec gofumpt foo.go cmp stdout foo.go.golden exec gofumpt -d foo.go.golden ! stdout . # We can give an explicitly older version, too exec gofumpt -lang=1.0 -l . ! stdout . -- go.mod -- module test go 1.12 -- foo.go -- package p const ( i = 0 j = 022 k = 0o_7_5_5 l = 1022 ) -- foo.go.golden -- package p const ( i = 0 j = 0o22 k = 0o_7_5_5 l = 1022 ) -- nested/go.mod -- module nested go 1.11 -- nested/nested.go -- package p const ( i = 0 j = 022 k = 0o_7_5_5 l = 1022 ) gofumpt-0.6.0/testdata/script/short-case.txtar000066400000000000000000000041141455555333400214630ustar00rootroot00000000000000exec gofumpt -w foo.go cmp foo.go foo.go.golden exec gofumpt -d foo.go.golden ! stdout . -- foo.go -- package p func f(r rune) { switch r { case 'a', 'b', 'c': case 'd', 'e', 'f': case 'a', 'b', 'c': case 'v', 'e', 'r', 'y', 'l', 'o', 'n', 'g', 'l', 'i', 's', 't', '.', '.', '.': // before case 'a', 'b': // inline // after case 'a', // middle 'b': case 'a', 'b', 'c', 'd', 'e', 'f', 'g': // very very long inline comment at the end case 'a', 'b', 'c', 'd': // short comment } { { { { { switch r { case 'i', 'n', 'd', 'e', 'n', 't', 'e', 'd': } } } } } } } func s(x int) { switch x { case shortConstant1, shortConstant2: // A comment. fmt.Println(x) case shortConstant3, shortConstant4: // Do nothing. default: // Another comment. fmt.Println(x * 2) } } func s(x int) { switch x { case longerConstantName1, longerConstantName2: // A comment. fmt.Println(x) case longerConstantName3, longerConstantName4: // Do nothing. default: // Another comment. 
fmt.Println(x * 2) } } -- foo.go.golden -- package p func f(r rune) { switch r { case 'a', 'b', 'c': case 'd', 'e', 'f': case 'a', 'b', 'c': case 'v', 'e', 'r', 'y', 'l', 'o', 'n', 'g', 'l', 'i', 's', 't', '.', '.', '.': // before case 'a', 'b': // inline // after case 'a', // middle 'b': case 'a', 'b', 'c', 'd', 'e', 'f', 'g': // very very long inline comment at the end case 'a', 'b', 'c', 'd': // short comment } { { { { { switch r { case 'i', 'n', 'd', 'e', 'n', 't', 'e', 'd': } } } } } } } func s(x int) { switch x { case shortConstant1, shortConstant2: // A comment. fmt.Println(x) case shortConstant3, shortConstant4: // Do nothing. default: // Another comment. fmt.Println(x * 2) } } func s(x int) { switch x { case longerConstantName1, longerConstantName2: // A comment. fmt.Println(x) case longerConstantName3, longerConstantName4: // Do nothing. default: // Another comment. fmt.Println(x * 2) } } gofumpt-0.6.0/testdata/script/short-decl.txtar000066400000000000000000000007621455555333400214640ustar00rootroot00000000000000exec gofumpt -w foo.go cmp foo.go foo.go.golden exec gofumpt -d foo.go.golden ! stdout . -- foo.go -- package p var global = x func f() { var local = x var local2, local3 = x, y var onlyType T var typeAndVar T = x var _ = unused var ( aligned = x vars = y here = y ) } -- foo.go.golden -- package p var global = x func f() { local := x local2, local3 := x, y var onlyType T var typeAndVar T = x _ = unused var ( aligned = x vars = y here = y ) } gofumpt-0.6.0/testdata/script/simplify.txtar000066400000000000000000000005261455555333400212520ustar00rootroot00000000000000# gofumpt changes -s to default to true. 
exec gofumpt foo.go cmp stdout foo.go.golden -- foo.go -- package p const () const ( // Comment ) type () type ( // Comment ) var () var ( // Comment ) var _ = [][]int{[]int{1}} -- foo.go.golden -- package p const ( // Comment ) type ( // Comment ) var ( // Comment ) var _ = [][]int{{1}} gofumpt-0.6.0/testdata/script/std-imports.txtar000066400000000000000000000047771455555333400217170ustar00rootroot00000000000000exec gofumpt -w foo.go cmp foo.go foo.go.golden exec gofumpt -d foo.go.golden ! stdout . -- go.mod -- module nodomainmod/mod1 go 1.16 -- foo.go -- package p import ( "io" "io/ioutil" // if the user keeps them in the top group, obey that _ "io/ioutil" _ "image/png" "bufio" // the above is for a side effect; this one has a comment ) import ( "os" "foo.local/one" bytes_ "bytes" "io" ) import ( "foo.local/two" "fmt" ) // If they are in order, but with extra newlines, join them. import ( "more" "std" ) // We need to split std vs non-std in this case too. import ( "foo.local" "foo.local/three" math "math" ) import ( "x" // don't mess up this comment "y" // or many // of them "z" ) // This used to crash gofumpt, as there's no space to insert an extra newline. import ( "std" "non.std/pkg" ) // All of the extra imports below are known to not belong in std. // For example/ and test/, see https://golang.org/issue/37641. import ( "io" "example/foo" "internal/bar" "test/baz" ) import ( "io" "nodomainmod" "nodomainmod/mod1/pkg1" "nodomainmod/mod2" "nodomainmodextra" ) import ( "io" "nodomainother/mod.withdot/pkg1" ) // TODO: fix issue 225. 
import ( "path/filepath" "time" "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/yaml" ) -- foo.go.golden -- package p import ( "io" "io/ioutil" // if the user keeps them in the top group, obey that _ "io/ioutil" _ "image/png" "bufio" // the above is for a side effect; this one has a comment ) import ( "io" "os" "foo.local/one" bytes_ "bytes" ) import ( "fmt" "foo.local/two" ) // If they are in order, but with extra newlines, join them. import ( "more" "std" ) // We need to split std vs non-std in this case too. import ( math "math" "foo.local" "foo.local/three" ) import ( "x" // don't mess up this comment "y" // or many // of them "z" ) // This used to crash gofumpt, as there's no space to insert an extra newline. import ( "std" "non.std/pkg" ) // All of the extra imports below are known to not belong in std. // For example/ and test/, see https://golang.org/issue/37641. import ( "io" "example/foo" "internal/bar" "test/baz" ) import ( "io" "nodomainmodextra" "nodomainmod" "nodomainmod/mod1/pkg1" "nodomainmod/mod2" ) import ( "io" "nodomainother/mod.withdot/pkg1" ) // TODO: fix issue 225. 
import ( "path/filepath" "time" "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/yaml" ) gofumpt-0.6.0/testdata/script/typeparams.txtar000066400000000000000000000024431455555333400216030ustar00rootroot00000000000000exec gofumpt foo.go cmp stdout foo.go.golden -- go.mod -- module test go 1.18 -- foo.go -- package p func Foo[A, B any](x A, y B) {} type Vector[T any] []T var v Vector[int ] type PredeclaredSignedInteger interface { int | int8 | int16 | int32 | int64 } type StringableSignedInteger interface { ~int | ~int8 | ~int16 | ~int32 | ~int64 String() string } type CombineEmbeds interface { fmt.Stringer comparable | io.Reader Foo() } func Caller() { Foo[int,int](1,2) } func Issue235[K interface { comparable constraints.Ordered }, V any](m map[K]V) []K { keys := maps.Keys(m) slices.Sort(keys) return keys } func multilineParams[V any](p1 V, p2 V) { println("body") } -- foo.go.golden -- package p func Foo[A, B any](x A, y B) {} type Vector[T any] []T var v Vector[int] type PredeclaredSignedInteger interface { int | int8 | int16 | int32 | int64 } type StringableSignedInteger interface { ~int | ~int8 | ~int16 | ~int32 | ~int64 String() string } type CombineEmbeds interface { fmt.Stringer comparable | io.Reader Foo() } func Caller() { Foo[int, int](1, 2) } func Issue235[K interface { comparable constraints.Ordered }, V any](m map[K]V) []K { keys := maps.Keys(m) slices.Sort(keys) return keys } func multilineParams[V any](p1 V, p2 V, ) { println("body") } gofumpt-0.6.0/testdata/script/workspaces.txtar000066400000000000000000000010111455555333400215650ustar00rootroot00000000000000# Whether we run gofumpt from inside or outside a module, # we should always use the information from its go.mod. # We also test that we don't get confused by the presence of go.work. 
exec gofumpt a/go112.go cmp stdout a/go113.go cd a exec gofumpt go112.go cmp stdout go113.go -- go.work -- go 1.18 use ./a use ./b -- a/go.mod -- module a go 1.18 -- a/a.go -- package a -- a/go112.go -- package main const x = 0777 -- a/go113.go -- package main const x = 0o777 -- b/go.mod -- module b go 1.18 -- b/b.go -- package b gofumpt-0.6.0/ulimit_linux_test.go000066400000000000000000000052171455555333400173270ustar00rootroot00000000000000// Copyright (c) 2019, Daniel Martí // See LICENSE for licensing information package main import ( "bytes" "fmt" "os" "os/exec" "path/filepath" "strconv" "testing" qt "github.com/frankban/quicktest" "golang.org/x/sys/unix" ) func init() { // Here rather than in TestMain, to reuse the unix build tag. if limit := os.Getenv("TEST_WITH_FILE_LIMIT"); limit != "" { n, err := strconv.ParseUint(limit, 10, 64) if err != nil { panic(err) } rlimit := unix.Rlimit{Cur: n, Max: n} if err := unix.Setrlimit(unix.RLIMIT_NOFILE, &rlimit); err != nil { panic(err) } os.Exit(main1()) } } func TestWithLowOpenFileLimit(t *testing.T) { // Safe to run in parallel, as we only change the limit for child processes. t.Parallel() tempDir := t.TempDir() testBinary, err := os.Executable() qt.Assert(t, err, qt.IsNil) const ( // Enough directories to run into the ulimit. // Enough number of files in total to run into the ulimit. numberDirs = 500 numberFilesPerDir = 20 numberFilesTotal = numberDirs * numberFilesPerDir ) t.Logf("writing %d tiny Go files", numberFilesTotal) var allGoFiles []string for i := 0; i < numberDirs; i++ { // Prefix "p", so the package name is a valid identifier. // Add one go.mod file per directory as well, // which will help catch data races when loading module info. 
dirName := fmt.Sprintf("p%03d", i) dirPath := filepath.Join(tempDir, dirName) err := os.MkdirAll(dirPath, 0o777) qt.Assert(t, err, qt.IsNil) err = os.WriteFile(filepath.Join(dirPath, "go.mod"), []byte(fmt.Sprintf("module %s\n\ngo 1.16", dirName)), 0o666) qt.Assert(t, err, qt.IsNil) for j := 0; j < numberFilesPerDir; j++ { filePath := filepath.Join(dirPath, fmt.Sprintf("%03d.go", j)) err := os.WriteFile(filePath, // Extra newlines so that "-l" prints all paths. []byte(fmt.Sprintf("package %s\n\n\n", dirName)), 0o666) qt.Assert(t, err, qt.IsNil) allGoFiles = append(allGoFiles, filePath) } } if len(allGoFiles) != numberFilesTotal { panic("allGoFiles doesn't have the expected number of files?") } runGofmt := func(paths ...string) { t.Logf("running with %d paths", len(paths)) cmd := exec.Command(testBinary, append([]string{"-l"}, paths...)...) // 256 is a relatively common low limit, e.g. on Mac. cmd.Env = append(os.Environ(), "TEST_WITH_FILE_LIMIT=256") out, err := cmd.Output() var stderr []byte if err, _ := err.(*exec.ExitError); err != nil { stderr = err.Stderr } qt.Assert(t, err, qt.IsNil, qt.Commentf("stderr:\n%s", stderr)) qt.Assert(t, bytes.Count(out, []byte("\n")), qt.Equals, len(allGoFiles)) } runGofmt(tempDir) runGofmt(allGoFiles...) }