pax_global_header00006660000000000000000000000064146370207270014522gustar00rootroot0000000000000052 comment=d58be1cb16e62a9821b6dbd0157b8c7ff0b667ec mod-0.19.0/000077500000000000000000000000001463702072700123705ustar00rootroot00000000000000mod-0.19.0/LICENSE000066400000000000000000000027071463702072700134030ustar00rootroot00000000000000Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
mod-0.19.0/PATENTS000066400000000000000000000024271463702072700134360ustar00rootroot00000000000000Additional IP Rights Grant (Patents) "This implementation" means the copyrightable works distributed by Google as part of the Go project. Google hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, transfer and otherwise run, modify and propagate the contents of this implementation of Go, where such license applies only to those patent claims, both currently owned or controlled by Google and acquired in the future, licensable by Google that are necessarily infringed by this implementation of Go. This grant does not include claims that would be infringed only as a consequence of further modification of this implementation. If you or your agent or exclusive licensee institute or order or agree to the institution of patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that this implementation of Go or any code incorporated within this implementation of Go constitutes direct or contributory patent infringement, or inducement of patent infringement, then any patent rights granted to you under this License for this implementation of Go shall terminate as of the date such litigation is filed. mod-0.19.0/README.md000066400000000000000000000013621463702072700136510ustar00rootroot00000000000000# mod [![PkgGoDev](https://pkg.go.dev/badge/golang.org/x/mod)](https://pkg.go.dev/golang.org/x/mod) This repository holds packages for writing tools that work directly with Go module mechanics. That is, it is for direct manipulation of Go modules themselves. It is NOT about supporting general development tools that need to do things like load packages in module mode. 
That use case, where modules are incidental rather than the focus, should remain in [x/tools](https://pkg.go.dev/golang/org/x/tools), specifically [x/tools/go/packages](https://pkg.go.dev/golang.org/x/tools/go/packages). The specific case of loading packages should still be done by invoking the go command, which remains the single point of truth for package loading algorithms. mod-0.19.0/codereview.cfg000066400000000000000000000000251463702072700152020ustar00rootroot00000000000000issuerepo: golang/go mod-0.19.0/go.mod000066400000000000000000000001241463702072700134730ustar00rootroot00000000000000module golang.org/x/mod go 1.18 require golang.org/x/tools v0.13.0 // tagx:ignore mod-0.19.0/go.sum000066400000000000000000000002351463702072700135230ustar00rootroot00000000000000golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= mod-0.19.0/gosumcheck/000077500000000000000000000000001463702072700145205ustar00rootroot00000000000000mod-0.19.0/gosumcheck/main.go000066400000000000000000000121201463702072700157670ustar00rootroot00000000000000// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Gosumcheck checks a go.sum file against a go.sum database server. // // Usage: // // gosumcheck [-h H] [-k key] [-u url] [-v] go.sum // // The -h flag changes the tile height (default 8). // // The -k flag changes the go.sum database server key. // // The -u flag overrides the URL of the server (usually set from the key name). // // The -v flag enables verbose output. // In particular, it causes gosumcheck to report // the URL and elapsed time for each server request. // // WARNING! WARNING! WARNING! // // Gosumcheck is meant as a proof of concept demo and should not be // used in production scripts or continuous integration testing. 
// It does not cache any downloaded information from run to run, // making it expensive and also keeping it from detecting server // misbehavior or successful HTTPS man-in-the-middle timeline forks. // // To discourage misuse in automated settings, gosumcheck does not // set any exit status to report whether any problems were found. package main import ( "flag" "fmt" "io" "log" "net/http" "os" "os/exec" "strings" "sync" "time" "golang.org/x/mod/sumdb" ) func usage() { fmt.Fprintf(os.Stderr, "usage: gosumcheck [-h H] [-k key] [-u url] [-v] go.sum...\n") os.Exit(2) } var ( height = flag.Int("h", 8, "tile height") vkey = flag.String("k", "sum.golang.org+033de0ae+Ac4zctda0e5eza+HJyk9SxEdh+s3Ux18htTTAD8OuAn8", "key") url = flag.String("u", "", "url to server (overriding name)") vflag = flag.Bool("v", false, "enable verbose output") ) func main() { log.SetPrefix("notecheck: ") log.SetFlags(0) flag.Usage = usage flag.Parse() if flag.NArg() < 1 { usage() } client := sumdb.NewClient(new(clientOps)) // Look in environment explicitly, so that if 'go env' is old and // doesn't know about GONOSUMDB, we at least get anything // set in the environment. 
env := os.Getenv("GONOSUMDB") if env == "" { out, err := exec.Command("go", "env", "GONOSUMDB").CombinedOutput() if err != nil { log.Fatalf("go env GONOSUMDB: %v\n%s", err, out) } env = strings.TrimSpace(string(out)) } client.SetGONOSUMDB(env) for _, arg := range flag.Args() { data, err := os.ReadFile(arg) if err != nil { log.Fatal(err) } checkGoSum(client, arg, data) } } func checkGoSum(client *sumdb.Client, name string, data []byte) { lines := strings.Split(string(data), "\n") if lines[len(lines)-1] != "" { log.Printf("error: final line missing newline") return } lines = lines[:len(lines)-1] errs := make([]string, len(lines)) var wg sync.WaitGroup for i, line := range lines { wg.Add(1) go func(i int, line string) { defer wg.Done() f := strings.Fields(line) if len(f) != 3 { errs[i] = "invalid number of fields" return } dbLines, err := client.Lookup(f[0], f[1]) if err != nil { if err == sumdb.ErrGONOSUMDB { errs[i] = fmt.Sprintf("%s@%s: %v", f[0], f[1], err) } else { // Otherwise Lookup properly adds the prefix itself. errs[i] = err.Error() } return } hashAlgPrefix := f[0] + " " + f[1] + " " + f[2][:strings.Index(f[2], ":")+1] for _, dbLine := range dbLines { if dbLine == line { return } if strings.HasPrefix(dbLine, hashAlgPrefix) { errs[i] = fmt.Sprintf("%s@%s hash mismatch: have %s, want %s", f[0], f[1], line, dbLine) return } } errs[i] = fmt.Sprintf("%s@%s hash algorithm mismatch: have %s, want one of:\n\t%s", f[0], f[1], line, strings.Join(dbLines, "\n\t")) }(i, line) } wg.Wait() for i, err := range errs { if err != "" { fmt.Printf("%s:%d: %s\n", name, i+1, err) } } } type clientOps struct{} func (*clientOps) ReadConfig(file string) ([]byte, error) { if file == "key" { return []byte(*vkey), nil } if strings.HasSuffix(file, "/latest") { // Looking for cached latest tree head. // Empty result means empty tree. 
return []byte{}, nil } return nil, fmt.Errorf("unknown config %s", file) } func (*clientOps) WriteConfig(file string, old, new []byte) error { // Ignore writes. return nil } func (*clientOps) ReadCache(file string) ([]byte, error) { return nil, fmt.Errorf("no cache") } func (*clientOps) WriteCache(file string, data []byte) { // Ignore writes. } func (*clientOps) Log(msg string) { log.Print(msg) } func (*clientOps) SecurityError(msg string) { log.Fatal(msg) } func init() { http.DefaultClient.Timeout = 1 * time.Minute } func (*clientOps) ReadRemote(path string) ([]byte, error) { name := *vkey if i := strings.Index(name, "+"); i >= 0 { name = name[:i] } start := time.Now() target := "https://" + name + path if *url != "" { target = *url + path } resp, err := http.Get(target) if err != nil { return nil, err } defer resp.Body.Close() if resp.StatusCode != 200 { return nil, fmt.Errorf("GET %v: %v", target, resp.Status) } data, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) if err != nil { return nil, err } if *vflag { fmt.Fprintf(os.Stderr, "%.3fs %s\n", time.Since(start).Seconds(), target) } return data, nil } mod-0.19.0/gosumcheck/test.bash000077500000000000000000000002621463702072700163410ustar00rootroot00000000000000#!/bin/bash set -e go build -o gosumcheck.exe export GONOSUMDB=*/text # rsc.io/text but not golang.org/x/text ./gosumcheck.exe "$@" -v test.sum rm -f ./gosumcheck.exe echo PASS mod-0.19.0/gosumcheck/test.sum000066400000000000000000000007601463702072700162300ustar00rootroot00000000000000golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c h1:qgOY6WgZOaTkIIMiVjBQcw93ERBE4m30iBm00nkL0i8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= rsc.io/quote v1.5.2 h1:w5fcysjrx7yqtD/aO+QwRjYZOKnaM9Uh2b40tElTs3Y= rsc.io/quote v1.5.2/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0= rsc.io/text v1.5.2 h1:w5fcysjrx7yqtD/aO+QwRjYZOKnaM9Uh2b40tElTs3Y= rsc.io/sampler v1.3.0/go.mod 
h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= mod-0.19.0/internal/000077500000000000000000000000001463702072700142045ustar00rootroot00000000000000mod-0.19.0/internal/lazyregexp/000077500000000000000000000000001463702072700163765ustar00rootroot00000000000000mod-0.19.0/internal/lazyregexp/lazyre.go000066400000000000000000000035171463702072700202410ustar00rootroot00000000000000// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package lazyregexp is a thin wrapper over regexp, allowing the use of global // regexp variables without forcing them to be compiled at init. package lazyregexp import ( "os" "regexp" "strings" "sync" ) // Regexp is a wrapper around [regexp.Regexp], where the underlying regexp will be // compiled the first time it is needed. type Regexp struct { str string once sync.Once rx *regexp.Regexp } func (r *Regexp) re() *regexp.Regexp { r.once.Do(r.build) return r.rx } func (r *Regexp) build() { r.rx = regexp.MustCompile(r.str) r.str = "" } func (r *Regexp) FindSubmatch(s []byte) [][]byte { return r.re().FindSubmatch(s) } func (r *Regexp) FindStringSubmatch(s string) []string { return r.re().FindStringSubmatch(s) } func (r *Regexp) FindStringSubmatchIndex(s string) []int { return r.re().FindStringSubmatchIndex(s) } func (r *Regexp) ReplaceAllString(src, repl string) string { return r.re().ReplaceAllString(src, repl) } func (r *Regexp) FindString(s string) string { return r.re().FindString(s) } func (r *Regexp) FindAllString(s string, n int) []string { return r.re().FindAllString(s, n) } func (r *Regexp) MatchString(s string) bool { return r.re().MatchString(s) } func (r *Regexp) SubexpNames() []string { return r.re().SubexpNames() } var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") // New creates a new lazy regexp, delaying the compiling work until it is first // needed. 
If the code is being run as part of tests, the regexp compiling will // happen immediately. func New(str string) *Regexp { lr := &Regexp{str: str} if inTest { // In tests, always compile the regexps early. lr.re() } return lr } mod-0.19.0/modfile/000077500000000000000000000000001463702072700140075ustar00rootroot00000000000000mod-0.19.0/modfile/print.go000066400000000000000000000074361463702072700155040ustar00rootroot00000000000000// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Module file printer. package modfile import ( "bytes" "fmt" "strings" ) // Format returns a go.mod file as a byte slice, formatted in standard style. func Format(f *FileSyntax) []byte { pr := &printer{} pr.file(f) // remove trailing blank lines b := pr.Bytes() for len(b) > 0 && b[len(b)-1] == '\n' && (len(b) == 1 || b[len(b)-2] == '\n') { b = b[:len(b)-1] } return b } // A printer collects the state during printing of a file or expression. type printer struct { bytes.Buffer // output buffer comment []Comment // pending end-of-line comments margin int // left margin (indent), a number of tabs } // printf prints to the buffer. func (p *printer) printf(format string, args ...interface{}) { fmt.Fprintf(p, format, args...) } // indent returns the position on the current line, in bytes, 0-indexed. func (p *printer) indent() int { b := p.Bytes() n := 0 for n < len(b) && b[len(b)-1-n] != '\n' { n++ } return n } // newline ends the current line, flushing end-of-line comments. 
func (p *printer) newline() { if len(p.comment) > 0 { p.printf(" ") for i, com := range p.comment { if i > 0 { p.trim() p.printf("\n") for i := 0; i < p.margin; i++ { p.printf("\t") } } p.printf("%s", strings.TrimSpace(com.Token)) } p.comment = p.comment[:0] } p.trim() if b := p.Bytes(); len(b) == 0 || (len(b) >= 2 && b[len(b)-1] == '\n' && b[len(b)-2] == '\n') { // skip the blank line at top of file or after a blank line } else { p.printf("\n") } for i := 0; i < p.margin; i++ { p.printf("\t") } } // trim removes trailing spaces and tabs from the current line. func (p *printer) trim() { // Remove trailing spaces and tabs from line we're about to end. b := p.Bytes() n := len(b) for n > 0 && (b[n-1] == '\t' || b[n-1] == ' ') { n-- } p.Truncate(n) } // file formats the given file into the print buffer. func (p *printer) file(f *FileSyntax) { for _, com := range f.Before { p.printf("%s", strings.TrimSpace(com.Token)) p.newline() } for i, stmt := range f.Stmt { switch x := stmt.(type) { case *CommentBlock: // comments already handled p.expr(x) default: p.expr(x) p.newline() } for _, com := range stmt.Comment().After { p.printf("%s", strings.TrimSpace(com.Token)) p.newline() } if i+1 < len(f.Stmt) { p.newline() } } } func (p *printer) expr(x Expr) { // Emit line-comments preceding this expression. if before := x.Comment().Before; len(before) > 0 { // Want to print a line comment. // Line comments must be at the current margin. p.trim() if p.indent() > 0 { // There's other text on the line. Start a new line. p.printf("\n") } // Re-indent to margin. 
for i := 0; i < p.margin; i++ { p.printf("\t") } for _, com := range before { p.printf("%s", strings.TrimSpace(com.Token)) p.newline() } } switch x := x.(type) { default: panic(fmt.Errorf("printer: unexpected type %T", x)) case *CommentBlock: // done case *LParen: p.printf("(") case *RParen: p.printf(")") case *Line: p.tokens(x.Token) case *LineBlock: p.tokens(x.Token) p.printf(" ") p.expr(&x.LParen) p.margin++ for _, l := range x.Line { p.newline() p.expr(l) } p.margin-- p.newline() p.expr(&x.RParen) } // Queue end-of-line comments for printing when we // reach the end of the line. p.comment = append(p.comment, x.Comment().Suffix...) } func (p *printer) tokens(tokens []string) { sep := "" for _, t := range tokens { if t == "," || t == ")" || t == "]" || t == "}" { sep = "" } p.printf("%s%s", sep, t) sep = " " if t == "(" || t == "[" || t == "{" { sep = "" } } } mod-0.19.0/modfile/read.go000066400000000000000000000562061463702072700152620ustar00rootroot00000000000000// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package modfile import ( "bytes" "errors" "fmt" "os" "strconv" "strings" "unicode" "unicode/utf8" ) // A Position describes an arbitrary source position in a file, including the // file, line, column, and byte offset. type Position struct { Line int // line in input (starting at 1) LineRune int // rune in line (starting at 1) Byte int // byte in input (starting at 0) } // add returns the position at the end of s, assuming it starts at p. func (p Position) add(s string) Position { p.Byte += len(s) if n := strings.Count(s, "\n"); n > 0 { p.Line += n s = s[strings.LastIndex(s, "\n")+1:] p.LineRune = 1 } p.LineRune += utf8.RuneCountInString(s) return p } // An Expr represents an input element. type Expr interface { // Span returns the start and end position of the expression, // excluding leading or trailing comments. 
Span() (start, end Position) // Comment returns the comments attached to the expression. // This method would normally be named 'Comments' but that // would interfere with embedding a type of the same name. Comment() *Comments } // A Comment represents a single // comment. type Comment struct { Start Position Token string // without trailing newline Suffix bool // an end of line (not whole line) comment } // Comments collects the comments associated with an expression. type Comments struct { Before []Comment // whole-line comments before this expression Suffix []Comment // end-of-line comments after this expression // For top-level expressions only, After lists whole-line // comments following the expression. After []Comment } // Comment returns the receiver. This isn't useful by itself, but // a [Comments] struct is embedded into all the expression // implementation types, and this gives each of those a Comment // method to satisfy the Expr interface. func (c *Comments) Comment() *Comments { return c } // A FileSyntax represents an entire go.mod file. type FileSyntax struct { Name string // file path Comments Stmt []Expr } func (x *FileSyntax) Span() (start, end Position) { if len(x.Stmt) == 0 { return } start, _ = x.Stmt[0].Span() _, end = x.Stmt[len(x.Stmt)-1].Span() return start, end } // addLine adds a line containing the given tokens to the file. // // If the first token of the hint matches the first token of the // line, the new line is added at the end of the block containing hint, // extracting hint into a new block if it is not yet in one. // // If the hint is non-nil buts its first token does not match, // the new line is added after the block containing hint // (or hint itself, if not in a block). // // If no hint is provided, addLine appends the line to the end of // the last block with a matching first token, // or to the end of the file if no such block exists. 
func (x *FileSyntax) addLine(hint Expr, tokens ...string) *Line { if hint == nil { // If no hint given, add to the last statement of the given type. Loop: for i := len(x.Stmt) - 1; i >= 0; i-- { stmt := x.Stmt[i] switch stmt := stmt.(type) { case *Line: if stmt.Token != nil && stmt.Token[0] == tokens[0] { hint = stmt break Loop } case *LineBlock: if stmt.Token[0] == tokens[0] { hint = stmt break Loop } } } } newLineAfter := func(i int) *Line { new := &Line{Token: tokens} if i == len(x.Stmt) { x.Stmt = append(x.Stmt, new) } else { x.Stmt = append(x.Stmt, nil) copy(x.Stmt[i+2:], x.Stmt[i+1:]) x.Stmt[i+1] = new } return new } if hint != nil { for i, stmt := range x.Stmt { switch stmt := stmt.(type) { case *Line: if stmt == hint { if stmt.Token == nil || stmt.Token[0] != tokens[0] { return newLineAfter(i) } // Convert line to line block. stmt.InBlock = true block := &LineBlock{Token: stmt.Token[:1], Line: []*Line{stmt}} stmt.Token = stmt.Token[1:] x.Stmt[i] = block new := &Line{Token: tokens[1:], InBlock: true} block.Line = append(block.Line, new) return new } case *LineBlock: if stmt == hint { if stmt.Token[0] != tokens[0] { return newLineAfter(i) } new := &Line{Token: tokens[1:], InBlock: true} stmt.Line = append(stmt.Line, new) return new } for j, line := range stmt.Line { if line == hint { if stmt.Token[0] != tokens[0] { return newLineAfter(i) } // Add new line after hint within the block. stmt.Line = append(stmt.Line, nil) copy(stmt.Line[j+2:], stmt.Line[j+1:]) new := &Line{Token: tokens[1:], InBlock: true} stmt.Line[j+1] = new return new } } } } } new := &Line{Token: tokens} x.Stmt = append(x.Stmt, new) return new } func (x *FileSyntax) updateLine(line *Line, tokens ...string) { if line.InBlock { tokens = tokens[1:] } line.Token = tokens } // markRemoved modifies line so that it (and its end-of-line comment, if any) // will be dropped by (*FileSyntax).Cleanup. 
func (line *Line) markRemoved() { line.Token = nil line.Comments.Suffix = nil } // Cleanup cleans up the file syntax x after any edit operations. // To avoid quadratic behavior, (*Line).markRemoved marks the line as dead // by setting line.Token = nil but does not remove it from the slice // in which it appears. After edits have all been indicated, // calling Cleanup cleans out the dead lines. func (x *FileSyntax) Cleanup() { w := 0 for _, stmt := range x.Stmt { switch stmt := stmt.(type) { case *Line: if stmt.Token == nil { continue } case *LineBlock: ww := 0 for _, line := range stmt.Line { if line.Token != nil { stmt.Line[ww] = line ww++ } } if ww == 0 { continue } if ww == 1 && len(stmt.RParen.Comments.Before) == 0 { // Collapse block into single line. line := &Line{ Comments: Comments{ Before: commentsAdd(stmt.Before, stmt.Line[0].Before), Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix), After: commentsAdd(stmt.Line[0].After, stmt.After), }, Token: stringsAdd(stmt.Token, stmt.Line[0].Token), } x.Stmt[w] = line w++ continue } stmt.Line = stmt.Line[:ww] } x.Stmt[w] = stmt w++ } x.Stmt = x.Stmt[:w] } func commentsAdd(x, y []Comment) []Comment { return append(x[:len(x):len(x)], y...) } func stringsAdd(x, y []string) []string { return append(x[:len(x):len(x)], y...) } // A CommentBlock represents a top-level block of comments separate // from any rule. type CommentBlock struct { Comments Start Position } func (x *CommentBlock) Span() (start, end Position) { return x.Start, x.Start } // A Line is a single line of tokens. 
type Line struct { Comments Start Position Token []string InBlock bool End Position } func (x *Line) Span() (start, end Position) { return x.Start, x.End } // A LineBlock is a factored block of lines, like // // require ( // "x" // "y" // ) type LineBlock struct { Comments Start Position LParen LParen Token []string Line []*Line RParen RParen } func (x *LineBlock) Span() (start, end Position) { return x.Start, x.RParen.Pos.add(")") } // An LParen represents the beginning of a parenthesized line block. // It is a place to store suffix comments. type LParen struct { Comments Pos Position } func (x *LParen) Span() (start, end Position) { return x.Pos, x.Pos.add(")") } // An RParen represents the end of a parenthesized line block. // It is a place to store whole-line (before) comments. type RParen struct { Comments Pos Position } func (x *RParen) Span() (start, end Position) { return x.Pos, x.Pos.add(")") } // An input represents a single input file being parsed. type input struct { // Lexing state. filename string // name of input file, for errors complete []byte // entire input remaining []byte // remaining input tokenStart []byte // token being scanned to end of input token token // next token to be returned by lex, peek pos Position // current input position comments []Comment // accumulated comments // Parser state. file *FileSyntax // returned top-level syntax tree parseErrors ErrorList // errors encountered during parsing // Comment assignment state. pre []Expr // all expressions, in preorder traversal post []Expr // all expressions, in postorder traversal } func newInput(filename string, data []byte) *input { return &input{ filename: filename, complete: data, remaining: data, pos: Position{Line: 1, LineRune: 1, Byte: 0}, } } // parse parses the input file. func parse(file string, data []byte) (f *FileSyntax, err error) { // The parser panics for both routine errors like syntax errors // and for programmer bugs like array index errors. 
// Turn both into error returns. Catching bug panics is // especially important when processing many files. in := newInput(file, data) defer func() { if e := recover(); e != nil && e != &in.parseErrors { in.parseErrors = append(in.parseErrors, Error{ Filename: in.filename, Pos: in.pos, Err: fmt.Errorf("internal error: %v", e), }) } if err == nil && len(in.parseErrors) > 0 { err = in.parseErrors } }() // Prime the lexer by reading in the first token. It will be available // in the next peek() or lex() call. in.readToken() // Invoke the parser. in.parseFile() if len(in.parseErrors) > 0 { return nil, in.parseErrors } in.file.Name = in.filename // Assign comments to nearby syntax. in.assignComments() return in.file, nil } // Error is called to report an error. // Error does not return: it panics. func (in *input) Error(s string) { in.parseErrors = append(in.parseErrors, Error{ Filename: in.filename, Pos: in.pos, Err: errors.New(s), }) panic(&in.parseErrors) } // eof reports whether the input has reached end of file. func (in *input) eof() bool { return len(in.remaining) == 0 } // peekRune returns the next rune in the input without consuming it. func (in *input) peekRune() int { if len(in.remaining) == 0 { return 0 } r, _ := utf8.DecodeRune(in.remaining) return int(r) } // peekPrefix reports whether the remaining input begins with the given prefix. func (in *input) peekPrefix(prefix string) bool { // This is like bytes.HasPrefix(in.remaining, []byte(prefix)) // but without the allocation of the []byte copy of prefix. for i := 0; i < len(prefix); i++ { if i >= len(in.remaining) || in.remaining[i] != prefix[i] { return false } } return true } // readRune consumes and returns the next rune in the input. 
func (in *input) readRune() int { if len(in.remaining) == 0 { in.Error("internal lexer error: readRune at EOF") } r, size := utf8.DecodeRune(in.remaining) in.remaining = in.remaining[size:] if r == '\n' { in.pos.Line++ in.pos.LineRune = 1 } else { in.pos.LineRune++ } in.pos.Byte += size return int(r) } type token struct { kind tokenKind pos Position endPos Position text string } type tokenKind int const ( _EOF tokenKind = -(iota + 1) _EOLCOMMENT _IDENT _STRING _COMMENT // newlines and punctuation tokens are allowed as ASCII codes. ) func (k tokenKind) isComment() bool { return k == _COMMENT || k == _EOLCOMMENT } // isEOL returns whether a token terminates a line. func (k tokenKind) isEOL() bool { return k == _EOF || k == _EOLCOMMENT || k == '\n' } // startToken marks the beginning of the next input token. // It must be followed by a call to endToken, once the token's text has // been consumed using readRune. func (in *input) startToken() { in.tokenStart = in.remaining in.token.text = "" in.token.pos = in.pos } // endToken marks the end of an input token. // It records the actual token string in tok.text. // A single trailing newline (LF or CRLF) will be removed from comment tokens. func (in *input) endToken(kind tokenKind) { in.token.kind = kind text := string(in.tokenStart[:len(in.tokenStart)-len(in.remaining)]) if kind.isComment() { if strings.HasSuffix(text, "\r\n") { text = text[:len(text)-2] } else { text = strings.TrimSuffix(text, "\n") } } in.token.text = text in.token.endPos = in.pos } // peek returns the kind of the next token returned by lex. func (in *input) peek() tokenKind { return in.token.kind } // lex is called from the parser to obtain the next input token. func (in *input) lex() token { tok := in.token in.readToken() return tok } // readToken lexes the next token from the text and stores it in in.token. func (in *input) readToken() { // Skip past spaces, stopping at non-space or EOF. 
for !in.eof() { c := in.peekRune() if c == ' ' || c == '\t' || c == '\r' { in.readRune() continue } // Comment runs to end of line. if in.peekPrefix("//") { in.startToken() // Is this comment the only thing on its line? // Find the last \n before this // and see if it's all // spaces from there to here. i := bytes.LastIndex(in.complete[:in.pos.Byte], []byte("\n")) suffix := len(bytes.TrimSpace(in.complete[i+1:in.pos.Byte])) > 0 in.readRune() in.readRune() // Consume comment. for len(in.remaining) > 0 && in.readRune() != '\n' { } // If we are at top level (not in a statement), hand the comment to // the parser as a _COMMENT token. The grammar is written // to handle top-level comments itself. if !suffix { in.endToken(_COMMENT) return } // Otherwise, save comment for later attachment to syntax tree. in.endToken(_EOLCOMMENT) in.comments = append(in.comments, Comment{in.token.pos, in.token.text, suffix}) return } if in.peekPrefix("/*") { in.Error("mod files must use // comments (not /* */ comments)") } // Found non-space non-comment. break } // Found the beginning of the next token. in.startToken() // End of file. if in.eof() { in.endToken(_EOF) return } // Punctuation tokens. switch c := in.peekRune(); c { case '\n', '(', ')', '[', ']', '{', '}', ',': in.readRune() in.endToken(tokenKind(c)) return case '"', '`': // quoted string quote := c in.readRune() for { if in.eof() { in.pos = in.token.pos in.Error("unexpected EOF in string") } if in.peekRune() == '\n' { in.Error("unexpected newline in string") } c := in.readRune() if c == quote { break } if c == '\\' && quote != '`' { if in.eof() { in.pos = in.token.pos in.Error("unexpected EOF in string") } in.readRune() } } in.endToken(_STRING) return } // Checked all punctuation. Must be identifier token. if c := in.peekRune(); !isIdent(c) { in.Error(fmt.Sprintf("unexpected input character %#q", c)) } // Scan over identifier. 
for isIdent(in.peekRune()) { if in.peekPrefix("//") { break } if in.peekPrefix("/*") { in.Error("mod files must use // comments (not /* */ comments)") } in.readRune() } in.endToken(_IDENT) } // isIdent reports whether c is an identifier rune. // We treat most printable runes as identifier runes, except for a handful of // ASCII punctuation characters. func isIdent(c int) bool { switch r := rune(c); r { case ' ', '(', ')', '[', ']', '{', '}', ',': return false default: return !unicode.IsSpace(r) && unicode.IsPrint(r) } } // Comment assignment. // We build two lists of all subexpressions, preorder and postorder. // The preorder list is ordered by start location, with outer expressions first. // The postorder list is ordered by end location, with outer expressions last. // We use the preorder list to assign each whole-line comment to the syntax // immediately following it, and we use the postorder list to assign each // end-of-line comment to the syntax immediately preceding it. // order walks the expression adding it and its subexpressions to the // preorder and postorder lists. func (in *input) order(x Expr) { if x != nil { in.pre = append(in.pre, x) } switch x := x.(type) { default: panic(fmt.Errorf("order: unexpected type %T", x)) case nil: // nothing case *LParen, *RParen: // nothing case *CommentBlock: // nothing case *Line: // nothing case *FileSyntax: for _, stmt := range x.Stmt { in.order(stmt) } case *LineBlock: in.order(&x.LParen) for _, l := range x.Line { in.order(l) } in.order(&x.RParen) } if x != nil { in.post = append(in.post, x) } } // assignComments attaches comments to nearby syntax. func (in *input) assignComments() { const debug = false // Generate preorder and postorder lists. in.order(in.file) // Split into whole-line comments and suffix comments. 
	// line holds whole-line comments; suffix holds end-of-line comments.
	var line, suffix []Comment
	for _, com := range in.comments {
		if com.Suffix {
			suffix = append(suffix, com)
		} else {
			line = append(line, com)
		}
	}

	if debug {
		for _, c := range line {
			fmt.Fprintf(os.Stderr, "LINE %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte)
		}
	}

	// Assign line comments to syntax immediately following.
	for _, x := range in.pre {
		start, _ := x.Span()
		if debug {
			fmt.Fprintf(os.Stderr, "pre %T :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte)
		}
		xcom := x.Comment()
		for len(line) > 0 && start.Byte >= line[0].Start.Byte {
			if debug {
				fmt.Fprintf(os.Stderr, "ASSIGN LINE %q #%d\n", line[0].Token, line[0].Start.Byte)
			}
			xcom.Before = append(xcom.Before, line[0])
			line = line[1:]
		}
	}

	// Remaining line comments go at end of file.
	in.file.After = append(in.file.After, line...)

	if debug {
		for _, c := range suffix {
			fmt.Fprintf(os.Stderr, "SUFFIX %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte)
		}
	}

	// Assign suffix comments to syntax immediately before.
	// Walk postorder backwards so innermost (last-ending) syntax is tried first.
	for i := len(in.post) - 1; i >= 0; i-- {
		x := in.post[i]

		start, end := x.Span()
		if debug {
			fmt.Fprintf(os.Stderr, "post %T :%d:%d #%d :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte, end.Line, end.LineRune, end.Byte)
		}

		// Do not assign suffix comments to end of line block or whole file.
		// Instead assign them to the last element inside.
		switch x.(type) {
		case *FileSyntax:
			continue
		}

		// Do not assign suffix comments to something that starts
		// on an earlier line, so that in
		//
		//	x ( y
		//	z ) // comment
		//
		// we assign the comment to z and not to x ( ... ).
		if start.Line != end.Line {
			continue
		}
		xcom := x.Comment()
		for len(suffix) > 0 && end.Byte <= suffix[len(suffix)-1].Start.Byte {
			if debug {
				fmt.Fprintf(os.Stderr, "ASSIGN SUFFIX %q #%d\n", suffix[len(suffix)-1].Token, suffix[len(suffix)-1].Start.Byte)
			}
			xcom.Suffix = append(xcom.Suffix, suffix[len(suffix)-1])
			suffix = suffix[:len(suffix)-1]
		}
	}

	// We assigned suffix comments in reverse.
	// If multiple suffix comments were appended to the same
	// expression node, they are now in reverse. Fix that.
	for _, x := range in.post {
		reverseComments(x.Comment().Suffix)
	}

	// Remaining suffix comments go at beginning of file.
	in.file.Before = append(in.file.Before, suffix...)
}

// reverseComments reverses the []Comment list.
func reverseComments(list []Comment) {
	for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 {
		list[i], list[j] = list[j], list[i]
	}
}

// parseFile parses the entire token stream into in.file, collecting runs of
// top-level _COMMENT tokens into CommentBlock statements (or attaching them
// to the statement that immediately follows).
func (in *input) parseFile() {
	in.file = new(FileSyntax)
	var cb *CommentBlock
	for {
		switch in.peek() {
		case '\n':
			in.lex()
			// A blank line terminates a pending comment block.
			if cb != nil {
				in.file.Stmt = append(in.file.Stmt, cb)
				cb = nil
			}
		case _COMMENT:
			tok := in.lex()
			if cb == nil {
				cb = &CommentBlock{Start: tok.pos}
			}
			com := cb.Comment()
			com.Before = append(com.Before, Comment{Start: tok.pos, Token: tok.text})
		case _EOF:
			if cb != nil {
				in.file.Stmt = append(in.file.Stmt, cb)
			}
			return
		default:
			in.parseStmt()
			// A comment block immediately before a statement becomes
			// that statement's Before comments.
			if cb != nil {
				in.file.Stmt[len(in.file.Stmt)-1].Comment().Before = cb.Before
				cb = nil
			}
		}
	}
}

// parseStmt parses a single top-level statement — either a one-line
// directive (Line) or a parenthesized block (LineBlock) — and appends it
// to in.file.Stmt.
func (in *input) parseStmt() {
	tok := in.lex()
	start := tok.pos
	end := tok.endPos
	tokens := []string{tok.text}
	for {
		tok := in.lex()
		switch {
		case tok.kind.isEOL():
			in.file.Stmt = append(in.file.Stmt, &Line{
				Start: start,
				Token: tokens,
				End:   end,
			})
			return

		case tok.kind == '(':
			if next := in.peek(); next.isEOL() {
				// Start of block: no more tokens on this line.
				in.file.Stmt = append(in.file.Stmt, in.parseLineBlock(start, tokens, tok))
				return
			} else if next == ')' {
				rparen := in.lex()
				if in.peek().isEOL() {
					// Empty block.
					in.lex()
					in.file.Stmt = append(in.file.Stmt, &LineBlock{
						Start:  start,
						Token:  tokens,
						LParen: LParen{Pos: tok.pos},
						RParen: RParen{Pos: rparen.pos},
					})
					return
				}
				// '( )' in the middle of the line, not a block.
				tokens = append(tokens, tok.text, rparen.text)
			} else {
				// '(' in the middle of the line, not a block.
				tokens = append(tokens, tok.text)
			}
		default:
			tokens = append(tokens, tok.text)
			end = tok.endPos
		}
	}
}

// parseLineBlock parses the body of a parenthesized block whose opening
// paren has already been consumed: start/token describe the directive text
// before the '(' and lparen is the '(' token itself. It returns the
// completed LineBlock after consuming the matching ')'.
func (in *input) parseLineBlock(start Position, token []string, lparen token) *LineBlock {
	x := &LineBlock{
		Start:  start,
		Token:  token,
		LParen: LParen{Pos: lparen.pos},
	}
	var comments []Comment
	for {
		switch in.peek() {
		case _EOLCOMMENT:
			// Suffix comment, will be attached later by assignComments.
			in.lex()
		case '\n':
			// Blank line. Add an empty comment to preserve it.
			in.lex()
			if len(comments) == 0 && len(x.Line) > 0 || len(comments) > 0 && comments[len(comments)-1].Token != "" {
				comments = append(comments, Comment{})
			}
		case _COMMENT:
			tok := in.lex()
			comments = append(comments, Comment{Start: tok.pos, Token: tok.text})
		case _EOF:
			in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune))
		case ')':
			rparen := in.lex()
			// Comments gathered since the last line belong to the ')'.
			x.RParen.Before = comments
			x.RParen.Pos = rparen.pos
			if !in.peek().isEOL() {
				in.Error("syntax error (expected newline after closing paren)")
			}
			in.lex()
			return x
		default:
			l := in.parseLine()
			x.Line = append(x.Line, l)
			l.Comment().Before = comments
			comments = nil
		}
	}
}

// parseLine parses one line inside a block and returns it as a Line with
// InBlock set.
func (in *input) parseLine() *Line {
	tok := in.lex()
	if tok.kind.isEOL() {
		in.Error("internal parse error: parseLine at end of line")
	}
	start := tok.pos
	end := tok.endPos
	tokens := []string{tok.text}
	for {
		tok := in.lex()
		if tok.kind.isEOL() {
			return &Line{
				Start:   start,
				Token:   tokens,
				End:     end,
				InBlock: true,
			}
		}
		tokens = append(tokens, tok.text)
		end = tok.endPos
	}
}

var (
	slashSlash = []byte("//")
	moduleStr  = []byte("module")
)

// ModulePath returns the module path from the gomod file text.
// If it cannot find a module path, it returns an empty string.
// It is tolerant of unrelated problems in the go.mod file.
func ModulePath(mod []byte) string {
	for len(mod) > 0 {
		// Peel off one line at a time.
		line := mod
		mod = nil
		if i := bytes.IndexByte(line, '\n'); i >= 0 {
			line, mod = line[:i], line[i+1:]
		}
		// Strip a trailing // comment, if any.
		if i := bytes.Index(line, slashSlash); i >= 0 {
			line = line[:i]
		}
		line = bytes.TrimSpace(line)
		if !bytes.HasPrefix(line, moduleStr) {
			continue
		}
		line = line[len(moduleStr):]
		n := len(line)
		line = bytes.TrimSpace(line)
		// Require whitespace after "module" (len changed after TrimSpace)
		// and a non-empty argument; otherwise keep scanning.
		if len(line) == n || len(line) == 0 {
			continue
		}

		if line[0] == '"' || line[0] == '`' {
			p, err := strconv.Unquote(string(line))
			if err != nil {
				return "" // malformed quoted string or multiline module path
			}
			return p
		}

		return string(line)
	}
	return "" // missing module path
}
mod-0.19.0/modfile/read_test.go000066400000000000000000000400601463702072700163100ustar00rootroot00000000000000// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package modfile

import (
	"bytes"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"reflect"
	"strings"
	"testing"
)

// exists reports whether the named file exists.
func exists(name string) bool {
	_, err := os.Stat(name)
	return err == nil
}

// Test that reading and then writing the golden files
// does not change their output.
func TestPrintGolden(t *testing.T) {
	outs, err := filepath.Glob("testdata/*.golden")
	if err != nil {
		t.Fatal(err)
	}
	for _, out := range outs {
		out := out
		name := strings.TrimSuffix(filepath.Base(out), ".golden")
		t.Run(name, func(t *testing.T) {
			t.Parallel()
			testPrint(t, out, out)
		})
	}
}

// testPrint is a helper for testing the printer.
// It reads the file named in, reformats it, and compares
// the result to the file named out.
func testPrint(t *testing.T, in, out string) { data, err := os.ReadFile(in) if err != nil { t.Error(err) return } golden, err := os.ReadFile(out) if err != nil { t.Error(err) return } base := "testdata/" + filepath.Base(in) f, err := parse(in, data) if err != nil { t.Error(err) return } ndata := Format(f) if !bytes.Equal(ndata, golden) { t.Errorf("formatted %s incorrectly: diff shows -golden, +ours", base) tdiff(t, string(golden), string(ndata)) return } } // TestParsePunctuation verifies that certain ASCII punctuation characters // (brackets, commas) are lexed as separate tokens, even when they're // surrounded by identifier characters. func TestParsePunctuation(t *testing.T) { for _, test := range []struct { desc, src, want string }{ {"paren", "require ()", "require ( )"}, {"brackets", "require []{},", "require [ ] { } ,"}, {"mix", "require a[b]c{d}e,", "require a [ b ] c { d } e ,"}, {"block_mix", "require (\n\ta[b]\n)", "require ( a [ b ] )"}, {"interval", "require [v1.0.0, v1.1.0)", "require [ v1.0.0 , v1.1.0 )"}, } { t.Run(test.desc, func(t *testing.T) { f, err := parse("go.mod", []byte(test.src)) if err != nil { t.Fatalf("parsing %q: %v", test.src, err) } var tokens []string for _, stmt := range f.Stmt { switch stmt := stmt.(type) { case *Line: tokens = append(tokens, stmt.Token...) case *LineBlock: tokens = append(tokens, stmt.Token...) tokens = append(tokens, "(") for _, line := range stmt.Line { tokens = append(tokens, line.Token...) } tokens = append(tokens, ")") default: t.Fatalf("parsing %q: unexpected statement of type %T", test.src, stmt) } } got := strings.Join(tokens, " ") if got != test.want { t.Errorf("parsing %q: got %q, want %q", test.src, got, test.want) } }) } } func TestParseLax(t *testing.T) { badFile := []byte(`module m surprise attack x y ( z ) exclude v1.2.3 replace <-!!! 
retract v1.2.3 v1.2.4 retract (v1.2.3, v1.2.4] retract v1.2.3 ( key1 value1 key2 value2 ) require good v1.0.0 `) f, err := ParseLax("file", badFile, nil) if err != nil { t.Fatalf("ParseLax did not ignore irrelevant errors: %v", err) } if f.Module == nil || f.Module.Mod.Path != "m" { t.Errorf("module directive was not parsed") } if len(f.Require) != 1 || f.Require[0].Mod.Path != "good" { t.Errorf("require directive at end of file was not parsed") } } // Test that when files in the testdata directory are parsed // and printed and parsed again, we get the same parse tree // both times. func TestPrintParse(t *testing.T) { outs, err := filepath.Glob("testdata/*") if err != nil { t.Fatal(err) } for _, out := range outs { out := out name := filepath.Base(out) if !strings.HasSuffix(out, ".in") && !strings.HasSuffix(out, ".golden") { continue } t.Run(name, func(t *testing.T) { t.Parallel() data, err := os.ReadFile(out) if err != nil { t.Fatal(err) } base := "testdata/" + filepath.Base(out) f, err := parse(base, data) if err != nil { t.Fatalf("parsing original: %v", err) } ndata := Format(f) f2, err := parse(base, ndata) if err != nil { t.Fatalf("parsing reformatted: %v", err) } eq := eqchecker{file: base} if err := eq.check(f, f2); err != nil { t.Errorf("not equal (parse/Format/parse): %v", err) } pf1, err := Parse(base, data, nil) if err != nil { switch base { case "testdata/block.golden", "testdata/block.in", "testdata/comment.golden", "testdata/comment.in", "testdata/rule1.golden": // ignore default: t.Errorf("should parse %v: %v", base, err) } } if err == nil { pf2, err := Parse(base, ndata, nil) if err != nil { t.Fatalf("Parsing reformatted: %v", err) } eq := eqchecker{file: base} if err := eq.check(pf1, pf2); err != nil { t.Errorf("not equal (parse/Format/Parse): %v", err) } ndata2, err := pf1.Format() if err != nil { t.Errorf("reformat: %v", err) } pf3, err := Parse(base, ndata2, nil) if err != nil { t.Fatalf("Parsing reformatted2: %v", err) } eq = eqchecker{file: 
base} if err := eq.check(pf1, pf3); err != nil { t.Errorf("not equal (Parse/Format/Parse): %v", err) } ndata = ndata2 } if strings.HasSuffix(out, ".in") { golden, err := os.ReadFile(strings.TrimSuffix(out, ".in") + ".golden") if err != nil { t.Fatal(err) } if !bytes.Equal(ndata, golden) { t.Errorf("formatted %s incorrectly: diff shows -golden, +ours", base) tdiff(t, string(golden), string(ndata)) return } } }) } } // An eqchecker holds state for checking the equality of two parse trees. type eqchecker struct { file string pos Position } // errorf returns an error described by the printf-style format and arguments, // inserting the current file position before the error text. func (eq *eqchecker) errorf(format string, args ...interface{}) error { return fmt.Errorf("%s:%d: %s", eq.file, eq.pos.Line, fmt.Sprintf(format, args...)) } // check checks that v and w represent the same parse tree. // If not, it returns an error describing the first difference. func (eq *eqchecker) check(v, w interface{}) error { return eq.checkValue(reflect.ValueOf(v), reflect.ValueOf(w)) } var ( posType = reflect.TypeOf(Position{}) commentsType = reflect.TypeOf(Comments{}) ) // checkValue checks that v and w represent the same parse tree. // If not, it returns an error describing the first difference. func (eq *eqchecker) checkValue(v, w reflect.Value) error { // inner returns the innermost expression for v. // if v is a non-nil interface value, it returns the concrete // value in the interface. 
inner := func(v reflect.Value) reflect.Value { for { if v.Kind() == reflect.Interface && !v.IsNil() { v = v.Elem() continue } break } return v } v = inner(v) w = inner(w) if v.Kind() == reflect.Invalid && w.Kind() == reflect.Invalid { return nil } if v.Kind() == reflect.Invalid { return eq.errorf("nil interface became %s", w.Type()) } if w.Kind() == reflect.Invalid { return eq.errorf("%s became nil interface", v.Type()) } if v.Type() != w.Type() { return eq.errorf("%s became %s", v.Type(), w.Type()) } if p, ok := v.Interface().(Expr); ok { eq.pos, _ = p.Span() } switch v.Kind() { default: return eq.errorf("unexpected type %s", v.Type()) case reflect.Bool, reflect.Int, reflect.String: vi := v.Interface() wi := w.Interface() if vi != wi { return eq.errorf("%v became %v", vi, wi) } case reflect.Slice: vl := v.Len() wl := w.Len() for i := 0; i < vl || i < wl; i++ { if i >= vl { return eq.errorf("unexpected %s", w.Index(i).Type()) } if i >= wl { return eq.errorf("missing %s", v.Index(i).Type()) } if err := eq.checkValue(v.Index(i), w.Index(i)); err != nil { return err } } case reflect.Struct: // Fields in struct must match. t := v.Type() n := t.NumField() for i := 0; i < n; i++ { tf := t.Field(i) switch { default: if err := eq.checkValue(v.Field(i), w.Field(i)); err != nil { return err } case tf.Type == posType: // ignore positions case tf.Type == commentsType: // ignore comment assignment } } case reflect.Ptr, reflect.Interface: if v.IsNil() != w.IsNil() { if v.IsNil() { return eq.errorf("unexpected %s", w.Elem().Type()) } return eq.errorf("missing %s", v.Elem().Type()) } if err := eq.checkValue(v.Elem(), w.Elem()); err != nil { return err } } return nil } // diff returns the output of running diff on b1 and b2. 
func diff(b1, b2 []byte) (data []byte, err error) { f1, err := os.CreateTemp("", "testdiff") if err != nil { return nil, err } defer os.Remove(f1.Name()) defer f1.Close() f2, err := os.CreateTemp("", "testdiff") if err != nil { return nil, err } defer os.Remove(f2.Name()) defer f2.Close() f1.Write(b1) f2.Write(b2) data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput() if len(data) > 0 { // diff exits with a non-zero status when the files don't match. // Ignore that failure as long as we get output. err = nil } return } // tdiff logs the diff output to t.Error. func tdiff(t *testing.T, a, b string) { data, err := diff([]byte(a), []byte(b)) if err != nil { t.Error(err) return } t.Error(string(data)) } var modulePathTests = []struct { input []byte expected string }{ {input: []byte("module \"github.com/rsc/vgotest\""), expected: "github.com/rsc/vgotest"}, {input: []byte("module github.com/rsc/vgotest"), expected: "github.com/rsc/vgotest"}, {input: []byte("module \"github.com/rsc/vgotest\""), expected: "github.com/rsc/vgotest"}, {input: []byte("module github.com/rsc/vgotest"), expected: "github.com/rsc/vgotest"}, {input: []byte("module `github.com/rsc/vgotest`"), expected: "github.com/rsc/vgotest"}, {input: []byte("module \"github.com/rsc/vgotest/v2\""), expected: "github.com/rsc/vgotest/v2"}, {input: []byte("module github.com/rsc/vgotest/v2"), expected: "github.com/rsc/vgotest/v2"}, {input: []byte("module \"gopkg.in/yaml.v2\""), expected: "gopkg.in/yaml.v2"}, {input: []byte("module gopkg.in/yaml.v2"), expected: "gopkg.in/yaml.v2"}, {input: []byte("module \"gopkg.in/check.v1\"\n"), expected: "gopkg.in/check.v1"}, {input: []byte("module \"gopkg.in/check.v1\n\""), expected: ""}, {input: []byte("module gopkg.in/check.v1\n"), expected: "gopkg.in/check.v1"}, {input: []byte("module \"gopkg.in/check.v1\"\r\n"), expected: "gopkg.in/check.v1"}, {input: []byte("module gopkg.in/check.v1\r\n"), expected: "gopkg.in/check.v1"}, {input: []byte("module 
\"gopkg.in/check.v1\"\n\n"), expected: "gopkg.in/check.v1"}, {input: []byte("module gopkg.in/check.v1\n\n"), expected: "gopkg.in/check.v1"}, {input: []byte("module \n\"gopkg.in/check.v1\"\n\n"), expected: ""}, {input: []byte("module \ngopkg.in/check.v1\n\n"), expected: ""}, {input: []byte("module \"gopkg.in/check.v1\"asd"), expected: ""}, {input: []byte("module \n\"gopkg.in/check.v1\"\n\n"), expected: ""}, {input: []byte("module \ngopkg.in/check.v1\n\n"), expected: ""}, {input: []byte("module \"gopkg.in/check.v1\"asd"), expected: ""}, {input: []byte("module \nmodule a/b/c "), expected: "a/b/c"}, {input: []byte("module \" \""), expected: " "}, {input: []byte("module "), expected: ""}, {input: []byte("module \" a/b/c \""), expected: " a/b/c "}, {input: []byte("module \"github.com/rsc/vgotest1\" // with a comment"), expected: "github.com/rsc/vgotest1"}, } func TestModulePath(t *testing.T) { for _, test := range modulePathTests { t.Run(string(test.input), func(t *testing.T) { result := ModulePath(test.input) if result != test.expected { t.Fatalf("ModulePath(%q): %s, want %s", string(test.input), result, test.expected) } }) } } func TestParseVersions(t *testing.T) { tests := []struct { desc, input string ok bool laxOK bool // ok=true implies laxOK=true; only set if ok=false }{ // go lines {desc: "empty", input: "module m\ngo \n", ok: false}, {desc: "one", input: "module m\ngo 1\n", ok: false}, {desc: "two", input: "module m\ngo 1.22\n", ok: true}, {desc: "three", input: "module m\ngo 1.22.333", ok: true}, {desc: "before", input: "module m\ngo v1.2\n", ok: false}, {desc: "after", input: "module m\ngo 1.2rc1\n", ok: true}, {desc: "space", input: "module m\ngo 1.2 3.4\n", ok: false}, {desc: "alt1", input: "module m\ngo 1.2.3\n", ok: true}, {desc: "alt2", input: "module m\ngo 1.2rc1\n", ok: true}, {desc: "alt3", input: "module m\ngo 1.2beta1\n", ok: true}, {desc: "alt4", input: "module m\ngo 1.2.beta1\n", ok: false, laxOK: true}, {desc: "alt1", input: "module m\ngo 
v1.2.3\n", ok: false, laxOK: true}, {desc: "alt2", input: "module m\ngo v1.2rc1\n", ok: false, laxOK: true}, {desc: "alt3", input: "module m\ngo v1.2beta1\n", ok: false, laxOK: true}, {desc: "alt4", input: "module m\ngo v1.2.beta1\n", ok: false, laxOK: true}, {desc: "alt1", input: "module m\ngo v1.2\n", ok: false, laxOK: true}, // toolchain lines {desc: "tool", input: "module m\ntoolchain go1.2\n", ok: true}, {desc: "tool1", input: "module m\ntoolchain go1.2.3\n", ok: true}, {desc: "tool2", input: "module m\ntoolchain go1.2rc1\n", ok: true}, {desc: "tool3", input: "module m\ntoolchain go1.2rc1-gccgo\n", ok: true}, {desc: "tool4", input: "module m\ntoolchain default\n", ok: true}, {desc: "tool5", input: "module m\ntoolchain inconceivable!\n", ok: false, laxOK: true}, } t.Run("Strict", func(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { if _, err := Parse("go.mod", []byte(test.input), nil); err == nil && !test.ok { t.Error("unexpected success") } else if err != nil && test.ok { t.Errorf("unexpected error: %v", err) } }) } }) t.Run("Lax", func(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { if _, err := ParseLax("go.mod", []byte(test.input), nil); err == nil && !(test.ok || test.laxOK) { t.Error("unexpected success") } else if err != nil && test.ok { t.Errorf("unexpected error: %v", err) } }) } }) } func TestComments(t *testing.T) { for _, test := range []struct { desc, input, want string }{ { desc: "comment_only", input: ` // a // b `, want: ` comments before "// a" comments before "// b" `, }, { desc: "line", input: ` // a // b module m // c // d // e `, want: ` comments before "// a" line before "// b" line suffix "// c" comments before "// d" comments before "// e" `, }, { desc: "block", input: ` // a // b block ( // c // d // e x // f // g // h ) // i // j // k `, want: ` comments before "// a" block before "// b" lparen suffix "// c" blockline before "// d" blockline before "" blockline 
before "// e" blockline suffix "// f" rparen before "// g" rparen before "" rparen before "// h" rparen suffix "// i" comments before "// j" comments before "// k" `, }, { desc: "cr_removed", input: "// a\r\r\n", want: `comments before "// a\r"`, }, } { t.Run(test.desc, func(t *testing.T) { f, err := ParseLax("go.mod", []byte(test.input), nil) if err != nil { t.Fatal(err) } buf := &bytes.Buffer{} printComments := func(prefix string, cs *Comments) { for _, c := range cs.Before { fmt.Fprintf(buf, "%s before %q\n", prefix, c.Token) } for _, c := range cs.Suffix { fmt.Fprintf(buf, "%s suffix %q\n", prefix, c.Token) } for _, c := range cs.After { fmt.Fprintf(buf, "%s after %q\n", prefix, c.Token) } } printComments("file", &f.Syntax.Comments) for _, stmt := range f.Syntax.Stmt { switch stmt := stmt.(type) { case *CommentBlock: printComments("comments", stmt.Comment()) case *Line: printComments("line", stmt.Comment()) case *LineBlock: printComments("block", stmt.Comment()) printComments("lparen", stmt.LParen.Comment()) for _, line := range stmt.Line { printComments("blockline", line.Comment()) } printComments("rparen", stmt.RParen.Comment()) } } got := strings.TrimSpace(buf.String()) want := strings.TrimSpace(test.want) if got != want { t.Errorf("got:\n%s\nwant:\n%s", got, want) } }) } } mod-0.19.0/modfile/rule.go000066400000000000000000001350401463702072700153100ustar00rootroot00000000000000// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package modfile implements a parser and formatter for go.mod files. // // The go.mod syntax is described in // https://pkg.go.dev/cmd/go/#hdr-The_go_mod_file. // // The [Parse] and [ParseLax] functions both parse a go.mod file and return an // abstract syntax tree. ParseLax ignores unknown statements and may be used to // parse go.mod files that may have been developed with newer versions of Go. 
//
// The [File] struct returned by Parse and ParseLax represent an abstract
// go.mod file. File has several methods like [File.AddNewRequire] and
// [File.DropReplace] that can be used to programmatically edit a file.
//
// The [Format] function formats a File back to a byte slice which can be
// written to a file.
package modfile

import (
	"errors"
	"fmt"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"unicode"

	"golang.org/x/mod/internal/lazyregexp"
	"golang.org/x/mod/module"
	"golang.org/x/mod/semver"
)

// A File is the parsed, interpreted form of a go.mod file.
type File struct {
	Module    *Module
	Go        *Go
	Toolchain *Toolchain
	Godebug   []*Godebug
	Require   []*Require
	Exclude   []*Exclude
	Replace   []*Replace
	Retract   []*Retract

	Syntax *FileSyntax
}

// A Module is the module statement.
type Module struct {
	Mod        module.Version
	Deprecated string
	Syntax     *Line
}

// A Go is the go statement.
type Go struct {
	Version string // "1.23"
	Syntax  *Line
}

// A Toolchain is the toolchain statement.
type Toolchain struct {
	Name   string // "go1.21rc1"
	Syntax *Line
}

// A Godebug is a single godebug key=value statement.
type Godebug struct {
	Key    string
	Value  string
	Syntax *Line
}

// An Exclude is a single exclude statement.
type Exclude struct {
	Mod    module.Version
	Syntax *Line
}

// A Replace is a single replace statement.
type Replace struct {
	Old    module.Version
	New    module.Version
	Syntax *Line
}

// A Retract is a single retract statement.
type Retract struct {
	VersionInterval
	Rationale string
	Syntax    *Line
}

// A VersionInterval represents a range of versions with upper and lower bounds.
// Intervals are closed: both bounds are included. When Low is equal to High,
// the interval may refer to a single version ('v1.2.3') or an interval
// ('[v1.2.3, v1.2.3]'); both have the same representation.
type VersionInterval struct {
	Low, High string
}

// A Require is a single require statement.
type Require struct {
	Mod      module.Version
	Indirect bool // has "// indirect" comment
	Syntax   *Line
}

// markRemoved marks the require line for deletion and clears the receiver.
func (r *Require) markRemoved() {
	r.Syntax.markRemoved()
	*r = Require{}
}

// setVersion updates both the parsed version and the underlying syntax tokens.
func (r *Require) setVersion(v string) {
	r.Mod.Version = v

	if line := r.Syntax; len(line.Token) > 0 {
		if line.InBlock {
			// If the line is preceded by an empty line, remove it; see
			// https://golang.org/issue/33779.
			if len(line.Comments.Before) == 1 && len(line.Comments.Before[0].Token) == 0 {
				line.Comments.Before = line.Comments.Before[:0]
			}
			if len(line.Token) >= 2 { // example.com v1.2.3
				line.Token[1] = v
			}
		} else {
			if len(line.Token) >= 3 { // require example.com v1.2.3
				line.Token[2] = v
			}
		}
	}
}

// setIndirect sets line to have (or not have) a "// indirect" comment.
func (r *Require) setIndirect(indirect bool) {
	r.Indirect = indirect
	line := r.Syntax
	if isIndirect(line) == indirect {
		return
	}
	if indirect {
		// Adding comment.
		if len(line.Suffix) == 0 {
			// New comment.
			line.Suffix = []Comment{{Token: "// indirect", Suffix: true}}
			return
		}

		com := &line.Suffix[0]
		text := strings.TrimSpace(strings.TrimPrefix(com.Token, string(slashSlash)))
		if text == "" {
			// Empty comment.
			com.Token = "// indirect"
			return
		}

		// Insert at beginning of existing comment.
		com.Token = "// indirect; " + text
		return
	}

	// Removing comment.
	f := strings.TrimSpace(strings.TrimPrefix(line.Suffix[0].Token, string(slashSlash)))
	if f == "indirect" {
		// Remove whole comment.
		line.Suffix = nil
		return
	}

	// Remove comment prefix.
	com := &line.Suffix[0]
	i := strings.Index(com.Token, "indirect;")
	com.Token = "//" + com.Token[i+len("indirect;"):]
}

// isIndirect reports whether line has a "// indirect" comment,
// meaning it is in go.mod only for its effect on indirect dependencies,
// so that it can be dropped entirely once the effective version of the
// indirect dependency reaches the given minimum version.
func isIndirect(line *Line) bool {
	if len(line.Suffix) == 0 {
		return false
	}
	// Accept both a bare "// indirect" and "// indirect; <more text>".
	f := strings.Fields(strings.TrimPrefix(line.Suffix[0].Token, string(slashSlash)))
	return (len(f) == 1 && f[0] == "indirect" || len(f) > 1 && f[0] == "indirect;")
}

// AddModuleStmt sets the module path, adding a module statement if one does
// not exist or rewriting the existing one in place. It always returns nil.
func (f *File) AddModuleStmt(path string) error {
	if f.Syntax == nil {
		f.Syntax = new(FileSyntax)
	}
	if f.Module == nil {
		f.Module = &Module{
			Mod:    module.Version{Path: path},
			Syntax: f.Syntax.addLine(nil, "module", AutoQuote(path)),
		}
	} else {
		f.Module.Mod.Path = path
		f.Syntax.updateLine(f.Module.Syntax, "module", AutoQuote(path))
	}
	return nil
}

// AddComment appends a stand-alone comment block containing text to the
// end of the file's statements.
func (f *File) AddComment(text string) {
	if f.Syntax == nil {
		f.Syntax = new(FileSyntax)
	}
	f.Syntax.Stmt = append(f.Syntax.Stmt, &CommentBlock{
		Comments: Comments{
			Before: []Comment{
				{
					Token: text,
				},
			},
		},
	})
}

// A VersionFixer canonicalizes a module version for a given module path.
type VersionFixer func(path, version string) (string, error)

// errDontFix is returned by a VersionFixer to indicate the version should be
// left alone, even if it's not canonical.
var dontFixRetract VersionFixer = func(_, vers string) (string, error) {
	return vers, nil
}

// Parse parses and returns a go.mod file.
//
// file is the name of the file, used in positions and errors.
//
// data is the content of the file.
//
// fix is an optional function that canonicalizes module versions.
// If fix is nil, all module versions must be canonical ([module.CanonicalVersion]
// must return the same string).
func Parse(file string, data []byte, fix VersionFixer) (*File, error) {
	return parseToFile(file, data, fix, true)
}

// ParseLax is like Parse but ignores unknown statements.
// It is used when parsing go.mod files other than the main module,
// under the theory that most statement types we add in the future will
// only apply in the main module, like exclude and replace,
// and so we get better gradual deployments if old go commands
// simply ignore those statements when found in go.mod files
// in dependencies.
func ParseLax(file string, data []byte, fix VersionFixer) (*File, error) {
	return parseToFile(file, data, fix, false)
}

// parseToFile is the shared implementation of Parse and ParseLax; strict
// selects between the two behaviors.
func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (parsed *File, err error) {
	fs, err := parse(file, data)
	if err != nil {
		return nil, err
	}
	f := &File{
		Syntax: fs,
	}
	var errs ErrorList

	// fix versions in retract directives after the file is parsed.
	// We need the module path to fix versions, and it might be at the end.
	defer func() {
		oldLen := len(errs)
		f.fixRetract(fix, &errs)
		if len(errs) > oldLen {
			parsed, err = nil, errs
		}
	}()

	for _, x := range fs.Stmt {
		switch x := x.(type) {
		case *Line:
			f.add(&errs, nil, x, x.Token[0], x.Token[1:], fix, strict)

		case *LineBlock:
			// Block directives have exactly one leading token (the verb).
			if len(x.Token) > 1 {
				if strict {
					errs = append(errs, Error{
						Filename: file,
						Pos:      x.Start,
						Err:      fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")),
					})
				}
				continue
			}
			switch x.Token[0] {
			default:
				if strict {
					errs = append(errs, Error{
						Filename: file,
						Pos:      x.Start,
						Err:      fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")),
					})
				}
				continue
			case "module", "godebug", "require", "exclude", "replace", "retract":
				for _, l := range x.Line {
					f.add(&errs, x, l, x.Token[0], l.Token, fix, strict)
				}
			}
		}
	}

	if len(errs) > 0 {
		return nil, errs
	}
	return f, nil
}

var GoVersionRE = lazyregexp.New(`^([1-9][0-9]*)\.(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*))?([a-z]+[0-9]+)?$`)
var laxGoVersionRE = lazyregexp.New(`^v?(([1-9][0-9]*)\.(0|[1-9][0-9]*))([^0-9].*)$`)

// Toolchains must be named beginning with `go1`,
// like "go1.20.3" or "go1.20.3-gccgo". As a special case, "default" is also permitted.
// Note that this regexp is a much looser condition than go/version.IsValid,
// for forward compatibility.
// (This code has to be work to identify new toolchains even if we tweak the syntax in the future.)
var ToolchainRE = lazyregexp.New(`^default$|^go1($|\.)`)

// add processes one directive line (possibly inside a block) and records
// its parsed form in f, appending any problems to errs.
func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, args []string, fix VersionFixer, strict bool) {
	// If strict is false, this module is a dependency.
	// We ignore all unknown directives as well as main-module-only
	// directives like replace and exclude. It will work better for
	// forward compatibility if we can depend on modules that have unknown
	// statements (presumed relevant only when acting as the main module)
	// and simply ignore those statements.
	if !strict {
		switch verb {
		case "go", "module", "retract", "require":
			// want these even for dependency go.mods
		default:
			return
		}
	}

	wrapModPathError := func(modPath string, err error) {
		*errs = append(*errs, Error{
			Filename: f.Syntax.Name,
			Pos:      line.Start,
			ModPath:  modPath,
			Verb:     verb,
			Err:      err,
		})
	}
	wrapError := func(err error) {
		*errs = append(*errs, Error{
			Filename: f.Syntax.Name,
			Pos:      line.Start,
			Err:      err,
		})
	}
	errorf := func(format string, args ...interface{}) {
		wrapError(fmt.Errorf(format, args...))
	}

	switch verb {
	default:
		errorf("unknown directive: %s", verb)

	case "go":
		if f.Go != nil {
			errorf("repeated go statement")
			return
		}
		if len(args) != 1 {
			errorf("go directive expects exactly one argument")
			return
		} else if !GoVersionRE.MatchString(args[0]) {
			fixed := false
			if !strict {
				// In lax mode, tolerate slightly malformed versions by
				// keeping only the recognizable major.minor prefix.
				if m := laxGoVersionRE.FindStringSubmatch(args[0]); m != nil {
					args[0] = m[1]
					fixed = true
				}
			}
			if !fixed {
				errorf("invalid go version '%s': must match format 1.23.0", args[0])
				return
			}
		}

		f.Go = &Go{Syntax: line}
		f.Go.Version = args[0]

	case "toolchain":
		if f.Toolchain != nil {
			errorf("repeated toolchain statement")
			return
		}
		if len(args) != 1 {
			errorf("toolchain directive expects exactly one argument")
			return
		} else if !ToolchainRE.MatchString(args[0]) {
			errorf("invalid toolchain version '%s': must match format go1.23.0 or default", args[0])
			return
		}
		f.Toolchain = &Toolchain{Syntax: line}
		f.Toolchain.Name = args[0]

	case "module":
		if f.Module != nil {
			errorf("repeated module statement")
			return
		}
		deprecated := parseDeprecation(block, line)
		f.Module = &Module{
			Syntax:     line,
			Deprecated: deprecated,
		}
		if len(args) != 1 {
			errorf("usage: module module/path")
			return
		}
		s, err := parseString(&args[0])
		if err != nil {
			errorf("invalid quoted string: %v", err)
			return
		}
		f.Module.Mod = module.Version{Path: s}

	case "godebug":
		if len(args) != 1 || strings.ContainsAny(args[0], "\"`',") {
			errorf("usage: godebug key=value")
			return
		}
		key, value, ok := strings.Cut(args[0], "=")
		if !ok {
			errorf("usage: godebug key=value")
			return
		}
		f.Godebug = append(f.Godebug, &Godebug{
			Key:    key,
			Value:  value,
			Syntax: line,
		})

	case "require", "exclude":
		if len(args) != 2 {
			errorf("usage: %s module/path v1.2.3", verb)
			return
		}
		s, err := parseString(&args[0])
		if err != nil {
			errorf("invalid quoted string: %v", err)
			return
		}
		v, err := parseVersion(verb, s, &args[1], fix)
		if err != nil {
			wrapError(err)
			return
		}
		pathMajor, err := modulePathMajor(s)
		if err != nil {
			wrapError(err)
			return
		}
		if err := module.CheckPathMajor(v, pathMajor); err != nil {
			wrapModPathError(s, err)
			return
		}
		if verb == "require" {
			f.Require = append(f.Require, &Require{
				Mod:      module.Version{Path: s, Version: v},
				Syntax:   line,
				Indirect: isIndirect(line),
			})
		} else {
			f.Exclude = append(f.Exclude, &Exclude{
				Mod:    module.Version{Path: s, Version: v},
				Syntax: line,
			})
		}

	case "replace":
		replace, wrappederr := parseReplace(f.Syntax.Name, line, verb, args, fix)
		if wrappederr != nil {
			*errs = append(*errs, *wrappederr)
			return
		}
		f.Replace = append(f.Replace, replace)

	case "retract":
		rationale := parseDirectiveComment(block, line)
		vi, err := parseVersionInterval(verb, "", &args, dontFixRetract)
		if err != nil {
			if strict {
				wrapError(err)
				return
			} else {
				// Only report errors parsing intervals in the main module. We may
				// support additional syntax in the future, such as open and half-open
				// intervals. Those can't be supported now, because they break the
				// go.mod parser, even in lax mode.
				return
			}
		}
		if len(args) > 0 && strict {
			// In the future, there may be additional information after the version.
			errorf("unexpected token after version: %q", args[0])
			return
		}

		retract := &Retract{
			VersionInterval: vi,
			Rationale:       rationale,
			Syntax:          line,
		}
		f.Retract = append(f.Retract, retract)
	}
}

// parseReplace parses the arguments of a replace directive, which come in
// two forms: "old [vOld] => new vNew" and "old [vOld] => ../directory".
func parseReplace(filename string, line *Line, verb string, args []string, fix VersionFixer) (*Replace, *Error) {
	wrapModPathError := func(modPath string, err error) *Error {
		return &Error{
			Filename: filename,
			Pos:      line.Start,
			ModPath:  modPath,
			Verb:     verb,
			Err:      err,
		}
	}
	wrapError := func(err error) *Error {
		return &Error{
			Filename: filename,
			Pos:      line.Start,
			Err:      err,
		}
	}
	errorf := func(format string, args ...interface{}) *Error {
		return wrapError(fmt.Errorf(format, args...))
	}

	// arrow is the index of the "=>" token: 1 if the old version is
	// omitted, 2 if it is present.
	arrow := 2
	if len(args) >= 2 && args[1] == "=>" {
		arrow = 1
	}
	if len(args) < arrow+2 || len(args) > arrow+3 || args[arrow] != "=>" {
		return nil, errorf("usage: %s module/path [v1.2.3] => other/module v1.4\n\t or %s module/path [v1.2.3] => ../local/directory", verb, verb)
	}
	s, err := parseString(&args[0])
	if err != nil {
		return nil, errorf("invalid quoted string: %v", err)
	}
	pathMajor, err := modulePathMajor(s)
	if err != nil {
		return nil, wrapModPathError(s, err)
	}
	var v string
	if arrow == 2 {
		v, err = parseVersion(verb, s, &args[1], fix)
		if err != nil {
			return nil, wrapError(err)
		}
		if err := module.CheckPathMajor(v, pathMajor); err != nil {
			return nil, wrapModPathError(s, err)
		}
	}
	ns, err := parseString(&args[arrow+1])
	if err != nil {
		return nil, errorf("invalid quoted string: %v", err)
	}
	nv := ""
	if len(args) == arrow+2 {
		// No replacement version: the replacement must be a directory path.
		if !IsDirectoryPath(ns) {
			if strings.Contains(ns, "@") {
				return nil, errorf("replacement module must match format 'path version', not 'path@version'")
			}
			return nil, errorf("replacement module without version must be directory path (rooted or starting with . or ..)")
		}
		if filepath.Separator == '/' && strings.Contains(ns, `\`) {
			return nil, errorf("replacement directory appears to be Windows path (on a non-windows system)")
		}
	}
	if len(args) == arrow+3 {
		nv, err = parseVersion(verb, ns, &args[arrow+2], fix)
		if err != nil {
			return nil, wrapError(err)
		}
		if IsDirectoryPath(ns) {
			return nil, errorf("replacement module directory path %q cannot have version", ns)
		}
	}
	return &Replace{
		Old:    module.Version{Path: s, Version: v},
		New:    module.Version{Path: ns, Version: nv},
		Syntax: line,
	}, nil
}

// fixRetract applies fix to each retract directive in f, appending any errors
// to errs.
//
// Most versions are fixed as we parse the file, but for retract directives,
// the relevant module path is the one specified with the module directive,
// and that might appear at the end of the file (or not at all).
func (f *File) fixRetract(fix VersionFixer, errs *ErrorList) {
	if fix == nil {
		return
	}
	path := ""
	if f.Module != nil {
		path = f.Module.Mod.Path
	}
	var r *Retract
	wrapError := func(err error) {
		*errs = append(*errs, Error{
			Filename: f.Syntax.Name,
			Pos:      r.Syntax.Start,
			Err:      err,
		})
	}

	for _, r = range f.Retract {
		if path == "" {
			wrapError(errors.New("no module directive found, so retract cannot be used"))
			return // only print the first one of these
		}

		args := r.Syntax.Token
		if args[0] == "retract" {
			args = args[1:]
		}
		vi, err := parseVersionInterval("retract", path, &args, fix)
		if err != nil {
			wrapError(err)
		}
		r.VersionInterval = vi
	}
}

// add processes one directive line of a go.work file, recording its parsed
// form in f and appending any problems to errs.
func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, fix VersionFixer) {
	wrapError := func(err error) {
		*errs = append(*errs, Error{
			Filename: f.Syntax.Name,
			Pos:      line.Start,
			Err:      err,
		})
	}
	errorf := func(format string, args ...interface{}) {
		wrapError(fmt.Errorf(format, args...))
	}

	switch verb {
	default:
		errorf("unknown directive: %s", verb)

	case "go":
		if f.Go != nil {
			errorf("repeated go statement")
			return
		}
		if len(args) != 1 {
			errorf("go directive expects exactly one argument")
			return
		} else if !GoVersionRE.MatchString(args[0]) {
			errorf("invalid go version '%s': must match format 1.23.0", args[0])
			return
		}

		f.Go = &Go{Syntax: line}
		f.Go.Version = args[0]

	case "toolchain":
		if f.Toolchain != nil {
			errorf("repeated toolchain statement")
			return
		}
		if len(args) != 1 {
			errorf("toolchain directive expects exactly one argument")
			return
		} else if !ToolchainRE.MatchString(args[0]) {
			errorf("invalid toolchain version '%s': must match format go1.23.0 or default", args[0])
			return
		}

		f.Toolchain = &Toolchain{Syntax: line}
		f.Toolchain.Name = args[0]

	case "godebug":
		if len(args) != 1 || strings.ContainsAny(args[0], "\"`',") {
			errorf("usage: godebug key=value")
			return
		}
		key, value, ok := strings.Cut(args[0], "=")
		if !ok {
			errorf("usage: godebug key=value")
			return
		}
		f.Godebug = append(f.Godebug, &Godebug{
			Key:    key,
			Value:  value,
			Syntax: line,
		})

	case "use":
		if len(args) != 1 {
			errorf("usage: %s local/dir", verb)
			return
		}
		s, err := parseString(&args[0])
		if err != nil {
			errorf("invalid quoted string: %v", err)
			return
		}
		f.Use = append(f.Use, &Use{
			Path:   s,
			Syntax: line,
		})

	case "replace":
		replace, wrappederr := parseReplace(f.Syntax.Name, line, verb, args, fix)
		if wrappederr != nil {
			*errs = append(*errs, *wrappederr)
			return
		}
		f.Replace = append(f.Replace, replace)
	}
}

// IsDirectoryPath reports whether the given path should be interpreted as a directory path.
// Just like on the go command line, relative paths starting with a '.' or '..' path component
// and rooted paths are directory paths; the rest are module paths.
func IsDirectoryPath(ns string) bool {
	// Because go.mod files can move from one system to another,
	// we check all known path syntaxes, both Unix and Windows.
	return ns == "." || strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, `.\`) ||
		ns == ".." || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, `..\`) ||
		strings.HasPrefix(ns, "/") || strings.HasPrefix(ns, `\`) ||
		len(ns) >= 2 && ('A' <= ns[0] && ns[0] <= 'Z' || 'a' <= ns[0] && ns[0] <= 'z') && ns[1] == ':'
}

// MustQuote reports whether s must be quoted in order to appear as
// a single token in a go.mod line.
func MustQuote(s string) bool {
	for _, r := range s {
		switch r {
		case ' ', '"', '\'', '`':
			return true

		case '(', ')', '[', ']', '{', '}', ',':
			// Single-character punctuation tokens are allowed unquoted.
			if len(s) > 1 {
				return true
			}

		default:
			if !unicode.IsPrint(r) {
				return true
			}
		}
	}
	return s == "" || strings.Contains(s, "//") || strings.Contains(s, "/*")
}

// AutoQuote returns s or, if quoting is required for s to appear in a go.mod,
// the quotation of s.
func AutoQuote(s string) string {
	if MustQuote(s) {
		return strconv.Quote(s)
	}
	return s
}

// parseVersionInterval parses either a single version ("v1.2.3") or a closed
// interval ("[ vLow , vHigh ]") from the front of *args, consuming the tokens
// it uses.
func parseVersionInterval(verb string, path string, args *[]string, fix VersionFixer) (VersionInterval, error) {
	toks := *args
	if len(toks) == 0 || toks[0] == "(" {
		return VersionInterval{}, fmt.Errorf("expected '[' or version")
	}
	if toks[0] != "[" {
		// Single version: the interval is [v, v].
		v, err := parseVersion(verb, path, &toks[0], fix)
		if err != nil {
			return VersionInterval{}, err
		}
		*args = toks[1:]
		return VersionInterval{Low: v, High: v}, nil
	}
	toks = toks[1:]

	if len(toks) == 0 {
		return VersionInterval{}, fmt.Errorf("expected version after '['")
	}
	low, err := parseVersion(verb, path, &toks[0], fix)
	if err != nil {
		return VersionInterval{}, err
	}
	toks = toks[1:]

	if len(toks) == 0 || toks[0] != "," {
		return VersionInterval{}, fmt.Errorf("expected ',' after version")
	}
	toks = toks[1:]

	if len(toks) == 0 {
		return VersionInterval{}, fmt.Errorf("expected version after ','")
	}
	high, err := parseVersion(verb, path, &toks[0], fix)
	if err != nil {
		return VersionInterval{}, err
	}
	toks = toks[1:]

	if len(toks) == 0 || toks[0] != "]" {
		return VersionInterval{}, fmt.Errorf("expected ']' after version")
	}
	toks = toks[1:]

	*args = toks
	return VersionInterval{Low: low, High: high}, nil
}

// parseString unquotes *s if it is a double-quoted string, rejects other
// quote characters, and rewrites *s to its canonical (auto-quoted) form.
// It returns the unquoted value.
func parseString(s *string) (string, error) {
	t := *s
	if strings.HasPrefix(t, `"`) {
		var err error
		if t, err = strconv.Unquote(t); err != nil {
			return "", err
		}
	} else if strings.ContainsAny(t, "\"'`") {
		// Other quotes are reserved both for possible future expansion
		// and to avoid confusion. For example if someone types 'x'
		// we want that to be a syntax error and not a literal x in literal quotation marks.
		return "", fmt.Errorf("unquoted string cannot contain quote")
	}
	*s = AutoQuote(t)
	return t, nil
}

var deprecatedRE = lazyregexp.New(`(?s)(?:^|\n\n)Deprecated: *(.*?)(?:$|\n\n)`)

// parseDeprecation extracts the text of comments on a "module" directive and
// extracts a deprecation message from that.
//
// A deprecation message is contained in a paragraph within a block of comments
// that starts with "Deprecated:" (case sensitive). The message runs until the
// end of the paragraph and does not include the "Deprecated:" prefix. If the
// comment block has multiple paragraphs that start with "Deprecated:",
// parseDeprecation returns the message from the first.
func parseDeprecation(block *LineBlock, line *Line) string {
	text := parseDirectiveComment(block, line)
	m := deprecatedRE.FindStringSubmatch(text)
	if m == nil {
		return ""
	}
	return m[1]
}

// parseDirectiveComment extracts the text of comments on a directive.
// If the directive's line does not have comments and is part of a block that
// does have comments, the block's comments are used.
func parseDirectiveComment(block *LineBlock, line *Line) string {
	comments := line.Comment()
	if block != nil && len(comments.Before) == 0 && len(comments.Suffix) == 0 {
		comments = block.Comment()
	}
	groups := [][]Comment{comments.Before, comments.Suffix}
	var lines []string
	for _, g := range groups {
		for _, c := range g {
			if !strings.HasPrefix(c.Token, "//") {
				continue // blank line
			}
			lines = append(lines, strings.TrimSpace(strings.TrimPrefix(c.Token, "//")))
		}
	}
	return strings.Join(lines, "\n")
}

// An ErrorList is a list of Errors, collected while parsing.
type ErrorList []Error

func (e ErrorList) Error() string {
	errStrs := make([]string, len(e))
	for i, err := range e {
		errStrs[i] = err.Error()
	}
	return strings.Join(errStrs, "\n")
}

// An Error describes a problem found while parsing or editing a file,
// with optional position and directive context.
type Error struct {
	Filename string
	Pos      Position
	Verb     string
	ModPath  string
	Err      error
}

func (e *Error) Error() string {
	var pos string
	if e.Pos.LineRune > 1 {
		// Don't print LineRune if it's 1 (beginning of line).
		// It's always 1 except in scanner errors, which are rare.
		pos = fmt.Sprintf("%s:%d:%d: ", e.Filename, e.Pos.Line, e.Pos.LineRune)
	} else if e.Pos.Line > 0 {
		pos = fmt.Sprintf("%s:%d: ", e.Filename, e.Pos.Line)
	} else if e.Filename != "" {
		pos = fmt.Sprintf("%s: ", e.Filename)
	}

	var directive string
	if e.ModPath != "" {
		directive = fmt.Sprintf("%s %s: ", e.Verb, e.ModPath)
	} else if e.Verb != "" {
		directive = fmt.Sprintf("%s: ", e.Verb)
	}

	return pos + directive + e.Err.Error()
}

func (e *Error) Unwrap() error { return e.Err }

// parseVersion parses, fixes (if fix is non-nil) or canonicalizes (if fix is
// nil) the version token *s for the module at path, rewriting *s in place.
func parseVersion(verb string, path string, s *string, fix VersionFixer) (string, error) {
	t, err := parseString(s)
	if err != nil {
		return "", &Error{
			Verb:    verb,
			ModPath: path,
			Err: &module.InvalidVersionError{
				Version: *s,
				Err:     err,
			},
		}
	}
	if fix != nil {
		fixed, err := fix(path, t)
		if err != nil {
			if err, ok := err.(*module.ModuleError); ok {
				return "", &Error{
					Verb:    verb,
					ModPath: path,
					Err:     err.Err,
				}
			}
			return "", err
		}
		t = fixed
	} else {
		cv := module.CanonicalVersion(t)
		if cv == "" {
			return "", &Error{
				Verb:    verb,
				ModPath: path,
				Err: &module.InvalidVersionError{
					Version: t,
					Err:     errors.New("must be of the form v1.2.3"),
				},
			}
		}
		t = cv
	}
	*s = t
	return *s, nil
}

// modulePathMajor returns the major-version suffix of path (like "/v2"),
// or an error if path is not a valid module path.
func modulePathMajor(path string) (string, error) {
	_, major, ok := module.SplitPathVersion(path)
	if !ok {
		return "", fmt.Errorf("invalid module path")
	}
	return major, nil
}

// Format returns the go.mod file text for f's syntax tree.
func (f *File) Format() ([]byte, error) {
	return Format(f.Syntax), nil
}

// Cleanup cleans up the file f after any edit operations.
// To avoid quadratic behavior, modifications like [File.DropRequire]
// clear the entry but do not remove it from the slice.
// Cleanup cleans out all the cleared entries.
func (f *File) Cleanup() {
	w := 0
	for _, g := range f.Godebug {
		if g.Key != "" {
			f.Godebug[w] = g
			w++
		}
	}
	f.Godebug = f.Godebug[:w]

	w = 0
	for _, r := range f.Require {
		if r.Mod.Path != "" {
			f.Require[w] = r
			w++
		}
	}
	f.Require = f.Require[:w]

	w = 0
	for _, x := range f.Exclude {
		if x.Mod.Path != "" {
			f.Exclude[w] = x
			w++
		}
	}
	f.Exclude = f.Exclude[:w]

	w = 0
	for _, r := range f.Replace {
		if r.Old.Path != "" {
			f.Replace[w] = r
			w++
		}
	}
	f.Replace = f.Replace[:w]

	w = 0
	for _, r := range f.Retract {
		if r.Low != "" || r.High != "" {
			f.Retract[w] = r
			w++
		}
	}
	f.Retract = f.Retract[:w]

	f.Syntax.Cleanup()
}

// AddGoStmt sets the go directive to version, adding one near the top of
// the file (after the module directive, if present) if none exists.
func (f *File) AddGoStmt(version string) error {
	if !GoVersionRE.MatchString(version) {
		return fmt.Errorf("invalid language version %q", version)
	}
	if f.Go == nil {
		var hint Expr
		if f.Module != nil && f.Module.Syntax != nil {
			hint = f.Module.Syntax
		} else if f.Syntax == nil {
			f.Syntax = new(FileSyntax)
		}
		f.Go = &Go{
			Version: version,
			Syntax:  f.Syntax.addLine(hint, "go", version),
		}
	} else {
		f.Go.Version = version
		f.Syntax.updateLine(f.Go.Syntax, "go", version)
	}
	return nil
}

// DropGoStmt deletes the go statement from the file.
func (f *File) DropGoStmt() {
	if f.Go != nil {
		f.Go.Syntax.markRemoved()
		f.Go = nil
	}
}

// DropToolchainStmt deletes the toolchain statement from the file.
func (f *File) DropToolchainStmt() {
	if f.Toolchain != nil {
		f.Toolchain.Syntax.markRemoved()
		f.Toolchain = nil
	}
}

// AddToolchainStmt sets the toolchain directive to name, adding one near the
// top of the file (after go or module, if present) if none exists.
func (f *File) AddToolchainStmt(name string) error {
	if !ToolchainRE.MatchString(name) {
		return fmt.Errorf("invalid toolchain name %q", name)
	}
	if f.Toolchain == nil {
		var hint Expr
		if f.Go != nil && f.Go.Syntax != nil {
			hint = f.Go.Syntax
		} else if f.Module != nil && f.Module.Syntax != nil {
			hint = f.Module.Syntax
		}
		f.Toolchain = &Toolchain{
			Name:   name,
			Syntax: f.Syntax.addLine(hint, "toolchain", name),
		}
	} else {
		f.Toolchain.Name = name
		f.Syntax.updateLine(f.Toolchain.Syntax, "toolchain", name)
	}
	return nil
}

// AddGodebug sets the first godebug line for key to value,
// preserving any existing comments for that line and removing all
// other godebug lines for key.
//
// If no line currently exists for key, AddGodebug adds a new line
// at the end of the last godebug block.
func (f *File) AddGodebug(key, value string) error {
	need := true
	for _, g := range f.Godebug {
		if g.Key == key {
			if need {
				g.Value = value
				f.Syntax.updateLine(g.Syntax, "godebug", key+"="+value)
				need = false
			} else {
				// Duplicate line for key: mark it for removal by Cleanup.
				g.Syntax.markRemoved()
				*g = Godebug{}
			}
		}
	}

	if need {
		f.addNewGodebug(key, value)
	}
	return nil
}

// addNewGodebug adds a new godebug key=value line at the end
// of the last godebug block, regardless of any existing godebug lines for key.
func (f *File) addNewGodebug(key, value string) {
	line := f.Syntax.addLine(nil, "godebug", key+"="+value)
	g := &Godebug{
		Key:    key,
		Value:  value,
		Syntax: line,
	}
	f.Godebug = append(f.Godebug, g)
}

// AddRequire sets the first require line for path to version vers,
// preserving any existing comments for that line and removing all
// other lines for path.
//
// If no line currently exists for path, AddRequire adds a new line
// at the end of the last require block.
func (f *File) AddRequire(path, vers string) error {
	need := true
	for _, r := range f.Require {
		if r.Mod.Path == path {
			if need {
				r.Mod.Version = vers
				f.Syntax.updateLine(r.Syntax, "require", AutoQuote(path), vers)
				need = false
			} else {
				// Duplicate requirement for path: mark for removal by Cleanup.
				r.Syntax.markRemoved()
				*r = Require{}
			}
		}
	}

	if need {
		f.AddNewRequire(path, vers, false)
	}
	return nil
}

// AddNewRequire adds a new require line for path at version vers at the end of
// the last require block, regardless of any existing require lines for path.
func (f *File) AddNewRequire(path, vers string, indirect bool) {
	line := f.Syntax.addLine(nil, "require", AutoQuote(path), vers)
	r := &Require{
		Mod:    module.Version{Path: path, Version: vers},
		Syntax: line,
	}
	r.setIndirect(indirect)
	f.Require = append(f.Require, r)
}

// SetRequire updates the requirements of f to contain exactly req, preserving
// the existing block structure and line comment contents (except for 'indirect'
// markings) for the first requirement on each named module path.
//
// The Syntax field is ignored for the requirements in req.
//
// Any requirements not already present in the file are added to the block
// containing the last require line.
//
// The requirements in req must specify at most one distinct version for each
// module path.
//
// If any existing requirements may be removed, the caller should call
// [File.Cleanup] after all edits are complete.
func (f *File) SetRequire(req []*Require) {
	type elem struct {
		version  string
		indirect bool
	}
	need := make(map[string]elem)
	for _, r := range req {
		if prev, dup := need[r.Mod.Path]; dup && prev.version != r.Mod.Version {
			panic(fmt.Errorf("SetRequire called with conflicting versions for path %s (%s and %s)", r.Mod.Path, prev.version, r.Mod.Version))
		}
		need[r.Mod.Path] = elem{r.Mod.Version, r.Indirect}
	}

	// Update or delete the existing Require entries to preserve
	// only the first for each module path in req.
	for _, r := range f.Require {
		e, ok := need[r.Mod.Path]
		if ok {
			r.setVersion(e.version)
			r.setIndirect(e.indirect)
		} else {
			r.markRemoved()
		}
		// Delete so that a later duplicate of the same path is removed.
		delete(need, r.Mod.Path)
	}

	// Add new entries in the last block of the file for any paths that weren't
	// already present.
	//
	// This step is nondeterministic, but the final result will be deterministic
	// because we will sort the block.
	for path, e := range need {
		f.AddNewRequire(path, e.version, e.indirect)
	}

	f.SortBlocks()
}

// SetRequireSeparateIndirect updates the requirements of f to contain the given
// requirements. Comment contents (except for 'indirect' markings) are retained
// from the first existing requirement for each module path. Like SetRequire,
// SetRequireSeparateIndirect adds requirements for new paths in req,
// updates the version and "// indirect" comment on existing requirements,
// and deletes requirements on paths not in req. Existing duplicate requirements
// are deleted.
//
// As its name suggests, SetRequireSeparateIndirect puts direct and indirect
// requirements into two separate blocks, one containing only direct
// requirements, and the other containing only indirect requirements.
// SetRequireSeparateIndirect may move requirements between these two blocks
// when their indirect markings change. However, SetRequireSeparateIndirect
// won't move requirements from other blocks, especially blocks with comments.
//
// If the file initially has one uncommented block of requirements,
// SetRequireSeparateIndirect will split it into a direct-only and indirect-only
// block. This aids in the transition to separate blocks.
func (f *File) SetRequireSeparateIndirect(req []*Require) {
	// hasComments returns whether a line or block has comments
	// other than "indirect".
	hasComments := func(c Comments) bool {
		return len(c.Before) > 0 || len(c.After) > 0 || len(c.Suffix) > 1 ||
			(len(c.Suffix) == 1 &&
				strings.TrimSpace(strings.TrimPrefix(c.Suffix[0].Token, string(slashSlash))) != "indirect")
	}

	// moveReq adds r to block. If r was in another block, moveReq deletes
	// it from that block and transfers its comments.
	moveReq := func(r *Require, block *LineBlock) {
		var line *Line
		if r.Syntax == nil {
			line = &Line{Token: []string{AutoQuote(r.Mod.Path), r.Mod.Version}}
			r.Syntax = line
			if r.Indirect {
				r.setIndirect(true)
			}
		} else {
			line = new(Line)
			*line = *r.Syntax
			if !line.InBlock && len(line.Token) > 0 && line.Token[0] == "require" {
				line.Token = line.Token[1:]
			}
			r.Syntax.Token = nil // Cleanup will delete the old line.
			r.Syntax = line
		}
		line.InBlock = true
		block.Line = append(block.Line, line)
	}

	// Examine existing require lines and blocks.
	var (
		// We may insert new requirements into the last uncommented
		// direct-only and indirect-only blocks. We may also move requirements
		// to the opposite block if their indirect markings change.
		lastDirectIndex   = -1
		lastIndirectIndex = -1

		// If there are no direct-only or indirect-only blocks, a new block may
		// be inserted after the last require line or block.
		lastRequireIndex = -1

		// If there's only one require line or block, and it's uncommented,
		// we'll move its requirements to the direct-only or indirect-only blocks.
		requireLineOrBlockCount = 0

		// Track the block each requirement belongs to (if any) so we can
		// move them later.
		lineToBlock = make(map[*Line]*LineBlock)
	)
	for i, stmt := range f.Syntax.Stmt {
		switch stmt := stmt.(type) {
		case *Line:
			if len(stmt.Token) == 0 || stmt.Token[0] != "require" {
				continue
			}
			lastRequireIndex = i
			requireLineOrBlockCount++
			if !hasComments(stmt.Comments) {
				if isIndirect(stmt) {
					lastIndirectIndex = i
				} else {
					lastDirectIndex = i
				}
			}

		case *LineBlock:
			if len(stmt.Token) == 0 || stmt.Token[0] != "require" {
				continue
			}
			lastRequireIndex = i
			requireLineOrBlockCount++
			allDirect := len(stmt.Line) > 0 && !hasComments(stmt.Comments)
			allIndirect := len(stmt.Line) > 0 && !hasComments(stmt.Comments)
			for _, line := range stmt.Line {
				lineToBlock[line] = stmt
				if hasComments(line.Comments) {
					allDirect = false
					allIndirect = false
				} else if isIndirect(line) {
					allDirect = false
				} else {
					allIndirect = false
				}
			}
			if allDirect {
				lastDirectIndex = i
			}
			if allIndirect {
				lastIndirectIndex = i
			}
		}
	}

	oneFlatUncommentedBlock := requireLineOrBlockCount == 1 &&
		!hasComments(*f.Syntax.Stmt[lastRequireIndex].Comment())

	// Create direct and indirect blocks if needed. Convert lines into blocks
	// if needed. If we end up with an empty block or a one-line block,
	// Cleanup will delete it or convert it to a line later.
	insertBlock := func(i int) *LineBlock {
		block := &LineBlock{Token: []string{"require"}}
		f.Syntax.Stmt = append(f.Syntax.Stmt, nil)
		copy(f.Syntax.Stmt[i+1:], f.Syntax.Stmt[i:])
		f.Syntax.Stmt[i] = block
		return block
	}

	ensureBlock := func(i int) *LineBlock {
		switch stmt := f.Syntax.Stmt[i].(type) {
		case *LineBlock:
			return stmt
		case *Line:
			block := &LineBlock{
				Token: []string{"require"},
				Line:  []*Line{stmt},
			}
			stmt.Token = stmt.Token[1:] // remove "require"
			stmt.InBlock = true
			f.Syntax.Stmt[i] = block
			return block
		default:
			panic(fmt.Sprintf("unexpected statement: %v", stmt))
		}
	}

	var lastDirectBlock *LineBlock
	if lastDirectIndex < 0 {
		if lastIndirectIndex >= 0 {
			lastDirectIndex = lastIndirectIndex
			lastIndirectIndex++
		} else if lastRequireIndex >= 0 {
			lastDirectIndex = lastRequireIndex + 1
		} else {
			lastDirectIndex = len(f.Syntax.Stmt)
		}
		lastDirectBlock = insertBlock(lastDirectIndex)
	} else {
		lastDirectBlock = ensureBlock(lastDirectIndex)
	}

	var lastIndirectBlock *LineBlock
	if lastIndirectIndex < 0 {
		lastIndirectIndex = lastDirectIndex + 1
		lastIndirectBlock = insertBlock(lastIndirectIndex)
	} else {
		lastIndirectBlock = ensureBlock(lastIndirectIndex)
	}

	// Delete requirements we don't want anymore.
	// Update versions and indirect comments on requirements we want to keep.
	// If a requirement is in last{Direct,Indirect}Block with the wrong
	// indirect marking after this, or if the requirement is in an single
	// uncommented mixed block (oneFlatUncommentedBlock), move it to the
	// correct block.
	//
	// Some blocks may be empty after this. Cleanup will remove them.
	need := make(map[string]*Require)
	for _, r := range req {
		need[r.Mod.Path] = r
	}
	have := make(map[string]*Require)
	for _, r := range f.Require {
		path := r.Mod.Path
		if need[path] == nil || have[path] != nil {
			// Requirement not needed, or duplicate requirement. Delete.
			r.markRemoved()
			continue
		}
		have[r.Mod.Path] = r
		r.setVersion(need[path].Mod.Version)
		r.setIndirect(need[path].Indirect)
		if need[path].Indirect &&
			(oneFlatUncommentedBlock || lineToBlock[r.Syntax] == lastDirectBlock) {
			moveReq(r, lastIndirectBlock)
		} else if !need[path].Indirect &&
			(oneFlatUncommentedBlock || lineToBlock[r.Syntax] == lastIndirectBlock) {
			moveReq(r, lastDirectBlock)
		}
	}

	// Add new requirements.
	for path, r := range need {
		if have[path] == nil {
			if r.Indirect {
				moveReq(r, lastIndirectBlock)
			} else {
				moveReq(r, lastDirectBlock)
			}
			f.Require = append(f.Require, r)
		}
	}

	f.SortBlocks()
}

// DropGodebug removes all godebug lines for key.
// Call [File.Cleanup] afterward to remove the cleared entries.
func (f *File) DropGodebug(key string) error {
	for _, g := range f.Godebug {
		if g.Key == key {
			g.Syntax.markRemoved()
			*g = Godebug{}
		}
	}
	return nil
}

// DropRequire removes all require lines for path.
// Call [File.Cleanup] afterward to remove the cleared entries.
func (f *File) DropRequire(path string) error {
	for _, r := range f.Require {
		if r.Mod.Path == path {
			r.Syntax.markRemoved()
			*r = Require{}
		}
	}
	return nil
}

// AddExclude adds an exclude statement to the mod file. Errors if the provided
// version is not a canonical version string
func (f *File) AddExclude(path, vers string) error {
	if err := checkCanonicalVersion(path, vers); err != nil {
		return err
	}

	var hint *Line
	for _, x := range f.Exclude {
		if x.Mod.Path == path && x.Mod.Version == vers {
			return nil
		}
		if x.Mod.Path == path {
			hint = x.Syntax
		}
	}

	f.Exclude = append(f.Exclude, &Exclude{Mod: module.Version{Path: path, Version: vers}, Syntax: f.Syntax.addLine(hint, "exclude", AutoQuote(path), vers)})
	return nil
}

// DropExclude removes the exclude line matching path and vers, if present.
// Call [File.Cleanup] afterward to remove the cleared entries.
func (f *File) DropExclude(path, vers string) error {
	for _, x := range f.Exclude {
		if x.Mod.Path == path && x.Mod.Version == vers {
			x.Syntax.markRemoved()
			*x = Exclude{}
		}
	}
	return nil
}

// AddReplace adds or updates a replace directive mapping
// oldPath@oldVers to newPath@newVers.
func (f *File) AddReplace(oldPath, oldVers, newPath, newVers string) error {
	return addReplace(f.Syntax, &f.Replace, oldPath, oldVers, newPath, newVers)
}

// addReplace is the shared implementation behind File.AddReplace and the
// go.work equivalent; it updates an existing matching replace line or adds
// a new one near other replacements of the same old path.
func addReplace(syntax *FileSyntax, replace *[]*Replace, oldPath, oldVers, newPath, newVers string) error {
	need := true
	old := module.Version{Path: oldPath, Version: oldVers}
	new := module.Version{Path: newPath, Version: newVers}
	tokens := []string{"replace", AutoQuote(oldPath)}
	if oldVers != "" {
		tokens = append(tokens, oldVers)
	}
	tokens = append(tokens, "=>", AutoQuote(newPath))
	if newVers != "" {
		tokens = append(tokens, newVers)
	}

	var hint *Line
	for _, r := range *replace {
		if r.Old.Path == oldPath && (oldVers == "" || r.Old.Version == oldVers) {
			if need {
				// Found replacement for old; update to use new.
				r.New = new
				syntax.updateLine(r.Syntax, tokens...)
				need = false
				continue
			}
			// Already added; delete other replacements for same.
			r.Syntax.markRemoved()
			*r = Replace{}
		}
		if r.Old.Path == oldPath {
			hint = r.Syntax
		}
	}
	if need {
		*replace = append(*replace, &Replace{Old: old, New: new, Syntax: syntax.addLine(hint, tokens...)})
	}
	return nil
}

// DropReplace removes the replace line matching oldPath and oldVers, if any.
// Call [File.Cleanup] afterward to remove the cleared entries.
func (f *File) DropReplace(oldPath, oldVers string) error {
	for _, r := range f.Replace {
		if r.Old.Path == oldPath && r.Old.Version == oldVers {
			r.Syntax.markRemoved()
			*r = Replace{}
		}
	}
	return nil
}

// AddRetract adds a retract statement to the mod file. Errors if the provided
// version interval does not consist of canonical version strings
func (f *File) AddRetract(vi VersionInterval, rationale string) error {
	var path string
	if f.Module != nil {
		path = f.Module.Mod.Path
	}

	if err := checkCanonicalVersion(path, vi.High); err != nil {
		return err
	}
	if err := checkCanonicalVersion(path, vi.Low); err != nil {
		return err
	}

	r := &Retract{
		VersionInterval: vi,
	}
	if vi.Low == vi.High {
		// A degenerate interval is written as a single version.
		r.Syntax = f.Syntax.addLine(nil, "retract", AutoQuote(vi.Low))
	} else {
		r.Syntax = f.Syntax.addLine(nil, "retract", "[", AutoQuote(vi.Low), ",", AutoQuote(vi.High), "]")
	}
	if rationale != "" {
		for _, line := range strings.Split(rationale, "\n") {
			com := Comment{Token: "// " + line}
			r.Syntax.Comment().Before = append(r.Syntax.Comment().Before, com)
		}
	}
	return nil
}

// DropRetract removes the retract line matching vi exactly, if any.
// Call [File.Cleanup] afterward to remove the cleared entries.
func (f *File) DropRetract(vi VersionInterval) error {
	for _, r := range f.Retract {
		if r.VersionInterval == vi {
			r.Syntax.markRemoved()
			*r = Retract{}
		}
	}
	return nil
}

// SortBlocks sorts each block of lines in the file, using the ordering
// appropriate for the block's directive (lexicographic, semver for exclude
// on new-enough go versions, or descending intervals for retract).
func (f *File) SortBlocks() {
	f.removeDups() // otherwise sorting is unsafe

	// semanticSortForExcludeVersionV is the Go version (plus leading "v") at which
	// lines in exclude blocks start to use semantic sort instead of lexicographic sort.
	// See go.dev/issue/60028.
	const semanticSortForExcludeVersionV = "v1.21"
	useSemanticSortForExclude := f.Go != nil && semver.Compare("v"+f.Go.Version, semanticSortForExcludeVersionV) >= 0

	for _, stmt := range f.Syntax.Stmt {
		block, ok := stmt.(*LineBlock)
		if !ok {
			continue
		}
		less := lineLess
		if block.Token[0] == "exclude" && useSemanticSortForExclude {
			less = lineExcludeLess
		} else if block.Token[0] == "retract" {
			less = lineRetractLess
		}
		sort.SliceStable(block.Line, func(i, j int) bool {
			return less(block.Line[i], block.Line[j])
		})
	}
}

// removeDups removes duplicate exclude and replace directives.
//
// Earlier exclude directives take priority.
//
// Later replace directives take priority.
//
// require directives are not de-duplicated. That's left up to higher-level
// logic (MVS).
//
// retract directives are not de-duplicated since comments are
// meaningful, and versions may be retracted multiple times.
func (f *File) removeDups() {
	removeDups(f.Syntax, &f.Exclude, &f.Replace)
}

// removeDups is the shared implementation for go.mod and go.work files;
// exclude may be nil for file types that have no exclude directive.
func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace) {
	kill := make(map[*Line]bool)

	// Remove duplicate excludes.
	if exclude != nil {
		haveExclude := make(map[module.Version]bool)
		for _, x := range *exclude {
			if haveExclude[x.Mod] {
				kill[x.Syntax] = true
				continue
			}
			haveExclude[x.Mod] = true
		}
		var excl []*Exclude
		for _, x := range *exclude {
			if !kill[x.Syntax] {
				excl = append(excl, x)
			}
		}
		*exclude = excl
	}

	// Remove duplicate replacements.
	// Later replacements take priority over earlier ones.
	haveReplace := make(map[module.Version]bool)
	for i := len(*replace) - 1; i >= 0; i-- {
		x := (*replace)[i]
		if haveReplace[x.Old] {
			kill[x.Syntax] = true
			continue
		}
		haveReplace[x.Old] = true
	}
	var repl []*Replace
	for _, x := range *replace {
		if !kill[x.Syntax] {
			repl = append(repl, x)
		}
	}
	*replace = repl

	// Duplicate require and retract directives are not removed.

	// Drop killed statements from the syntax tree.
	var stmts []Expr
	for _, stmt := range syntax.Stmt {
		switch stmt := stmt.(type) {
		case *Line:
			if kill[stmt] {
				continue
			}
		case *LineBlock:
			var lines []*Line
			for _, line := range stmt.Line {
				if !kill[line] {
					lines = append(lines, line)
				}
			}
			stmt.Line = lines
			if len(lines) == 0 {
				continue
			}
		}
		stmts = append(stmts, stmt)
	}
	syntax.Stmt = stmts
}

// lineLess returns whether li should be sorted before lj. It sorts
// lexicographically without assigning any special meaning to tokens.
func lineLess(li, lj *Line) bool {
	for k := 0; k < len(li.Token) && k < len(lj.Token); k++ {
		if li.Token[k] != lj.Token[k] {
			return li.Token[k] < lj.Token[k]
		}
	}
	return len(li.Token) < len(lj.Token)
}

// lineExcludeLess reports whether li should be sorted before lj for lines in
// an "exclude" block.
func lineExcludeLess(li, lj *Line) bool { if len(li.Token) != 2 || len(lj.Token) != 2 { // Not a known exclude specification. // Fall back to sorting lexicographically. return lineLess(li, lj) } // An exclude specification has two tokens: ModulePath and Version. // Compare module path by string order and version by semver rules. if pi, pj := li.Token[0], lj.Token[0]; pi != pj { return pi < pj } return semver.Compare(li.Token[1], lj.Token[1]) < 0 } // lineRetractLess returns whether li should be sorted before lj for lines in // a "retract" block. It treats each line as a version interval. Single versions // are compared as if they were intervals with the same low and high version. // Intervals are sorted in descending order, first by low version, then by // high version, using semver.Compare. func lineRetractLess(li, lj *Line) bool { interval := func(l *Line) VersionInterval { if len(l.Token) == 1 { return VersionInterval{Low: l.Token[0], High: l.Token[0]} } else if len(l.Token) == 5 && l.Token[0] == "[" && l.Token[2] == "," && l.Token[4] == "]" { return VersionInterval{Low: l.Token[1], High: l.Token[3]} } else { // Line in unknown format. Treat as an invalid version. return VersionInterval{} } } vii := interval(li) vij := interval(lj) if cmp := semver.Compare(vii.Low, vij.Low); cmp != 0 { return cmp > 0 } return semver.Compare(vii.High, vij.High) > 0 } // checkCanonicalVersion returns a non-nil error if vers is not a canonical // version string or does not match the major version of path. // // If path is non-empty, the error text suggests a format with a major version // corresponding to the path. 
func checkCanonicalVersion(path, vers string) error {
	_, pathMajor, pathMajorOk := module.SplitPathVersion(path)

	// First gate: vers must round-trip through CanonicalVersion unchanged
	// (and must not be empty). The suggested format in the error adapts to
	// the module path's major-version suffix when one is known.
	if vers == "" || vers != module.CanonicalVersion(vers) {
		if pathMajor == "" {
			return &module.InvalidVersionError{
				Version: vers,
				Err:     fmt.Errorf("must be of the form v1.2.3"),
			}
		}
		return &module.InvalidVersionError{
			Version: vers,
			Err:     fmt.Errorf("must be of the form %s.2.3", module.PathMajorPrefix(pathMajor)),
		}
	}

	// Second gate (only when the path parsed): the version's major number
	// must agree with the path's major suffix.
	if pathMajorOk {
		if err := module.CheckPathMajor(vers, pathMajor); err != nil {
			if pathMajor == "" {
				// In this context, the user probably wrote "v2.3.4" when they meant
				// "v2.3.4+incompatible". Suggest that instead of "v0 or v1".
				return &module.InvalidVersionError{
					Version: vers,
					Err:     fmt.Errorf("should be %s+incompatible (or module %s/%v)", vers, path, semver.Major(vers)),
				}
			}
			return err
		}
	}
	return nil
}
mod-0.19.0/modfile/rule_test.go000066400000000000000000001014451463702072700163510ustar00rootroot00000000000000
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modfile import ( "bytes" "fmt" "strings" "testing" "golang.org/x/mod/module" ) var addRequireTests = []struct { desc string in string path string vers string out string }{ { `existing`, ` module m require x.y/z v1.2.3 `, "x.y/z", "v1.5.6", ` module m require x.y/z v1.5.6 `, }, { `existing2`, ` module m require ( x.y/z v1.2.3 // first x.z/a v0.1.0 // first-a ) require x.y/z v1.4.5 // second require ( x.y/z v1.6.7 // third x.z/a v0.2.0 // third-a ) `, "x.y/z", "v1.8.9", ` module m require ( x.y/z v1.8.9 // first x.z/a v0.1.0 // first-a ) require x.z/a v0.2.0 // third-a `, }, { `new`, ` module m require x.y/z v1.2.3 `, "x.y/w", "v1.5.6", ` module m require ( x.y/z v1.2.3 x.y/w v1.5.6 ) `, }, { `new2`, ` module m require x.y/z v1.2.3 require x.y/q/v2 v2.3.4 `, "x.y/w", "v1.5.6", ` module m require x.y/z v1.2.3 require ( x.y/q/v2 v2.3.4 x.y/w v1.5.6 ) `, }, { `unattached_comments`, ` module m require ( foo v0.0.0-00010101000000-000000000000 // bar v0.0.0-00010101000000-000000000000 ) `, "foo", "v0.0.0-00010101000000-000000000000", ` module m require ( foo v0.0.0-00010101000000-000000000000 // bar v0.0.0-00010101000000-000000000000 ) `, }, } type require struct { path, vers string indirect bool } var setRequireTests = []struct { desc string in string mods []require out string }{ { `https://golang.org/issue/45932`, `module m require ( x.y/a v1.2.3 //indirect x.y/b v1.2.3 x.y/c v1.2.3 ) `, []require{ {"x.y/a", "v1.2.3", false}, {"x.y/b", "v1.2.3", false}, {"x.y/c", "v1.2.3", false}, }, `module m require ( x.y/a v1.2.3 x.y/b v1.2.3 x.y/c v1.2.3 ) `, }, { `existing`, `module m require ( x.y/b v1.2.3 x.y/a v1.2.3 x.y/d v1.2.3 ) `, []require{ {"x.y/a", "v1.2.3", false}, {"x.y/b", "v1.2.3", false}, {"x.y/c", "v1.2.3", false}, }, `module m require ( x.y/a v1.2.3 x.y/b v1.2.3 x.y/c v1.2.3 ) `, }, { `existing_indirect`, `module m require ( x.y/a v1.2.3 x.y/b v1.2.3 // x.y/c v1.2.3 //c x.y/d v1.2.3 // c x.y/e v1.2.3 // indirect x.y/f v1.2.3 //indirect x.y/g v1.2.3 // 
indirect ) `, []require{ {"x.y/a", "v1.2.3", true}, {"x.y/b", "v1.2.3", true}, {"x.y/c", "v1.2.3", true}, {"x.y/d", "v1.2.3", true}, {"x.y/e", "v1.2.3", true}, {"x.y/f", "v1.2.3", true}, {"x.y/g", "v1.2.3", true}, }, `module m require ( x.y/a v1.2.3 // indirect x.y/b v1.2.3 // indirect x.y/c v1.2.3 // indirect; c x.y/d v1.2.3 // indirect; c x.y/e v1.2.3 // indirect x.y/f v1.2.3 //indirect x.y/g v1.2.3 // indirect ) `, }, { `existing_multi`, `module m require x.y/a v1.2.3 require x.y/b v1.2.3 require x.y/c v1.0.0 // not v1.2.3! require x.y/d v1.2.3 // comment kept require x.y/e v1.2.3 // comment kept require x.y/f v1.2.3 // indirect require x.y/g v1.2.3 // indirect `, []require{ {"x.y/h", "v1.2.3", false}, {"x.y/a", "v1.2.3", false}, {"x.y/b", "v1.2.3", false}, {"x.y/c", "v1.2.3", false}, {"x.y/d", "v1.2.3", false}, {"x.y/e", "v1.2.3", true}, {"x.y/f", "v1.2.3", false}, {"x.y/g", "v1.2.3", false}, }, `module m require x.y/a v1.2.3 require x.y/b v1.2.3 require x.y/c v1.2.3 // not v1.2.3! require x.y/d v1.2.3 // comment kept require x.y/e v1.2.3 // indirect; comment kept require x.y/f v1.2.3 require ( x.y/g v1.2.3 x.y/h v1.2.3 ) `, }, { `existing_duplicate`, `module m require ( x.y/a v1.0.0 // zero x.y/a v1.1.0 // one x.y/a v1.2.3 // two ) `, []require{ {"x.y/a", "v1.2.3", true}, }, `module m require x.y/a v1.2.3 // indirect; zero `, }, { `existing_duplicate_multi`, `module m require x.y/a v1.0.0 // zero require x.y/a v1.1.0 // one require x.y/a v1.2.3 // two `, []require{ {"x.y/a", "v1.2.3", true}, }, `module m require x.y/a v1.2.3 // indirect; zero `, }, } var setRequireSeparateIndirectTests = []struct { desc string in string mods []require out string }{ { `https://golang.org/issue/45932`, `module m require ( x.y/a v1.2.3 //indirect x.y/b v1.2.3 x.y/c v1.2.3 ) `, []require{ {"x.y/a", "v1.2.3", false}, {"x.y/b", "v1.2.3", false}, {"x.y/c", "v1.2.3", false}, }, `module m require ( x.y/a v1.2.3 x.y/b v1.2.3 x.y/c v1.2.3 ) `, }, { `existing`, `module m require ( x.y/b 
v1.2.3 x.y/a v1.2.3 x.y/d v1.2.3 ) `, []require{ {"x.y/a", "v1.2.3", false}, {"x.y/b", "v1.2.3", false}, {"x.y/c", "v1.2.3", false}, }, `module m require ( x.y/a v1.2.3 x.y/b v1.2.3 x.y/c v1.2.3 ) `, }, { `existing_indirect`, `module m require ( x.y/a v1.2.3 x.y/b v1.2.3 // x.y/c v1.2.3 //c x.y/d v1.2.3 // c x.y/e v1.2.3 // indirect x.y/f v1.2.3 //indirect x.y/g v1.2.3 // indirect ) `, []require{ {"x.y/a", "v1.2.3", true}, {"x.y/b", "v1.2.3", true}, {"x.y/c", "v1.2.3", true}, {"x.y/d", "v1.2.3", true}, {"x.y/e", "v1.2.3", true}, {"x.y/f", "v1.2.3", true}, {"x.y/g", "v1.2.3", true}, }, `module m require ( x.y/a v1.2.3 // indirect x.y/b v1.2.3 // indirect x.y/c v1.2.3 // indirect; c x.y/d v1.2.3 // indirect; c x.y/e v1.2.3 // indirect x.y/f v1.2.3 //indirect x.y/g v1.2.3 // indirect ) `, }, { `existing_line`, `module m require x.y/a v1.0.0 require x.y/c v1.0.0 // indirect `, []require{ {"x.y/a", "v1.2.3", false}, {"x.y/b", "v1.2.3", false}, {"x.y/c", "v1.2.3", true}, {"x.y/d", "v1.2.3", true}, }, `module m require ( x.y/a v1.2.3 x.y/b v1.2.3 ) require ( x.y/c v1.2.3 // indirect x.y/d v1.2.3 // indirect )`, }, { `existing_multi`, `module m require x.y/a v1.2.3 require x.y/b v1.2.3 // demoted to indirect require x.y/c v1.0.0 // not v1.2.3! require x.y/d v1.2.3 // comment kept require x.y/e v1.2.3 // comment kept require x.y/f v1.2.3 // indirect; promoted to direct // promoted to direct require x.y/g v1.2.3 // indirect require x.y/i v1.2.3 // indirect require x.y/j v1.2.3 // indirect `, []require{ {"x.y/h", "v1.2.3", false}, // out of alphabetical order {"x.y/i", "v1.2.3", true}, {"x.y/j", "v1.2.3", true}, {"x.y/a", "v1.2.3", false}, {"x.y/b", "v1.2.3", true}, {"x.y/c", "v1.2.3", false}, {"x.y/d", "v1.2.3", false}, {"x.y/e", "v1.2.3", true}, {"x.y/f", "v1.2.3", false}, {"x.y/g", "v1.2.3", false}, }, `module m require ( x.y/a v1.2.3 x.y/h v1.2.3 ) require x.y/b v1.2.3 // indirect; demoted to indirect require x.y/c v1.2.3 // not v1.2.3! 
require x.y/d v1.2.3 // comment kept require x.y/e v1.2.3 // indirect; comment kept require x.y/f v1.2.3 // promoted to direct // promoted to direct require x.y/g v1.2.3 require x.y/i v1.2.3 // indirect require x.y/j v1.2.3 // indirect `, }, { `existing_duplicate`, `module m require ( x.y/a v1.0.0 // zero x.y/a v1.1.0 // one x.y/a v1.2.3 // two ) `, []require{ {"x.y/a", "v1.2.3", true}, }, `module m require x.y/a v1.2.3 // indirect; zero `, }, { `existing_duplicate_multi`, `module m require x.y/a v1.0.0 // zero require x.y/a v1.1.0 // one require x.y/a v1.2.3 // two `, []require{ {"x.y/a", "v1.2.3", true}, }, `module m require x.y/a v1.2.3 // indirect; zero `, }, { `existing_duplicate_mix_indirect`, `module m require ( x.y/a v1.0.0 // zero x.y/a v1.1.0 // indirect; one x.y/a v1.2.3 // indirect; two ) `, []require{ {"x.y/a", "v1.2.3", true}, }, `module m require x.y/a v1.2.3 // indirect; zero `, }, { `existing_duplicate_mix_direct`, `module m require ( x.y/a v1.0.0 // indirect; zero x.y/a v1.1.0 // one x.y/a v1.2.3 // two ) `, []require{ {"x.y/a", "v1.2.3", false}, }, `module m require x.y/a v1.2.3 // zero `, }, { `add_indirect_after_last_direct`, `module m require ( x.y/a v1.0.0 // comment a preserved x.y/d v1.0.0 // comment d preserved ) require ( x.y/b v1.0.0 // comment b preserved x.y/e v1.0.0 // comment e preserved ) go 1.17 `, []require{ {"x.y/a", "v1.2.3", false}, {"x.y/b", "v1.2.3", false}, {"x.y/c", "v1.2.3", true}, {"x.y/d", "v1.2.3", false}, {"x.y/e", "v1.2.3", false}, {"x.y/f", "v1.2.3", true}, }, `module m require ( x.y/a v1.2.3 // comment a preserved x.y/d v1.2.3 // comment d preserved ) require ( x.y/b v1.2.3 // comment b preserved x.y/e v1.2.3 // comment e preserved ) require ( x.y/c v1.2.3 // indirect x.y/f v1.2.3 // indirect ) go 1.17 `, }, { `add_direct_before_first_indirect`, `module m require ( x.y/b v1.0.0 // indirect; comment b preserved x.y/e v1.0.0 // indirect; comment d preserved ) require ( x.y/c v1.0.0 // indirect; comment c preserved 
x.y/f v1.0.0 // indirect; comment e preserved ) `, []require{ {"x.y/a", "v1.2.3", false}, {"x.y/b", "v1.2.3", true}, {"x.y/c", "v1.2.3", true}, {"x.y/d", "v1.2.3", false}, {"x.y/e", "v1.2.3", true}, {"x.y/f", "v1.2.3", true}, }, `module m require ( x.y/b v1.2.3 // indirect; comment b preserved x.y/e v1.2.3 // indirect; comment d preserved ) require ( x.y/c v1.2.3 // indirect; comment c preserved x.y/f v1.2.3 // indirect; comment e preserved ) require ( x.y/a v1.2.3 x.y/d v1.2.3 ) `, }, { `add_indirect_after_mixed`, `module m require ( x.y/a v1.0.0 x.y/b v1.0.0 // indirect ) `, []require{ {"x.y/a", "v1.2.3", false}, {"x.y/b", "v1.2.3", true}, {"x.y/c", "v1.2.3", true}, {"x.y/d", "v1.2.3", false}, {"x.y/e", "v1.2.3", true}, }, `module m require ( x.y/a v1.2.3 x.y/d v1.2.3 ) require ( x.y/b v1.2.3 // indirect x.y/c v1.2.3 // indirect x.y/e v1.2.3 // indirect ) `, }, { `preserve_block_comment_indirect_to_direct`, `module m // save require ( x.y/a v1.2.3 // indirect ) `, []require{ {"x.y/a", "v1.2.3", false}, }, `module m // save require x.y/a v1.2.3 `, }, { `preserve_block_comment_direct_to_indirect`, `module m // save require ( x.y/a v1.2.3 ) `, []require{ {"x.y/a", "v1.2.3", true}, }, `module m // save require x.y/a v1.2.3 // indirect `, }, { `regroup_flat_uncommented_block`, `module m require ( x.y/a v1.0.0 // a x.y/b v1.0.0 // indirect; b x.y/c v1.0.0 // indirect )`, []require{ {"x.y/a", "v1.2.3", false}, {"x.y/b", "v1.2.3", true}, {"x.y/c", "v1.2.3", true}, {"x.y/d", "v1.2.3", false}, }, `module m require ( x.y/a v1.2.3 // a x.y/d v1.2.3 ) require ( x.y/b v1.2.3 // indirect; b x.y/c v1.2.3 // indirect )`, }, { `dont_regroup_flat_commented_block`, `module m // dont regroup require ( x.y/a v1.0.0 x.y/b v1.0.0 // indirect x.y/c v1.0.0 // indirect )`, []require{ {"x.y/a", "v1.2.3", false}, {"x.y/b", "v1.2.3", true}, {"x.y/c", "v1.2.3", true}, {"x.y/d", "v1.2.3", false}, }, `module m // dont regroup require ( x.y/a v1.2.3 x.y/b v1.2.3 // indirect x.y/c v1.2.3 // 
indirect ) require x.y/d v1.2.3`, }, } var addGoTests = []struct { desc string in string version string out string }{ { `module_only`, `module m `, `1.14`, `module m go 1.14 `, }, { `module_before_require`, `module m require x.y/a v1.2.3 `, `1.14`, `module m go 1.14 require x.y/a v1.2.3 `, }, { `require_before_module`, `require x.y/a v1.2.3 module example.com/inverted `, `1.14`, `require x.y/a v1.2.3 module example.com/inverted go 1.14 `, }, { `require_only`, `require x.y/a v1.2.3 `, `1.14`, `require x.y/a v1.2.3 go 1.14 `, }, } var dropGoTests = []struct { desc string in string out string }{ { `module_only`, `module m go 1.14 `, `module m `, }, { `module_before_require`, `module m go 1.14 require x.y/a v1.2.3 `, `module m require x.y/a v1.2.3 `, }, { `require_before_module`, `require x.y/a v1.2.3 module example.com/inverted go 1.14 `, `require x.y/a v1.2.3 module example.com/inverted `, }, { `require_only`, `require x.y/a v1.2.3 go 1.14 `, `require x.y/a v1.2.3 `, }, } var addToolchainTests = []struct { desc string in string version string out string }{ { `empty`, ``, `go1.17`, `toolchain go1.17 `, }, { `aftergo`, `// this is a comment require x v1.0.0 go 1.17 require y v1.0.0 `, `go1.17`, `// this is a comment require x v1.0.0 go 1.17 toolchain go1.17 require y v1.0.0 `, }, { `already_have_toolchain`, `go 1.17 toolchain go1.18 `, `go1.19`, `go 1.17 toolchain go1.19 `, }, } var dropToolchainTests = []struct { desc string in string out string }{ { `empty`, `toolchain go1.17 `, ``, }, { `aftergo`, `// this is a comment require x v1.0.0 go 1.17 toolchain go1.17 require y v1.0.0 `, `// this is a comment require x v1.0.0 go 1.17 require y v1.0.0 `, }, { `already_have_toolchain`, `go 1.17 toolchain go1.18 `, `go 1.17 `, }, } var addExcludeTests = []struct { desc string in string path string version string out string }{ { `compatible`, `module m `, `example.com`, `v1.2.3`, `module m exclude example.com v1.2.3 `, }, { `gopkg.in v0`, `module m `, `gopkg.in/foo.v0`, 
`v0.2.3`, `module m exclude gopkg.in/foo.v0 v0.2.3 `, }, { `gopkg.in v1`, `module m `, `gopkg.in/foo.v1`, `v1.2.3`, `module m exclude gopkg.in/foo.v1 v1.2.3 `, }, } var addRetractTests = []struct { desc string in string low string high string rationale string out string }{ { `new_singleton`, `module m `, `v1.2.3`, `v1.2.3`, ``, `module m retract v1.2.3 `, }, { `new_interval`, `module m `, `v1.0.0`, `v1.1.0`, ``, `module m retract [v1.0.0, v1.1.0]`, }, { `duplicate_with_rationale`, `module m retract v1.2.3 `, `v1.2.3`, `v1.2.3`, `bad`, `module m retract ( v1.2.3 // bad v1.2.3 ) `, }, { `duplicate_multiline_rationale`, `module m retract [v1.2.3, v1.2.3] `, `v1.2.3`, `v1.2.3`, `multi line`, `module m retract ( [v1.2.3, v1.2.3] // multi // line v1.2.3 ) `, }, { `duplicate_interval`, `module m retract [v1.0.0, v1.1.0] `, `v1.0.0`, `v1.1.0`, ``, `module m retract ( [v1.0.0, v1.1.0] [v1.0.0, v1.1.0] ) `, }, { `duplicate_singleton`, `module m retract v1.2.3 `, `v1.2.3`, `v1.2.3`, ``, `module m retract ( v1.2.3 v1.2.3 ) `, }, } var dropRetractTests = []struct { desc string in string low string high string out string }{ { `singleton_no_match`, `module m retract v1.2.3 `, `v1.0.0`, `v1.0.0`, `module m retract v1.2.3 `, }, { `singleton_match_one`, `module m retract v1.2.2 retract v1.2.3 retract v1.2.4 `, `v1.2.3`, `v1.2.3`, `module m retract v1.2.2 retract v1.2.4 `, }, { `singleton_match_all`, `module m retract v1.2.3 // first retract v1.2.3 // second `, `v1.2.3`, `v1.2.3`, `module m `, }, { `interval_match`, `module m retract [v1.2.3, v1.2.3] `, `v1.2.3`, `v1.2.3`, `module m `, }, { `interval_superset_no_match`, `module m retract [v1.0.0, v1.1.0] `, `v1.0.0`, `v1.2.0`, `module m retract [v1.0.0, v1.1.0] `, }, { `singleton_match_middle`, `module m retract v1.2.3 `, `v1.2.3`, `v1.2.3`, `module m `, }, { `interval_match_middle_block`, `module m retract ( v1.0.0 [v1.1.0, v1.2.0] v1.3.0 ) `, `v1.1.0`, `v1.2.0`, `module m retract ( v1.0.0 v1.3.0 ) `, }, { `interval_match_all`, 
`module m retract [v1.0.0, v1.1.0] retract [v1.0.0, v1.1.0] `, `v1.0.0`, `v1.1.0`, `module m `, }, } var retractRationaleTests = []struct { desc, in, want string }{ { `no_comment`, `module m retract v1.0.0`, ``, }, { `prefix_one`, `module m // prefix retract v1.0.0 `, `prefix`, }, { `prefix_multiline`, `module m // one // // two // // three retract v1.0.0`, `one two three`, }, { `suffix`, `module m retract v1.0.0 // suffix `, `suffix`, }, { `prefix_suffix_after`, `module m // prefix retract v1.0.0 // suffix `, `prefix suffix`, }, { `block_only`, `// block retract ( v1.0.0 ) `, `block`, }, { `block_and_line`, `// block retract ( // line v1.0.0 ) `, `line`, }, } var moduleDeprecatedTests = []struct { desc, in, want string }{ // retractRationaleTests exercises some of the same code, so these tests // don't exhaustively cover comment extraction. { `no_comment`, `module m`, ``, }, { `other_comment`, `// yo module m`, ``, }, { `deprecated_no_colon`, `//Deprecated module m`, ``, }, { `deprecated_no_space`, `//Deprecated:blah module m`, `blah`, }, { `deprecated_simple`, `// Deprecated: blah module m`, `blah`, }, { `deprecated_lowercase`, `// deprecated: blah module m`, ``, }, { `deprecated_multiline`, `// Deprecated: one // two module m`, "one\ntwo", }, { `deprecated_mixed`, `// some other comment // Deprecated: blah module m`, ``, }, { `deprecated_middle`, `// module m is Deprecated: blah module m`, ``, }, { `deprecated_multiple`, `// Deprecated: a // Deprecated: b module m`, "a\nDeprecated: b", }, { `deprecated_paragraph`, `// Deprecated: a // b // // c module m`, "a\nb", }, { `deprecated_paragraph_space`, `// Deprecated: the next line has a space // // c module m`, "the next line has a space", }, { `deprecated_suffix`, `module m // Deprecated: blah`, `blah`, }, { `deprecated_mixed_suffix`, `// some other comment module m // Deprecated: blah`, ``, }, { `deprecated_mixed_suffix_paragraph`, `// some other comment // module m // Deprecated: blah`, `blah`, }, { 
`deprecated_block`, `// Deprecated: blah module ( m )`, `blah`, }, } var sortBlocksTests = []struct { desc, in, out string strict bool }{ { `exclude_duplicates_removed`, `module m exclude x.y/z v1.0.0 // a exclude x.y/z v1.0.0 // b exclude ( x.y/w v1.1.0 x.y/z v1.0.0 // c ) `, `module m exclude x.y/z v1.0.0 // a exclude ( x.y/w v1.1.0 )`, true, }, { `replace_duplicates_removed`, `module m replace x.y/z v1.0.0 => ./a replace x.y/z v1.1.0 => ./b replace ( x.y/z v1.0.0 => ./c ) `, `module m replace x.y/z v1.1.0 => ./b replace ( x.y/z v1.0.0 => ./c ) `, true, }, { `retract_duplicates_not_removed`, `module m // block retract ( v1.0.0 // one v1.0.0 // two )`, `module m // block retract ( v1.0.0 // one v1.0.0 // two )`, true, }, // Tests below this point just check sort order. // Non-retract blocks are sorted lexicographically in ascending order. // retract blocks are sorted using semver in descending order. { `sort_lexicographically`, `module m sort ( aa cc bb zz v1.2.0 v1.11.0 )`, `module m sort ( aa bb cc v1.11.0 v1.2.0 zz ) `, false, }, { `sort_retract`, `module m retract ( [v1.2.0, v1.3.0] [v1.1.0, v1.3.0] [v1.1.0, v1.2.0] v1.0.0 v1.1.0 v1.2.0 v1.3.0 v1.4.0 ) `, `module m retract ( v1.4.0 v1.3.0 [v1.2.0, v1.3.0] v1.2.0 [v1.1.0, v1.3.0] [v1.1.0, v1.2.0] v1.1.0 v1.0.0 ) `, false, }, // Exclude blocks are sorted using semver in ascending order // in go.mod files that opt in to Go version 1.21 or newer. { `sort_exclude_go121_semver`, `module m go 1.21 exclude ( b.example/m v0.9.0 a.example/m v1.0.0 b.example/m v0.10.0 c.example/m v1.1.0 b.example/m v0.11.0 )`, `module m go 1.21 exclude ( a.example/m v1.0.0 b.example/m v0.9.0 b.example/m v0.10.0 b.example/m v0.11.0 c.example/m v1.1.0 ) `, true, }, { `sort_exclude_!go121_lexicographically`, // Maintain the previous (less featureful) behavior to avoid unnecessary churn. 
`module m exclude ( b.example/m v0.9.0 a.example/m v1.0.0 b.example/m v0.10.0 c.example/m v1.1.0 b.example/m v0.11.0 )`, `module m exclude ( a.example/m v1.0.0 b.example/m v0.10.0 b.example/m v0.11.0 b.example/m v0.9.0 c.example/m v1.1.0 ) `, true, }, } var addRetractValidateVersionTests = []struct { desc string path string low, high string wantErr string }{ { `blank_version`, `example.com/m`, ``, ``, `version "" invalid: must be of the form v1.2.3`, }, { `missing prefix`, `example.com/m`, `1.0.0`, `1.0.0`, `version "1.0.0" invalid: must be of the form v1.2.3`, }, { `non-canonical`, `example.com/m`, `v1.2`, `v1.2`, `version "v1.2" invalid: must be of the form v1.2.3`, }, { `invalid range`, `example.com/m`, `v1.2.3`, `v1.3`, `version "v1.3" invalid: must be of the form v1.2.3`, }, { `mismatched major`, `example.com/m/v2`, `v1.0.0`, `v1.0.0`, `version "v1.0.0" invalid: should be v2, not v1`, }, { `missing +incompatible`, `example.com/m`, `v2.0.0`, `v2.0.0`, `version "v2.0.0" invalid: should be v2.0.0+incompatible (or module example.com/m/v2)`, }, } var addExcludeValidateVersionTests = []struct { desc string path string version string wantErr string }{ { `blank version`, `example.com/m`, ``, `version "" invalid: must be of the form v1.2.3`, }, { `missing prefix`, `example.com/m`, `1.0.0`, `version "1.0.0" invalid: must be of the form v1.2.3`, }, { `non-canonical`, `example.com/m`, `v1.2`, `version "v1.2" invalid: must be of the form v1.2.3`, }, { `mismatched major`, `example.com/m/v2`, `v1.2.3`, `version "v1.2.3" invalid: should be v2, not v1`, }, { `missing +incompatible`, `example.com/m`, `v2.3.4`, `version "v2.3.4" invalid: should be v2.3.4+incompatible (or module example.com/m/v2)`, }, } var fixVersionTests = []struct { desc, in, want, wantErr string fix VersionFixer }{ { desc: `require`, in: `require example.com/m 1.0.0`, want: `require example.com/m v1.0.0`, fix: fixV, }, { desc: `replace`, in: `replace example.com/m 1.0.0 => example.com/m 1.1.0`, want: `replace 
example.com/m v1.0.0 => example.com/m v1.1.0`, fix: fixV, }, { desc: `replace_version_in_path`, in: `replace example.com/m@v1.0.0 => example.com/m@v1.1.0`, wantErr: `replacement module must match format 'path version', not 'path@version'`, fix: fixV, }, { desc: `replace_version_in_later_path`, in: `replace example.com/m => example.com/m@v1.1.0`, wantErr: `replacement module must match format 'path version', not 'path@version'`, fix: fixV, }, { desc: `exclude`, in: `exclude example.com/m 1.0.0`, want: `exclude example.com/m v1.0.0`, fix: fixV, }, { desc: `retract_single`, in: `module example.com/m retract 1.0.0`, want: `module example.com/m retract v1.0.0`, fix: fixV, }, { desc: `retract_interval`, in: `module example.com/m retract [1.0.0, 1.1.0]`, want: `module example.com/m retract [v1.0.0, v1.1.0]`, fix: fixV, }, { desc: `retract_nomod`, in: `retract 1.0.0`, wantErr: `in:1: no module directive found, so retract cannot be used`, fix: fixV, }, } var modifyEmptyFilesTests = []struct { desc string operations func(f *File) want string }{ { desc: `addGoStmt`, operations: func(f *File) { f.AddGoStmt("1.20") }, want: `go 1.20`, }, } var addGodebugTests = []struct { desc string in string key string value string out string }{ { `existing`, ` module m godebug key=old `, "key", "new", ` module m godebug key=new `, }, { `existing2`, ` module m godebug ( key=first // first other=first-a // first-a ) godebug key=second // second godebug ( key=third // third other=third-a // third-a ) `, "key", "new", ` module m godebug ( key=new // first other=first-a// first-a ) godebug other=third-a // third-a `, }, { `new`, ` module m godebug other=foo `, "key", "new", ` module m godebug ( other=foo key=new ) `, }, { `new2`, ` module m godebug first=1 godebug second=2 `, "third", "3", ` module m godebug first=1 godebug ( second=2 third=3 ) `, }, } var dropGodebugTests = []struct { desc string in string key string out string }{ { `existing`, ` module m godebug key=old `, "key", ` module m `, 
}, { `existing2`, ` module m godebug ( key=first // first other=first-a // first-a ) godebug key=second // second godebug ( key=third // third other=third-a // third-a ) `, "key", ` module m godebug other=first-a// first-a godebug other=third-a // third-a `, }, { `new`, ` module m godebug other=foo `, "key", ` module m godebug other=foo `, }, } func fixV(path, version string) (string, error) { if path != "example.com/m" { return "", fmt.Errorf("module path must be example.com/m") } return "v" + version, nil } func TestAddRequire(t *testing.T) { for _, tt := range addRequireTests { t.Run(tt.desc, func(t *testing.T) { testEdit(t, tt.in, tt.out, true, func(f *File) error { err := f.AddRequire(tt.path, tt.vers) f.Cleanup() return err }) }) } } func TestAddGodebug(t *testing.T) { for _, tt := range addGodebugTests { t.Run(tt.desc, func(t *testing.T) { testEdit(t, tt.in, tt.out, true, func(f *File) error { err := f.AddGodebug(tt.key, tt.value) f.Cleanup() return err }) }) } } func TestSetRequire(t *testing.T) { for _, tt := range setRequireTests { t.Run(tt.desc, func(t *testing.T) { var mods []*Require for _, mod := range tt.mods { mods = append(mods, &Require{ Mod: module.Version{ Path: mod.path, Version: mod.vers, }, Indirect: mod.indirect, }) } f := testEdit(t, tt.in, tt.out, true, func(f *File) error { f.SetRequire(mods) f.Cleanup() return nil }) if len(f.Require) != len(mods) { t.Errorf("after Cleanup, len(Require) = %v; want %v", len(f.Require), len(mods)) } }) } } func TestSetRequireSeparateIndirect(t *testing.T) { for _, tt := range setRequireSeparateIndirectTests { t.Run(tt.desc, func(t *testing.T) { var mods []*Require for _, mod := range tt.mods { mods = append(mods, &Require{ Mod: module.Version{ Path: mod.path, Version: mod.vers, }, Indirect: mod.indirect, }) } f := testEdit(t, tt.in, tt.out, true, func(f *File) error { f.SetRequireSeparateIndirect(mods) f.Cleanup() return nil }) if len(f.Require) != len(mods) { t.Errorf("after Cleanup, len(Require) = %v; 
want %v", len(f.Require), len(mods))
			}
		})
	}
}

// TestAddGo runs the addGoTests table through File.AddGoStmt.
func TestAddGo(t *testing.T) {
	for _, tt := range addGoTests {
		t.Run(tt.desc, func(t *testing.T) {
			testEdit(t, tt.in, tt.out, true, func(f *File) error {
				return f.AddGoStmt(tt.version)
			})
		})
	}
}

// TestDropGo runs the dropGoTests table through File.DropGoStmt.
func TestDropGo(t *testing.T) {
	for _, tt := range dropGoTests {
		t.Run(tt.desc, func(t *testing.T) {
			testEdit(t, tt.in, tt.out, true, func(f *File) error {
				f.DropGoStmt()
				return nil
			})
		})
	}
}

// TestAddToolchain runs the addToolchainTests table through File.AddToolchainStmt.
func TestAddToolchain(t *testing.T) {
	for _, tt := range addToolchainTests {
		t.Run(tt.desc, func(t *testing.T) {
			testEdit(t, tt.in, tt.out, true, func(f *File) error {
				return f.AddToolchainStmt(tt.version)
			})
		})
	}
}

// TestDropToolchain runs the dropToolchainTests table through File.DropToolchainStmt.
func TestDropToolchain(t *testing.T) {
	for _, tt := range dropToolchainTests {
		t.Run(tt.desc, func(t *testing.T) {
			testEdit(t, tt.in, tt.out, true, func(f *File) error {
				f.DropToolchainStmt()
				return nil
			})
		})
	}
}

// TestDropGodebug runs the dropGodebugTests table through File.DropGodebug,
// cleaning up afterwards so emptied blocks are removed.
func TestDropGodebug(t *testing.T) {
	for _, tt := range dropGodebugTests {
		t.Run(tt.desc, func(t *testing.T) {
			testEdit(t, tt.in, tt.out, true, func(f *File) error {
				f.DropGodebug(tt.key)
				f.Cleanup()
				return nil
			})
		})
	}
}

// TestAddExclude runs the addExcludeTests table through File.AddExclude.
func TestAddExclude(t *testing.T) {
	for _, tt := range addExcludeTests {
		t.Run(tt.desc, func(t *testing.T) {
			testEdit(t, tt.in, tt.out, true, func(f *File) error {
				return f.AddExclude(tt.path, tt.version)
			})
		})
	}
}

// TestAddRetract runs the addRetractTests table through File.AddRetract.
func TestAddRetract(t *testing.T) {
	for _, tt := range addRetractTests {
		t.Run(tt.desc, func(t *testing.T) {
			testEdit(t, tt.in, tt.out, true, func(f *File) error {
				return f.AddRetract(VersionInterval{Low: tt.low, High: tt.high}, tt.rationale)
			})
		})
	}
}

// TestDropRetract runs the dropRetractTests table through File.DropRetract,
// cleaning up afterwards so zeroed directives are compacted away.
func TestDropRetract(t *testing.T) {
	for _, tt := range dropRetractTests {
		t.Run(tt.desc, func(t *testing.T) {
			testEdit(t, tt.in, tt.out, true, func(f *File) error {
				if err := f.DropRetract(VersionInterval{Low: tt.low, High: tt.high}); err != nil {
					return err
				}
				f.Cleanup()
				return nil
			})
		})
	}
}

// TestRetractRationale checks comment extraction for a single retract
// directive against the retractRationaleTests table.
func TestRetractRationale(t *testing.T) {
	for _, tt := range retractRationaleTests {
		t.Run(tt.desc, func(t *testing.T) {
			f, err := Parse("in",
				[]byte(tt.in), nil)
			if err != nil {
				t.Fatal(err)
			}
			if len(f.Retract) != 1 {
				t.Fatalf("got %d retract directives; want 1", len(f.Retract))
			}
			if got := f.Retract[0].Rationale; got != tt.want {
				t.Errorf("got %q; want %q", got, tt.want)
			}
		})
	}
}

// TestModuleDeprecated checks Deprecated-comment extraction on the module
// directive against the moduleDeprecatedTests table.
func TestModuleDeprecated(t *testing.T) {
	for _, tt := range moduleDeprecatedTests {
		t.Run(tt.desc, func(t *testing.T) {
			f, err := Parse("in", []byte(tt.in), nil)
			if err != nil {
				t.Fatal(err)
			}
			if f.Module.Deprecated != tt.want {
				t.Errorf("got %q; want %q", f.Module.Deprecated, tt.want)
			}
		})
	}
}

// TestSortBlocks runs the sortBlocksTests table through File.SortBlocks.
func TestSortBlocks(t *testing.T) {
	for _, tt := range sortBlocksTests {
		t.Run(tt.desc, func(t *testing.T) {
			testEdit(t, tt.in, tt.out, tt.strict, func(f *File) error {
				f.SortBlocks()
				return nil
			})
		})
	}
}

// testEdit parses in, applies transform to it, and compares the formatted
// result against the formatted form of want. strict selects Parse vs
// ParseLax. It returns the transformed File so callers can make extra
// assertions on it.
func testEdit(t *testing.T, in, want string, strict bool, transform func(f *File) error) *File {
	t.Helper()
	parse := Parse
	if !strict {
		parse = ParseLax
	}
	f, err := parse("in", []byte(in), nil)
	if err != nil {
		t.Fatal(err)
	}
	g, err := parse("out", []byte(want), nil)
	if err != nil {
		t.Fatal(err)
	}
	// Golden output is want re-formatted, so tests compare normalized forms.
	golden, err := g.Format()
	if err != nil {
		t.Fatal(err)
	}

	if err := transform(f); err != nil {
		t.Fatal(err)
	}
	out, err := f.Format()
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(out, golden) {
		t.Errorf("have:\n%s\nwant:\n%s", out, golden)
	}
	return f
}

// TestAddRetractValidateVersion checks that AddRetract rejects non-canonical
// or major-mismatched versions with the exact expected error text.
func TestAddRetractValidateVersion(t *testing.T) {
	for _, tt := range addRetractValidateVersionTests {
		t.Run(tt.desc, func(t *testing.T) {
			f := new(File)
			if tt.path != "" {
				if err := f.AddModuleStmt(tt.path); err != nil {
					t.Fatal(err)
				}
				t.Logf("module %s", AutoQuote(tt.path))
			}
			interval := VersionInterval{Low: tt.low, High: tt.high}
			if err := f.AddRetract(interval, ``); err == nil || err.Error() != tt.wantErr {
				errStr := ""
				if err != nil {
					errStr = fmt.Sprintf("%#q", err)
				}
				t.Fatalf("f.AddRetract(%+v, ``) = %s\nwant %#q", interval, errStr, tt.wantErr)
			}
		})
	}
}

// TestAddExcludeValidateVersion checks that AddExclude rejects invalid
// versions with the exact expected error text.
func TestAddExcludeValidateVersion(t *testing.T) {
	for _, tt := range addExcludeValidateVersionTests {
		t.Run(tt.desc, func(t *testing.T) {
			f, err := Parse("in", []byte("module m"), nil)
			if err != nil {
				t.Fatal(err)
			}
			if err = f.AddExclude(tt.path, tt.version); err == nil || err.Error() != tt.wantErr {
				errStr := ""
				if err != nil {
					errStr = fmt.Sprintf("%#q", err)
				}
				t.Fatalf("f.AddExclude(%q, %q) = %s\nwant %#q", tt.path, tt.version, errStr, tt.wantErr)
			}
		})
	}
}

// TestFixVersion checks that a VersionFixer passed to Parse rewrites (or
// rejects) versions in every directive form covered by fixVersionTests.
func TestFixVersion(t *testing.T) {
	for _, tt := range fixVersionTests {
		t.Run(tt.desc, func(t *testing.T) {
			inFile, err := Parse("in", []byte(tt.in), tt.fix)
			if err != nil {
				if tt.wantErr == "" {
					t.Fatalf("unexpected error: %v", err)
				}
				if errMsg := err.Error(); !strings.Contains(errMsg, tt.wantErr) {
					t.Fatalf("got error %q; want error containing %q", errMsg, tt.wantErr)
				}
				return
			}
			got, err := inFile.Format()
			if err != nil {
				t.Fatal(err)
			}
			outFile, err := Parse("out", []byte(tt.want), nil)
			if err != nil {
				t.Fatal(err)
			}
			want, err := outFile.Format()
			if err != nil {
				t.Fatal(err)
			}
			if !bytes.Equal(got, want) {
				t.Fatalf("got:\n%s\nwant:\n%s", got, want)
			}
		})
	}
}

// TestAddOnEmptyFile checks that mutation methods work on a zero-value File
// (no parsed syntax at all), per modifyEmptyFilesTests.
func TestAddOnEmptyFile(t *testing.T) {
	for _, tt := range modifyEmptyFilesTests {
		t.Run(tt.desc, func(t *testing.T) {
			f := &File{}
			tt.operations(f)
			expect, err := Parse("out", []byte(tt.want), nil)
			if err != nil {
				t.Fatal(err)
			}
			golden, err := expect.Format()
			if err != nil {
				t.Fatal(err)
			}
			got, err := f.Format()
			if err != nil {
				t.Fatal(err)
			}
			if !bytes.Equal(got, golden) {
				t.Fatalf("got:\n%s\nwant:\n%s", got, golden)
			}
		})
	}
}
mod-0.19.0/modfile/testdata/000077500000000000000000000000001463702072700156205ustar00rootroot00000000000000mod-0.19.0/modfile/testdata/block.golden000066400000000000000000000005171463702072700201070ustar00rootroot00000000000000// comment x "y" z // block block ( // block-eol // x-before-line "x" (y // x-eol "x") y // y-eol "x1" "x2" // line "x3" "x4" "x5" // y-line "y" // y-eol "z" // z-eol ) // block-eol2 block1 ( ) block2 (x y z) block3 "w" ( ) // empty block block4 "x" () "y" // not a block block5 ("z" // also not a block // eof
mod-0.19.0/modfile/testdata/block.in000066400000000000000000000005241463702072700172430ustar00rootroot00000000000000// comment x "y" z // block block ( // block-eol // x-before-line "x" ( y // x-eol "x" ) y // y-eol "x1" "x2" // line "x3" "x4" "x5" // y-line "y" // y-eol "z" // z-eol ) // block-eol2 block1() block2 (x y z) block3 "w" ( ) // empty block block4 "x" ( ) "y" // not a block block5 ( "z" // also not a block // eof mod-0.19.0/modfile/testdata/comment.golden000066400000000000000000000001571463702072700204570ustar00rootroot00000000000000// comment module "x" // eol // mid comment // comment 2 // comment 2 line 2 module "y" // eoy // comment 3 mod-0.19.0/modfile/testdata/comment.in000066400000000000000000000001551463702072700176130ustar00rootroot00000000000000// comment module "x" // eol // mid comment // comment 2 // comment 2 line 2 module "y" // eoy // comment 3 mod-0.19.0/modfile/testdata/empty.golden000066400000000000000000000000001463702072700201360ustar00rootroot00000000000000mod-0.19.0/modfile/testdata/empty.in000066400000000000000000000000001463702072700172740ustar00rootroot00000000000000mod-0.19.0/modfile/testdata/goline.golden000066400000000000000000000000341463702072700202640ustar00rootroot00000000000000go 1.2.3 toolchain default mod-0.19.0/modfile/testdata/goline.in000066400000000000000000000000331463702072700174210ustar00rootroot00000000000000go 1.2.3 toolchain default mod-0.19.0/modfile/testdata/gopkg.in.golden000066400000000000000000000001431463702072700205240ustar00rootroot00000000000000module x require ( gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528 gopkg.in/yaml.v2 v2.2.1 ) mod-0.19.0/modfile/testdata/module.golden000066400000000000000000000000131463702072700202710ustar00rootroot00000000000000module abc mod-0.19.0/modfile/testdata/module.in000066400000000000000000000000151463702072700174310ustar00rootroot00000000000000module "abc" 
mod-0.19.0/modfile/testdata/replace.golden000066400000000000000000000002611463702072700204240ustar00rootroot00000000000000module abc replace xyz v1.2.3 => /tmp/z replace xyz v1.3.4 => my/xyz v1.3.4-me replace ( w v1.0.0 => "./a," w v1.0.1 => "./a()" w v1.0.2 => "./a[]" w v1.0.3 => "./a{}" ) mod-0.19.0/modfile/testdata/replace.in000066400000000000000000000003031463702072700175570ustar00rootroot00000000000000module "abc" replace "xyz" v1.2.3 => "/tmp/z" replace "xyz" v1.3.4 => "my/xyz" v1.3.4-me replace ( "w" v1.0.0 => "./a," "w" v1.0.1 => "./a()" "w" v1.0.2 => "./a[]" "w" v1.0.3 => "./a{}" ) mod-0.19.0/modfile/testdata/replace2.golden000066400000000000000000000002451463702072700205100ustar00rootroot00000000000000module abc replace ( xyz v1.2.3 => /tmp/z xyz v1.3.4 => my/xyz v1.3.4-me xyz v1.4.5 => "/tmp/my dir" xyz v1.5.6 => my/xyz v1.5.6 xyz => my/other/xyz v1.5.4 ) mod-0.19.0/modfile/testdata/replace2.in000066400000000000000000000002631463702072700176460ustar00rootroot00000000000000module "abc" replace ( "xyz" v1.2.3 => "/tmp/z" "xyz" v1.3.4 => "my/xyz" "v1.3.4-me" xyz "v1.4.5" => "/tmp/my dir" xyz v1.5.6 => my/xyz v1.5.6 xyz => my/other/xyz v1.5.4 ) mod-0.19.0/modfile/testdata/retract.golden000066400000000000000000000001351463702072700204550ustar00rootroot00000000000000module abc retract v1.2.3 retract [v1.2.3, v1.2.4] retract ( v1.2.3 [v1.2.3, v1.2.4] ) mod-0.19.0/modfile/testdata/retract.in000066400000000000000000000001571463702072700176170ustar00rootroot00000000000000module abc retract "v1.2.3" retract [ "v1.2.3" , "v1.2.4" ] retract ( "v1.2.3" [ "v1.2.3" , "v1.2.4" ] ) mod-0.19.0/modfile/testdata/rule1.golden000066400000000000000000000000571463702072700200440ustar00rootroot00000000000000module "x" module "y" require "x" require x 
mod-0.19.0/modfile/testdata/work/000077500000000000000000000000001463702072700166025ustar00rootroot00000000000000mod-0.19.0/modfile/testdata/work/comment.golden000066400000000000000000000001451463702072700214360ustar00rootroot00000000000000// comment use x // eol // mid comment // comment 2 // comment 2 line 2 use y // eoy // comment 3 mod-0.19.0/modfile/testdata/work/comment.in000066400000000000000000000001471463702072700205760ustar00rootroot00000000000000// comment use "x" // eol // mid comment // comment 2 // comment 2 line 2 use "y" // eoy // comment 3 mod-0.19.0/modfile/testdata/work/empty.golden000066400000000000000000000000001463702072700211200ustar00rootroot00000000000000mod-0.19.0/modfile/testdata/work/empty.in000066400000000000000000000000001463702072700202560ustar00rootroot00000000000000mod-0.19.0/modfile/testdata/work/goline.golden000066400000000000000000000000341463702072700212460ustar00rootroot00000000000000go 1.2.3 toolchain default mod-0.19.0/modfile/testdata/work/goline.in000066400000000000000000000000331463702072700204030ustar00rootroot00000000000000go 1.2.3 toolchain default mod-0.19.0/modfile/testdata/work/replace.golden000066400000000000000000000002561463702072700214120ustar00rootroot00000000000000use abc replace xyz v1.2.3 => /tmp/z replace xyz v1.3.4 => my/xyz v1.3.4-me replace ( w v1.0.0 => "./a," w v1.0.1 => "./a()" w v1.0.2 => "./a[]" w v1.0.3 => "./a{}" ) mod-0.19.0/modfile/testdata/work/replace.in000066400000000000000000000003001463702072700205360ustar00rootroot00000000000000use "abc" replace "xyz" v1.2.3 => "/tmp/z" replace "xyz" v1.3.4 => "my/xyz" v1.3.4-me replace ( "w" v1.0.0 => "./a," "w" v1.0.1 => "./a()" "w" v1.0.2 => "./a[]" "w" v1.0.3 => "./a{}" ) mod-0.19.0/modfile/testdata/work/replace2.golden000066400000000000000000000002421463702072700214670ustar00rootroot00000000000000use abc replace ( xyz v1.2.3 => /tmp/z xyz v1.3.4 => my/xyz v1.3.4-me xyz v1.4.5 => "/tmp/my dir" xyz v1.5.6 => my/xyz v1.5.6 xyz => my/other/xyz v1.5.4 ) 
mod-0.19.0/modfile/testdata/work/replace2.in000066400000000000000000000002601463702072700206250ustar00rootroot00000000000000use "abc" replace ( "xyz" v1.2.3 => "/tmp/z" "xyz" v1.3.4 => "my/xyz" "v1.3.4-me" xyz "v1.4.5" => "/tmp/my dir" xyz v1.5.6 => my/xyz v1.5.6 xyz => my/other/xyz v1.5.4 ) mod-0.19.0/modfile/testdata/work/use.golden000066400000000000000000000000401463702072700205620ustar00rootroot00000000000000use ../foo use ( /bar baz ) mod-0.19.0/modfile/testdata/work/use.in000066400000000000000000000000461463702072700177260ustar00rootroot00000000000000use "../foo" use ( "/bar" "baz" ) mod-0.19.0/modfile/work.go000066400000000000000000000173031463702072700153240ustar00rootroot00000000000000// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package modfile import ( "fmt" "sort" "strings" ) // A WorkFile is the parsed, interpreted form of a go.work file. type WorkFile struct { Go *Go Toolchain *Toolchain Godebug []*Godebug Use []*Use Replace []*Replace Syntax *FileSyntax } // A Use is a single directory statement. type Use struct { Path string // Use path of module. ModulePath string // Module path in the comment. Syntax *Line } // ParseWork parses and returns a go.work file. // // file is the name of the file, used in positions and errors. // // data is the content of the file. // // fix is an optional function that canonicalizes module versions. // If fix is nil, all module versions must be canonical ([module.CanonicalVersion] // must return the same string). 
func ParseWork(file string, data []byte, fix VersionFixer) (*WorkFile, error) { fs, err := parse(file, data) if err != nil { return nil, err } f := &WorkFile{ Syntax: fs, } var errs ErrorList for _, x := range fs.Stmt { switch x := x.(type) { case *Line: f.add(&errs, x, x.Token[0], x.Token[1:], fix) case *LineBlock: if len(x.Token) > 1 { errs = append(errs, Error{ Filename: file, Pos: x.Start, Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")), }) continue } switch x.Token[0] { default: errs = append(errs, Error{ Filename: file, Pos: x.Start, Err: fmt.Errorf("unknown block type: %s", strings.Join(x.Token, " ")), }) continue case "godebug", "use", "replace": for _, l := range x.Line { f.add(&errs, l, x.Token[0], l.Token, fix) } } } } if len(errs) > 0 { return nil, errs } return f, nil } // Cleanup cleans up the file f after any edit operations. // To avoid quadratic behavior, modifications like [WorkFile.DropRequire] // clear the entry but do not remove it from the slice. // Cleanup cleans out all the cleared entries. func (f *WorkFile) Cleanup() { w := 0 for _, r := range f.Use { if r.Path != "" { f.Use[w] = r w++ } } f.Use = f.Use[:w] w = 0 for _, r := range f.Replace { if r.Old.Path != "" { f.Replace[w] = r w++ } } f.Replace = f.Replace[:w] f.Syntax.Cleanup() } func (f *WorkFile) AddGoStmt(version string) error { if !GoVersionRE.MatchString(version) { return fmt.Errorf("invalid language version %q", version) } if f.Go == nil { stmt := &Line{Token: []string{"go", version}} f.Go = &Go{ Version: version, Syntax: stmt, } // Find the first non-comment-only block and add // the go statement before it. That will keep file comments at the top. i := 0 for i = 0; i < len(f.Syntax.Stmt); i++ { if _, ok := f.Syntax.Stmt[i].(*CommentBlock); !ok { break } } f.Syntax.Stmt = append(append(f.Syntax.Stmt[:i:i], stmt), f.Syntax.Stmt[i:]...) 
} else { f.Go.Version = version f.Syntax.updateLine(f.Go.Syntax, "go", version) } return nil } func (f *WorkFile) AddToolchainStmt(name string) error { if !ToolchainRE.MatchString(name) { return fmt.Errorf("invalid toolchain name %q", name) } if f.Toolchain == nil { stmt := &Line{Token: []string{"toolchain", name}} f.Toolchain = &Toolchain{ Name: name, Syntax: stmt, } // Find the go line and add the toolchain line after it. // Or else find the first non-comment-only block and add // the toolchain line before it. That will keep file comments at the top. i := 0 for i = 0; i < len(f.Syntax.Stmt); i++ { if line, ok := f.Syntax.Stmt[i].(*Line); ok && len(line.Token) > 0 && line.Token[0] == "go" { i++ goto Found } } for i = 0; i < len(f.Syntax.Stmt); i++ { if _, ok := f.Syntax.Stmt[i].(*CommentBlock); !ok { break } } Found: f.Syntax.Stmt = append(append(f.Syntax.Stmt[:i:i], stmt), f.Syntax.Stmt[i:]...) } else { f.Toolchain.Name = name f.Syntax.updateLine(f.Toolchain.Syntax, "toolchain", name) } return nil } // DropGoStmt deletes the go statement from the file. func (f *WorkFile) DropGoStmt() { if f.Go != nil { f.Go.Syntax.markRemoved() f.Go = nil } } // DropToolchainStmt deletes the toolchain statement from the file. func (f *WorkFile) DropToolchainStmt() { if f.Toolchain != nil { f.Toolchain.Syntax.markRemoved() f.Toolchain = nil } } // AddGodebug sets the first godebug line for key to value, // preserving any existing comments for that line and removing all // other godebug lines for key. // // If no line currently exists for key, AddGodebug adds a new line // at the end of the last godebug block. 
func (f *WorkFile) AddGodebug(key, value string) error { need := true for _, g := range f.Godebug { if g.Key == key { if need { g.Value = value f.Syntax.updateLine(g.Syntax, "godebug", key+"="+value) need = false } else { g.Syntax.markRemoved() *g = Godebug{} } } } if need { f.addNewGodebug(key, value) } return nil } // addNewGodebug adds a new godebug key=value line at the end // of the last godebug block, regardless of any existing godebug lines for key. func (f *WorkFile) addNewGodebug(key, value string) { line := f.Syntax.addLine(nil, "godebug", key+"="+value) g := &Godebug{ Key: key, Value: value, Syntax: line, } f.Godebug = append(f.Godebug, g) } func (f *WorkFile) DropGodebug(key string) error { for _, g := range f.Godebug { if g.Key == key { g.Syntax.markRemoved() *g = Godebug{} } } return nil } func (f *WorkFile) AddUse(diskPath, modulePath string) error { need := true for _, d := range f.Use { if d.Path == diskPath { if need { d.ModulePath = modulePath f.Syntax.updateLine(d.Syntax, "use", AutoQuote(diskPath)) need = false } else { d.Syntax.markRemoved() *d = Use{} } } } if need { f.AddNewUse(diskPath, modulePath) } return nil } func (f *WorkFile) AddNewUse(diskPath, modulePath string) { line := f.Syntax.addLine(nil, "use", AutoQuote(diskPath)) f.Use = append(f.Use, &Use{Path: diskPath, ModulePath: modulePath, Syntax: line}) } func (f *WorkFile) SetUse(dirs []*Use) { need := make(map[string]string) for _, d := range dirs { need[d.Path] = d.ModulePath } for _, d := range f.Use { if modulePath, ok := need[d.Path]; ok { d.ModulePath = modulePath } else { d.Syntax.markRemoved() *d = Use{} } } // TODO(#45713): Add module path to comment. 
for diskPath, modulePath := range need { f.AddNewUse(diskPath, modulePath) } f.SortBlocks() } func (f *WorkFile) DropUse(path string) error { for _, d := range f.Use { if d.Path == path { d.Syntax.markRemoved() *d = Use{} } } return nil } func (f *WorkFile) AddReplace(oldPath, oldVers, newPath, newVers string) error { return addReplace(f.Syntax, &f.Replace, oldPath, oldVers, newPath, newVers) } func (f *WorkFile) DropReplace(oldPath, oldVers string) error { for _, r := range f.Replace { if r.Old.Path == oldPath && r.Old.Version == oldVers { r.Syntax.markRemoved() *r = Replace{} } } return nil } func (f *WorkFile) SortBlocks() { f.removeDups() // otherwise sorting is unsafe for _, stmt := range f.Syntax.Stmt { block, ok := stmt.(*LineBlock) if !ok { continue } sort.SliceStable(block.Line, func(i, j int) bool { return lineLess(block.Line[i], block.Line[j]) }) } } // removeDups removes duplicate replace directives. // // Later replace directives take priority. // // require directives are not de-duplicated. That's left up to higher-level // logic (MVS). // // retract directives are not de-duplicated since comments are // meaningful, and versions may be retracted multiple times. func (f *WorkFile) removeDups() { removeDups(f.Syntax, nil, &f.Replace) } mod-0.19.0/modfile/work_test.go000066400000000000000000000166221463702072700163660ustar00rootroot00000000000000// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package modfile import ( "bytes" "os" "path/filepath" "strings" "testing" ) // TODO(#45713): Update these tests once AddUse sets the module path. 
var workAddUseTests = []struct { desc string in string path string modulePath string out string }{ { `empty`, ``, `foo`, `bar`, `use foo`, }, { `go_stmt_only`, `go 1.17 `, `foo`, `bar`, `go 1.17 use foo `, }, { `use_line_present`, `go 1.17 use baz`, `foo`, `bar`, `go 1.17 use ( baz foo ) `, }, { `use_block_present`, `go 1.17 use ( baz quux ) `, `foo`, `bar`, `go 1.17 use ( baz quux foo ) `, }, { `use_and_replace_present`, `go 1.17 use baz replace a => ./b `, `foo`, `bar`, `go 1.17 use ( baz foo ) replace a => ./b `, }, } var workDropUseTests = []struct { desc string in string path string out string }{ { `empty`, ``, `foo`, ``, }, { `go_stmt_only`, `go 1.17 `, `foo`, `go 1.17 `, }, { `single_use`, `go 1.17 use foo`, `foo`, `go 1.17 `, }, { `use_block`, `go 1.17 use ( foo bar baz )`, `bar`, `go 1.17 use ( foo baz )`, }, { `use_multi`, `go 1.17 use ( foo bar baz ) use foo use quux use foo`, `foo`, `go 1.17 use ( bar baz ) use quux`, }, } var workAddGoTests = []struct { desc string in string version string out string }{ { `empty`, ``, `1.17`, `go 1.17 `, }, { `comment`, `// this is a comment`, `1.17`, `// this is a comment go 1.17`, }, { `use_after_replace`, ` replace example.com/foo => ../bar use foo `, `1.17`, ` go 1.17 replace example.com/foo => ../bar use foo `, }, { `use_before_replace`, `use foo replace example.com/foo => ../bar `, `1.17`, ` go 1.17 use foo replace example.com/foo => ../bar `, }, { `use_only`, `use foo `, `1.17`, ` go 1.17 use foo `, }, { `already_have_go`, `go 1.17 `, `1.18`, ` go 1.18 `, }, } var workAddToolchainTests = []struct { desc string in string version string out string }{ { `empty`, ``, `go1.17`, `toolchain go1.17 `, }, { `aftergo`, `// this is a comment use foo go 1.17 use bar `, `go1.17`, `// this is a comment use foo go 1.17 toolchain go1.17 use bar `, }, { `already_have_toolchain`, `go 1.17 toolchain go1.18 `, `go1.19`, `go 1.17 toolchain go1.19 `, }, } var workSortBlocksTests = []struct { desc, in, out string }{ { 
`use_duplicates_not_removed`, `go 1.17 use foo use bar use ( foo )`, `go 1.17 use foo use bar use ( foo )`, }, { `replace_duplicates_removed`, `go 1.17 use foo replace x.y/z v1.0.0 => ./a replace x.y/z v1.1.0 => ./b replace ( x.y/z v1.0.0 => ./c ) `, `go 1.17 use foo replace x.y/z v1.1.0 => ./b replace ( x.y/z v1.0.0 => ./c ) `, }, } func TestAddUse(t *testing.T) { for _, tt := range workAddUseTests { t.Run(tt.desc, func(t *testing.T) { testWorkEdit(t, tt.in, tt.out, func(f *WorkFile) error { return f.AddUse(tt.path, tt.modulePath) }) }) } } func TestDropUse(t *testing.T) { for _, tt := range workDropUseTests { t.Run(tt.desc, func(t *testing.T) { testWorkEdit(t, tt.in, tt.out, func(f *WorkFile) error { if err := f.DropUse(tt.path); err != nil { return err } f.Cleanup() return nil }) }) } } func TestWorkAddGo(t *testing.T) { for _, tt := range workAddGoTests { t.Run(tt.desc, func(t *testing.T) { testWorkEdit(t, tt.in, tt.out, func(f *WorkFile) error { return f.AddGoStmt(tt.version) }) }) } } func TestWorkAddToolchain(t *testing.T) { for _, tt := range workAddToolchainTests { t.Run(tt.desc, func(t *testing.T) { testWorkEdit(t, tt.in, tt.out, func(f *WorkFile) error { return f.AddToolchainStmt(tt.version) }) }) } } func TestWorkSortBlocks(t *testing.T) { for _, tt := range workSortBlocksTests { t.Run(tt.desc, func(t *testing.T) { testWorkEdit(t, tt.in, tt.out, func(f *WorkFile) error { f.SortBlocks() return nil }) }) } } func TestWorkAddGodebug(t *testing.T) { for _, tt := range addGodebugTests { t.Run(tt.desc, func(t *testing.T) { in := strings.ReplaceAll(tt.in, "module m", "use foo") out := strings.ReplaceAll(tt.out, "module m", "use foo") testWorkEdit(t, in, out, func(f *WorkFile) error { err := f.AddGodebug(tt.key, tt.value) f.Cleanup() return err }) }) } } func TestWorkDropGodebug(t *testing.T) { for _, tt := range dropGodebugTests { t.Run(tt.desc, func(t *testing.T) { in := strings.ReplaceAll(tt.in, "module m", "use foo") out := strings.ReplaceAll(tt.out, 
"module m", "use foo") testWorkEdit(t, in, out, func(f *WorkFile) error { f.DropGodebug(tt.key) f.Cleanup() return nil }) }) } } // Test that when files in the testdata directory are parsed // and printed and parsed again, we get the same parse tree // both times. func TestWorkPrintParse(t *testing.T) { outs, err := filepath.Glob("testdata/work/*") if err != nil { t.Fatal(err) } for _, out := range outs { out := out name := filepath.Base(out) t.Run(name, func(t *testing.T) { t.Parallel() data, err := os.ReadFile(out) if err != nil { t.Fatal(err) } base := "testdata/work/" + filepath.Base(out) f, err := parse(base, data) if err != nil { t.Fatalf("parsing original: %v", err) } ndata := Format(f) f2, err := parse(base, ndata) if err != nil { t.Fatalf("parsing reformatted: %v", err) } eq := eqchecker{file: base} if err := eq.check(f, f2); err != nil { t.Errorf("not equal (parse/Format/parse): %v", err) } pf1, err := ParseWork(base, data, nil) if err != nil { t.Errorf("should parse %v: %v", base, err) } if err == nil { pf2, err := ParseWork(base, ndata, nil) if err != nil { t.Fatalf("Parsing reformatted: %v", err) } eq := eqchecker{file: base} if err := eq.check(pf1, pf2); err != nil { t.Errorf("not equal (parse/Format/Parse): %v", err) } ndata2 := Format(pf1.Syntax) pf3, err := ParseWork(base, ndata2, nil) if err != nil { t.Fatalf("Parsing reformatted2: %v", err) } eq = eqchecker{file: base} if err := eq.check(pf1, pf3); err != nil { t.Errorf("not equal (Parse/Format/Parse): %v", err) } ndata = ndata2 } if strings.HasSuffix(out, ".in") { golden, err := os.ReadFile(strings.TrimSuffix(out, ".in") + ".golden") if err != nil { t.Fatal(err) } if !bytes.Equal(ndata, golden) { t.Errorf("formatted %s incorrectly: diff shows -golden, +ours", base) tdiff(t, string(golden), string(ndata)) return } } }) } } func testWorkEdit(t *testing.T, in, want string, transform func(f *WorkFile) error) *WorkFile { t.Helper() parse := ParseWork f, err := parse("in", []byte(in), nil) if err != 
nil { t.Fatal(err) } g, err := parse("out", []byte(want), nil) if err != nil { t.Fatal(err) } golden := Format(g.Syntax) if err := transform(f); err != nil { t.Fatal(err) } out := Format(f.Syntax) if err != nil { t.Fatal(err) } if !bytes.Equal(out, golden) { t.Errorf("have:\n%s\nwant:\n%s", out, golden) } return f } mod-0.19.0/module/000077500000000000000000000000001463702072700136555ustar00rootroot00000000000000mod-0.19.0/module/module.go000066400000000000000000000656131463702072700155040ustar00rootroot00000000000000// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package module defines the module.Version type along with support code. // // The [module.Version] type is a simple Path, Version pair: // // type Version struct { // Path string // Version string // } // // There are no restrictions imposed directly by use of this structure, // but additional checking functions, most notably [Check], verify that // a particular path, version pair is valid. // // # Escaped Paths // // Module paths appear as substrings of file system paths // (in the download cache) and of web server URLs in the proxy protocol. // In general we cannot rely on file systems to be case-sensitive, // nor can we rely on web servers, since they read from file systems. // That is, we cannot rely on the file system to keep rsc.io/QUOTE // and rsc.io/quote separate. Windows and macOS don't. // Instead, we must never require two different casings of a file path. // Because we want the download cache to match the proxy protocol, // and because we want the proxy protocol to be possible to serve // from a tree of static files (which might be stored on a case-insensitive // file system), the proxy protocol must never require two different casings // of a URL path either. // // One possibility would be to make the escaped form be the lowercase // hexadecimal encoding of the actual path bytes. 
This would avoid ever // needing different casings of a file path, but it would be fairly illegible // to most programmers when those paths appeared in the file system // (including in file paths in compiler errors and stack traces) // in web server logs, and so on. Instead, we want a safe escaped form that // leaves most paths unaltered. // // The safe escaped form is to replace every uppercase letter // with an exclamation mark followed by the letter's lowercase equivalent. // // For example, // // github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go. // github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy // github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus. // // Import paths that avoid upper-case letters are left unchanged. // Note that because import paths are ASCII-only and avoid various // problematic punctuation (like : < and >), the escaped form is also ASCII-only // and avoids the same problematic punctuation. // // Import paths have never allowed exclamation marks, so there is no // need to define how to escape a literal !. // // # Unicode Restrictions // // Today, paths are disallowed from using Unicode. // // Although paths are currently disallowed from using Unicode, // we would like at some point to allow Unicode letters as well, to assume that // file systems and URLs are Unicode-safe (storing UTF-8), and apply // the !-for-uppercase convention for escaping them in the file system. // But there are at least two subtle considerations. // // First, note that not all case-fold equivalent distinct runes // form an upper/lower pair. // For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin) // are three distinct runes that case-fold to each other. // When we do add Unicode letters, we must not assume that upper/lower // are the only case-equivalent pairs. // Perhaps the Kelvin symbol would be disallowed entirely, for example. 
// Or perhaps it would escape as "!!k", or perhaps as "(212A)". // // Second, it would be nice to allow Unicode marks as well as letters, // but marks include combining marks, and then we must deal not // only with case folding but also normalization: both U+00E9 ('é') // and U+0065 U+0301 ('e' followed by combining acute accent) // look the same on the page and are treated by some file systems // as the same path. If we do allow Unicode marks in paths, there // must be some kind of normalization to allow only one canonical // encoding of any character used in an import path. package module // IMPORTANT NOTE // // This file essentially defines the set of valid import paths for the go command. // There are many subtle considerations, including Unicode ambiguity, // security, network, and file system representations. // // This file also defines the set of valid module path and version combinations, // another topic with many subtle considerations. // // Changes to the semantics in this file require approval from rsc. import ( "errors" "fmt" "path" "sort" "strings" "unicode" "unicode/utf8" "golang.org/x/mod/semver" ) // A Version (for clients, a module.Version) is defined by a module path and version pair. // These are stored in their plain (unescaped) form. type Version struct { // Path is a module path, like "golang.org/x/text" or "rsc.io/quote/v2". Path string // Version is usually a semantic version in canonical form. // There are three exceptions to this general rule. // First, the top-level target of a build has no specific version // and uses Version = "". // Second, during MVS calculations the version "none" is used // to represent the decision to take no version of a given module. // Third, filesystem paths found in "replace" directives are // represented by a path with an empty version. Version string `json:",omitempty"` } // String returns a representation of the Version suitable for logging // (Path@Version, or just Path if Version is empty). 
func (m Version) String() string { if m.Version == "" { return m.Path } return m.Path + "@" + m.Version } // A ModuleError indicates an error specific to a module. type ModuleError struct { Path string Version string Err error } // VersionError returns a [ModuleError] derived from a [Version] and error, // or err itself if it is already such an error. func VersionError(v Version, err error) error { var mErr *ModuleError if errors.As(err, &mErr) && mErr.Path == v.Path && mErr.Version == v.Version { return err } return &ModuleError{ Path: v.Path, Version: v.Version, Err: err, } } func (e *ModuleError) Error() string { if v, ok := e.Err.(*InvalidVersionError); ok { return fmt.Sprintf("%s@%s: invalid %s: %v", e.Path, v.Version, v.noun(), v.Err) } if e.Version != "" { return fmt.Sprintf("%s@%s: %v", e.Path, e.Version, e.Err) } return fmt.Sprintf("module %s: %v", e.Path, e.Err) } func (e *ModuleError) Unwrap() error { return e.Err } // An InvalidVersionError indicates an error specific to a version, with the // module path unknown or specified externally. // // A [ModuleError] may wrap an InvalidVersionError, but an InvalidVersionError // must not wrap a ModuleError. type InvalidVersionError struct { Version string Pseudo bool Err error } // noun returns either "version" or "pseudo-version", depending on whether // e.Version is a pseudo-version. func (e *InvalidVersionError) noun() string { if e.Pseudo { return "pseudo-version" } return "version" } func (e *InvalidVersionError) Error() string { return fmt.Sprintf("%s %q invalid: %s", e.noun(), e.Version, e.Err) } func (e *InvalidVersionError) Unwrap() error { return e.Err } // An InvalidPathError indicates a module, import, or file path doesn't // satisfy all naming constraints. See [CheckPath], [CheckImportPath], // and [CheckFilePath] for specific restrictions. 
type InvalidPathError struct { Kind string // "module", "import", or "file" Path string Err error } func (e *InvalidPathError) Error() string { return fmt.Sprintf("malformed %s path %q: %v", e.Kind, e.Path, e.Err) } func (e *InvalidPathError) Unwrap() error { return e.Err } // Check checks that a given module path, version pair is valid. // In addition to the path being a valid module path // and the version being a valid semantic version, // the two must correspond. // For example, the path "yaml/v2" only corresponds to // semantic versions beginning with "v2.". func Check(path, version string) error { if err := CheckPath(path); err != nil { return err } if !semver.IsValid(version) { return &ModuleError{ Path: path, Err: &InvalidVersionError{Version: version, Err: errors.New("not a semantic version")}, } } _, pathMajor, _ := SplitPathVersion(path) if err := CheckPathMajor(version, pathMajor); err != nil { return &ModuleError{Path: path, Err: err} } return nil } // firstPathOK reports whether r can appear in the first element of a module path. // The first element of the path must be an LDH domain name, at least for now. // To avoid case ambiguity, the domain name must be entirely lower case. func firstPathOK(r rune) bool { return r == '-' || r == '.' || '0' <= r && r <= '9' || 'a' <= r && r <= 'z' } // modPathOK reports whether r can appear in a module path element. // Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~. // // This matches what "go get" has historically recognized in import paths, // and avoids confusing sequences like '%20' or '+' that would change meaning // if used in a URL. // // TODO(rsc): We would like to allow Unicode letters, but that requires additional // care in the safe encoding (see "escaped paths" above). func modPathOK(r rune) bool { if r < utf8.RuneSelf { return r == '-' || r == '.' 
|| r == '_' || r == '~' || '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' } return false } // importPathOK reports whether r can appear in a package import path element. // // Import paths are intermediate between module paths and file paths: we allow // disallow characters that would be confusing or ambiguous as arguments to // 'go get' (such as '@' and ' ' ), but allow certain characters that are // otherwise-unambiguous on the command line and historically used for some // binary names (such as '++' as a suffix for compiler binaries and wrappers). func importPathOK(r rune) bool { return modPathOK(r) || r == '+' } // fileNameOK reports whether r can appear in a file name. // For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters. // If we expand the set of allowed characters here, we have to // work harder at detecting potential case-folding and normalization collisions. // See note about "escaped paths" above. func fileNameOK(r rune) bool { if r < utf8.RuneSelf { // Entire set of ASCII punctuation, from which we remove characters: // ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~ // We disallow some shell special characters: " ' * < > ? ` | // (Note that some of those are disallowed by the Windows file system as well.) // We also disallow path separators / : and \ (fileNameOK is only called on path element characters). // We allow spaces (U+0020) in file names. const allowed = "!#$%&()+,-.=@[]^_{}~ " if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' { return true } return strings.ContainsRune(allowed, r) } // It may be OK to add more ASCII punctuation here, but only carefully. // For example Windows disallows < > \, and macOS disallows :, so we must not allow those. return unicode.IsLetter(r) } // CheckPath checks that a module path is valid. 
// A valid module path is a valid import path, as checked by [CheckImportPath], // with three additional constraints. // First, the leading path element (up to the first slash, if any), // by convention a domain name, must contain only lower-case ASCII letters, // ASCII digits, dots (U+002E), and dashes (U+002D); // it must contain at least one dot and cannot start with a dash. // Second, for a final path element of the form /vN, where N looks numeric // (ASCII digits and dots) must not begin with a leading zero, must not be /v1, // and must not contain any dots. For paths beginning with "gopkg.in/", // this second requirement is replaced by a requirement that the path // follow the gopkg.in server's conventions. // Third, no path element may begin with a dot. func CheckPath(path string) (err error) { defer func() { if err != nil { err = &InvalidPathError{Kind: "module", Path: path, Err: err} } }() if err := checkPath(path, modulePath); err != nil { return err } i := strings.Index(path, "/") if i < 0 { i = len(path) } if i == 0 { return fmt.Errorf("leading slash") } if !strings.Contains(path[:i], ".") { return fmt.Errorf("missing dot in first path element") } if path[0] == '-' { return fmt.Errorf("leading dash in first path element") } for _, r := range path[:i] { if !firstPathOK(r) { return fmt.Errorf("invalid char %q in first path element", r) } } if _, _, ok := SplitPathVersion(path); !ok { return fmt.Errorf("invalid version") } return nil } // CheckImportPath checks that an import path is valid. // // A valid import path consists of one or more valid path elements // separated by slashes (U+002F). (It must not begin with nor end in a slash.) // // A valid path element is a non-empty string made up of // ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~. // It must not end with a dot (U+002E), nor contain two dots in a row. 
// // The element prefix up to the first dot must not be a reserved file name // on Windows, regardless of case (CON, com1, NuL, and so on). The element // must not have a suffix of a tilde followed by one or more ASCII digits // (to exclude paths elements that look like Windows short-names). // // CheckImportPath may be less restrictive in the future, but see the // top-level package documentation for additional information about // subtleties of Unicode. func CheckImportPath(path string) error { if err := checkPath(path, importPath); err != nil { return &InvalidPathError{Kind: "import", Path: path, Err: err} } return nil } // pathKind indicates what kind of path we're checking. Module paths, // import paths, and file paths have different restrictions. type pathKind int const ( modulePath pathKind = iota importPath filePath ) // checkPath checks that a general path is valid. kind indicates what // specific constraints should be applied. // // checkPath returns an error describing why the path is not valid. // Because these checks apply to module, import, and file paths, // and because other checks may be applied, the caller is expected to wrap // this error with [InvalidPathError]. func checkPath(path string, kind pathKind) error { if !utf8.ValidString(path) { return fmt.Errorf("invalid UTF-8") } if path == "" { return fmt.Errorf("empty string") } if path[0] == '-' && kind != filePath { return fmt.Errorf("leading dash") } if strings.Contains(path, "//") { return fmt.Errorf("double slash") } if path[len(path)-1] == '/' { return fmt.Errorf("trailing slash") } elemStart := 0 for i, r := range path { if r == '/' { if err := checkElem(path[elemStart:i], kind); err != nil { return err } elemStart = i + 1 } } if err := checkElem(path[elemStart:], kind); err != nil { return err } return nil } // checkElem checks whether an individual path element is valid. 
func checkElem(elem string, kind pathKind) error { if elem == "" { return fmt.Errorf("empty path element") } if strings.Count(elem, ".") == len(elem) { return fmt.Errorf("invalid path element %q", elem) } if elem[0] == '.' && kind == modulePath { return fmt.Errorf("leading dot in path element") } if elem[len(elem)-1] == '.' { return fmt.Errorf("trailing dot in path element") } for _, r := range elem { ok := false switch kind { case modulePath: ok = modPathOK(r) case importPath: ok = importPathOK(r) case filePath: ok = fileNameOK(r) default: panic(fmt.Sprintf("internal error: invalid kind %v", kind)) } if !ok { return fmt.Errorf("invalid char %q", r) } } // Windows disallows a bunch of path elements, sadly. // See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file short := elem if i := strings.Index(short, "."); i >= 0 { short = short[:i] } for _, bad := range badWindowsNames { if strings.EqualFold(bad, short) { return fmt.Errorf("%q disallowed as path element component on Windows", short) } } if kind == filePath { // don't check for Windows short-names in file names. They're // only an issue for import paths. return nil } // Reject path components that look like Windows short-names. // Those usually end in a tilde followed by one or more ASCII digits. if tilde := strings.LastIndexByte(short, '~'); tilde >= 0 && tilde < len(short)-1 { suffix := short[tilde+1:] suffixIsDigits := true for _, r := range suffix { if r < '0' || r > '9' { suffixIsDigits = false break } } if suffixIsDigits { return fmt.Errorf("trailing tilde and digits in path element") } } return nil } // CheckFilePath checks that a slash-separated file path is valid. // The definition of a valid file path is the same as the definition // of a valid import path except that the set of allowed characters is larger: // all Unicode letters, ASCII digits, the ASCII space character (U+0020), // and the ASCII punctuation characters // “!#$%&()+,-.=@[]^_{}~â€. 
// (The excluded punctuation characters, " * < > ? ` ' | / \ and :, // have special meanings in certain shells or operating systems.) // // CheckFilePath may be less restrictive in the future, but see the // top-level package documentation for additional information about // subtleties of Unicode. func CheckFilePath(path string) error { if err := checkPath(path, filePath); err != nil { return &InvalidPathError{Kind: "file", Path: path, Err: err} } return nil } // badWindowsNames are the reserved file path elements on Windows. // See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file var badWindowsNames = []string{ "CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9", } // SplitPathVersion returns prefix and major version such that prefix+pathMajor == path // and version is either empty or "/vN" for N >= 2. // As a special case, gopkg.in paths are recognized directly; // they require ".vN" instead of "/vN", and for all N, not just N >= 2. // SplitPathVersion returns with ok = false when presented with // a path whose last path element does not satisfy the constraints // applied by [CheckPath], such as "example.com/pkg/v1" or "example.com/pkg/v1.2". func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) { if strings.HasPrefix(path, "gopkg.in/") { return splitGopkgIn(path) } i := len(path) dot := false for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') { if path[i-1] == '.' { dot = true } i-- } if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' { return path, "", true } prefix, pathMajor = path[:i-2], path[i-2:] if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" { return path, "", false } return prefix, pathMajor, true } // splitGopkgIn is like SplitPathVersion but only for gopkg.in paths. 
func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) { if !strings.HasPrefix(path, "gopkg.in/") { return path, "", false } i := len(path) if strings.HasSuffix(path, "-unstable") { i -= len("-unstable") } for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') { i-- } if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' { // All gopkg.in paths must end in vN for some N. return path, "", false } prefix, pathMajor = path[:i-2], path[i-2:] if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" { return path, "", false } return prefix, pathMajor, true } // MatchPathMajor reports whether the semantic version v // matches the path major version pathMajor. // // MatchPathMajor returns true if and only if [CheckPathMajor] returns nil. func MatchPathMajor(v, pathMajor string) bool { return CheckPathMajor(v, pathMajor) == nil } // CheckPathMajor returns a non-nil error if the semantic version v // does not match the path major version pathMajor. func CheckPathMajor(v, pathMajor string) error { // TODO(jayconrod): return errors or panic for invalid inputs. This function // (and others) was covered by integration tests for cmd/go, and surrounding // code protected against invalid inputs like non-canonical versions. if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { pathMajor = strings.TrimSuffix(pathMajor, "-unstable") } if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" { // Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1. // For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405. return nil } m := semver.Major(v) if pathMajor == "" { if m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" { return nil } pathMajor = "v0 or v1" } else if pathMajor[0] == '/' || pathMajor[0] == '.' 
{ if m == pathMajor[1:] { return nil } pathMajor = pathMajor[1:] } return &InvalidVersionError{ Version: v, Err: fmt.Errorf("should be %s, not %s", pathMajor, semver.Major(v)), } } // PathMajorPrefix returns the major-version tag prefix implied by pathMajor. // An empty PathMajorPrefix allows either v0 or v1. // // Note that [MatchPathMajor] may accept some versions that do not actually begin // with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1' // pathMajor, even though that pathMajor implies 'v1' tagging. func PathMajorPrefix(pathMajor string) string { if pathMajor == "" { return "" } if pathMajor[0] != '/' && pathMajor[0] != '.' { panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix lacks separator") } if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { pathMajor = strings.TrimSuffix(pathMajor, "-unstable") } m := pathMajor[1:] if m != semver.Major(m) { panic("pathMajor suffix " + pathMajor + "passed to PathMajorPrefix is not a valid major version") } return m } // CanonicalVersion returns the canonical form of the version string v. // It is the same as [semver.Canonical] except that it preserves the special build suffix "+incompatible". func CanonicalVersion(v string) string { cv := semver.Canonical(v) if semver.Build(v) == "+incompatible" { cv += "+incompatible" } return cv } // Sort sorts the list by Path, breaking ties by comparing [Version] fields. // The Version fields are interpreted as semantic versions (using [semver.Compare]) // optionally followed by a tie-breaking suffix introduced by a slash character, // like in "v0.0.1/go.mod". func Sort(list []Version) { sort.Slice(list, func(i, j int) bool { mi := list[i] mj := list[j] if mi.Path != mj.Path { return mi.Path < mj.Path } // To help go.sum formatting, allow version/file. // Compare semver prefix by semver rules, // file by string order. 
vi := mi.Version vj := mj.Version var fi, fj string if k := strings.Index(vi, "/"); k >= 0 { vi, fi = vi[:k], vi[k:] } if k := strings.Index(vj, "/"); k >= 0 { vj, fj = vj[:k], vj[k:] } if vi != vj { return semver.Compare(vi, vj) < 0 } return fi < fj }) } // EscapePath returns the escaped form of the given module path. // It fails if the module path is invalid. func EscapePath(path string) (escaped string, err error) { if err := CheckPath(path); err != nil { return "", err } return escapeString(path) } // EscapeVersion returns the escaped form of the given module version. // Versions are allowed to be in non-semver form but must be valid file names // and not contain exclamation marks. func EscapeVersion(v string) (escaped string, err error) { if err := checkElem(v, filePath); err != nil || strings.Contains(v, "!") { return "", &InvalidVersionError{ Version: v, Err: fmt.Errorf("disallowed version string"), } } return escapeString(v) } func escapeString(s string) (escaped string, err error) { haveUpper := false for _, r := range s { if r == '!' || r >= utf8.RuneSelf { // This should be disallowed by CheckPath, but diagnose anyway. // The correctness of the escaping loop below depends on it. return "", fmt.Errorf("internal error: inconsistency in EscapePath") } if 'A' <= r && r <= 'Z' { haveUpper = true } } if !haveUpper { return s, nil } var buf []byte for _, r := range s { if 'A' <= r && r <= 'Z' { buf = append(buf, '!', byte(r+'a'-'A')) } else { buf = append(buf, byte(r)) } } return string(buf), nil } // UnescapePath returns the module path for the given escaped path. // It fails if the escaped path is invalid or describes an invalid path. 
func UnescapePath(escaped string) (path string, err error) {
	path, ok := unescapeString(escaped)
	if !ok {
		return "", fmt.Errorf("invalid escaped module path %q", escaped)
	}
	// The decoded path must itself be a valid module path.
	if err := CheckPath(path); err != nil {
		return "", fmt.Errorf("invalid escaped module path %q: %v", escaped, err)
	}
	return path, nil
}

// UnescapeVersion returns the version string for the given escaped version.
// It fails if the escaped form is invalid or describes an invalid version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
func UnescapeVersion(escaped string) (v string, err error) {
	v, ok := unescapeString(escaped)
	if !ok {
		return "", fmt.Errorf("invalid escaped version %q", escaped)
	}
	if err := checkElem(v, filePath); err != nil {
		return "", fmt.Errorf("invalid escaped version %q: %v", v, err)
	}
	return v, nil
}

// unescapeString reverses escapeString: each "!x" pair becomes the
// upper-case letter X. It reports ok=false for non-ASCII input, a '!'
// not followed by a lower-case letter, a bare upper-case letter
// (which only appears escaped), or a trailing '!'.
func unescapeString(escaped string) (string, bool) {
	var buf []byte

	bang := false
	for _, r := range escaped {
		if r >= utf8.RuneSelf {
			return "", false
		}
		if bang {
			bang = false
			if r < 'a' || 'z' < r {
				return "", false
			}
			buf = append(buf, byte(r+'A'-'a'))
			continue
		}
		if r == '!' {
			bang = true
			continue
		}
		if 'A' <= r && r <= 'Z' {
			return "", false
		}
		buf = append(buf, byte(r))
	}
	if bang {
		return "", false
	}
	return string(buf), true
}

// MatchPrefixPatterns reports whether any path prefix of target matches one of
// the glob patterns (as defined by [path.Match]) in the comma-separated globs
// list. This implements the algorithm used when matching a module path to the
// GOPRIVATE environment variable, as described by 'go help module-private'.
//
// It ignores any empty or malformed patterns in the list.
// Trailing slashes on patterns are ignored.
func MatchPrefixPatterns(globs, target string) bool {
	for globs != "" {
		// Extract next non-empty glob in comma-separated list.
		var glob string
		if i := strings.Index(globs, ","); i >= 0 {
			glob, globs = globs[:i], globs[i+1:]
		} else {
			glob, globs = globs, ""
		}
		glob = strings.TrimSuffix(glob, "/")
		if glob == "" {
			continue
		}

		// A glob with N+1 path elements (N slashes) needs to be matched
		// against the first N+1 path elements of target,
		// which end just before the N+1'th slash.
		n := strings.Count(glob, "/")
		prefix := target
		// Walk target, counting slashes, truncating at the N+1'th slash.
		for i := 0; i < len(target); i++ {
			if target[i] == '/' {
				if n == 0 {
					prefix = target[:i]
					break
				}
				n--
			}
		}
		if n > 0 {
			// Not enough prefix elements.
			continue
		}
		matched, _ := path.Match(glob, prefix)
		if matched {
			return true
		}
	}
	return false
}
mod-0.19.0/module/module_test.go000066400000000000000000000270121463702072700165320ustar00rootroot00000000000000// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package module

import "testing"

// checkTests exercises Check: each case pairs a module path and a
// version with whether the pair should be accepted.
var checkTests = []struct {
	path    string
	version string
	ok      bool
}{
	{"rsc.io/quote", "0.1.0", false},
	{"rsc io/quote", "v1.0.0", false},

	{"github.com/go-yaml/yaml", "v0.8.0", true},
	{"github.com/go-yaml/yaml", "v1.0.0", true},
	{"github.com/go-yaml/yaml", "v2.0.0", false},
	{"github.com/go-yaml/yaml", "v2.1.5", false},
	{"github.com/go-yaml/yaml", "v3.0.0", false},

	{"github.com/go-yaml/yaml/v2", "v1.0.0", false},
	{"github.com/go-yaml/yaml/v2", "v2.0.0", true},
	{"github.com/go-yaml/yaml/v2", "v2.1.5", true},
	{"github.com/go-yaml/yaml/v2", "v3.0.0", false},

	{"gopkg.in/yaml.v0", "v0.8.0", true},
	{"gopkg.in/yaml.v0", "v1.0.0", false},
	{"gopkg.in/yaml.v0", "v2.0.0", false},
	{"gopkg.in/yaml.v0", "v2.1.5", false},
	{"gopkg.in/yaml.v0", "v3.0.0", false},

	{"gopkg.in/yaml.v1", "v0.8.0", false},
	{"gopkg.in/yaml.v1", "v1.0.0", true},
	{"gopkg.in/yaml.v1", "v2.0.0", false},
	{"gopkg.in/yaml.v1", "v2.1.5", false},
	{"gopkg.in/yaml.v1", "v3.0.0", false},

	// For gopkg.in, .v1 means v1 only (not v0).
	// But early versions of vgo still generated v0 pseudo-versions for it.
	// Even though now we'd generate those as v1 pseudo-versions,
	// we accept the old pseudo-versions to avoid breaking existing go.mod files.
	// For example gopkg.in/yaml.v2@v2.2.1's go.mod requires check.v1 at a v0 pseudo-version.
	{"gopkg.in/check.v1", "v0.0.0", false},
	{"gopkg.in/check.v1", "v0.0.0-20160102150405-abcdef123456", true},

	{"gopkg.in/yaml.v2", "v1.0.0", false},
	{"gopkg.in/yaml.v2", "v2.0.0", true},
	{"gopkg.in/yaml.v2", "v2.1.5", true},
	{"gopkg.in/yaml.v2", "v3.0.0", false},

	{"rsc.io/quote", "v17.0.0", false},
	{"rsc.io/quote", "v17.0.0+incompatible", true},
}

func TestCheck(t *testing.T) {
	for _, tt := range checkTests {
		err := Check(tt.path, tt.version)
		if tt.ok && err != nil {
			t.Errorf("Check(%q, %q) = %v, wanted nil error", tt.path, tt.version, err)
		} else if !tt.ok && err == nil {
			t.Errorf("Check(%q, %q) succeeded, wanted error", tt.path, tt.version)
		}
	}
}

// checkPathTests exercises CheckPath, CheckImportPath, and
// CheckFilePath on the same inputs; each path carries the expected
// verdict for each of the three checkers.
var checkPathTests = []struct {
	path     string
	ok       bool
	importOK bool
	fileOK   bool
}{
	{"x.y/z", true, true, true},
	{"x.y", true, true, true},

	{"", false, false, false},
	{"x.y/\xFFz", false, false, false},
	{"/x.y/z", false, false, false},
	{"x./z", false, false, false},
	{".x/z", false, true, true},
	{"-x/z", false, false, true},
	{"x..y/z", true, true, true},
	{"x.y/z/../../w", false, false, false},
	{"x.y//z", false, false, false},
	{"x.y/z//w", false, false, false},
	{"x.y/z/", false, false, false},

	{"x.y/z/v0", false, true, true},
	{"x.y/z/v1", false, true, true},
	{"x.y/z/v2", true, true, true},
	{"x.y/z/v2.0", false, true, true},
	{"X.y/z", false, true, true},

	{"!x.y/z", false, false, true},
	{"_x.y/z", false, true, true},
	{"x.y!/z", false, false, true},
	{"x.y\"/z", false, false, false},
	{"x.y#/z", false, false, true},
	{"x.y$/z", false, false, true},
	{"x.y%/z", false, false, true},
	{"x.y&/z", false, false, true},
	{"x.y'/z", false, false, false},
	{"x.y(/z", false, false, true},
	{"x.y)/z", false, false, true},
	{"x.y*/z", false, false, false},
	{"x.y+/z", false, true, true},
	{"x.y,/z", false, false, true},
	{"x.y-/z", true, true, true},
	{"x.y./zt", false, false, false},
	{"x.y:/z", false, false, false},
	{"x.y;/z", false, false, false},
	// NOTE(review): this row duplicates the first "x.y/z" row with
	// different expectations; the upstream rows for "x.y</z", "x.y=/z",
	// and "x.y>/z" appear to have been mangled here (angle brackets
	// look stripped) — verify against the original module_test.go.
	{"x.y/z", false, false, false},
	{"x.y?/z", false, false, false},
	{"x.y@/z", false, false, true},
	{"x.y[/z", false, false, true},
	{"x.y\\/z", false, false, false},
	{"x.y]/z", false, false, true},
	{"x.y^/z", false, false, true},
	{"x.y_/z", false, true, true},
	{"x.y`/z", false, false, false},
	{"x.y{/z", false, false, true},
	{"x.y}/z", false, false, true},
	{"x.y~/z", false, true, true},

	{"x.y/z!", false, false, true},
	{"x.y/z\"", false, false, false},
	{"x.y/z#", false, false, true},
	{"x.y/z$", false, false, true},
	{"x.y/z%", false, false, true},
	{"x.y/z&", false, false, true},
	{"x.y/z'", false, false, false},
	{"x.y/z(", false, false, true},
	{"x.y/z)", false, false, true},
	{"x.y/z*", false, false, false},
	{"x.y/z++", false, true, true},
	{"x.y/z,", false, false, true},
	{"x.y/z-", true, true, true},
	{"x.y/z.t", true, true, true},
	{"x.y/z/t", true, true, true},
	{"x.y/z:", false, false, false},
	{"x.y/z;", false, false, false},
	{"x.y/z<", false, false, false},
	{"x.y/z=", false, false, true},
	{"x.y/z>", false, false, false},
	{"x.y/z?", false, false, false},
	{"x.y/z@", false, false, true},
	{"x.y/z[", false, false, true},
	{"x.y/z\\", false, false, false},
	{"x.y/z]", false, false, true},
	{"x.y/z^", false, false, true},
	{"x.y/z_", true, true, true},
	{"x.y/z`", false, false, false},
	{"x.y/z{", false, false, true},
	{"x.y/z}", false, false, true},
	{"x.y/z~", true, true, true},

	{"x.y/x.foo", true, true, true},
	{"x.y/aux.foo", false, false, false},
	{"x.y/prn", false, false, false},
	{"x.y/prn2", true, true, true},
	{"x.y/com", true, true, true},
	{"x.y/com1", false, false, false},
	{"x.y/com1.txt", false, false, false},
	{"x.y/calm1", true, true, true},
	{"x.y/z~", true, true, true},
	{"x.y/z~0", false, false, true},
	{"x.y/z~09", false, false, true},
	{"x.y/z09", true, true, true},
	{"x.y/z09~", true, true, true},
	{"x.y/z09~09z", true, true, true},
	{"x.y/z09~09z~09", false, false, true},
	{"github.com/!123/logrus", false, false, true},

	// TODO: CL 41822 allowed Unicode letters in old "go get"
	// without due consideration of the implications, and only on github.com (!).
	// For now, we disallow non-ASCII characters in module mode,
	// in both module paths and general import paths,
	// until we can get the implications right.
	// When we do, we'll enable them everywhere, not just for GitHub.
	// NOTE(review): the string below looks mojibake-damaged
	// (UTF-8 decoded as Latin-1) — verify against the original file.
	{"github.com/user/unicode/иÑпытание", false, false, true},

	{"../x", false, false, false},
	{"./y", false, false, false},
	{"x:y", false, false, false},
	{`\temp\foo`, false, false, false},
	{".gitignore", false, true, true},
	{".github/ISSUE_TEMPLATE", false, true, true},
	{"x☺y", false, false, false},
}

func TestCheckPath(t *testing.T) {
	for _, tt := range checkPathTests {
		err := CheckPath(tt.path)
		if tt.ok && err != nil {
			t.Errorf("CheckPath(%q) = %v, wanted nil error", tt.path, err)
		} else if !tt.ok && err == nil {
			t.Errorf("CheckPath(%q) succeeded, wanted error", tt.path)
		}

		err = CheckImportPath(tt.path)
		if tt.importOK && err != nil {
			t.Errorf("CheckImportPath(%q) = %v, wanted nil error", tt.path, err)
		} else if !tt.importOK && err == nil {
			t.Errorf("CheckImportPath(%q) succeeded, wanted error", tt.path)
		}

		err = CheckFilePath(tt.path)
		if tt.fileOK && err != nil {
			t.Errorf("CheckFilePath(%q) = %v, wanted nil error", tt.path, err)
		} else if !tt.fileOK && err == nil {
			t.Errorf("CheckFilePath(%q) succeeded, wanted error", tt.path)
		}
	}
}

var splitPathVersionTests = []struct {
	pathPrefix string
	version    string
}{
	{"x.y/z", ""},
	{"x.y/z", "/v2"},
	{"x.y/z", "/v3"},
	{"x.y/v", ""},
	{"gopkg.in/yaml", ".v0"},
	{"gopkg.in/yaml", ".v1"},
	{"gopkg.in/yaml", ".v2"},
	{"gopkg.in/yaml", ".v3"},
}

func TestSplitPathVersion(t *testing.T) {
	for _, tt := range splitPathVersionTests {
		pathPrefix, version, ok := SplitPathVersion(tt.pathPrefix + tt.version)
		if pathPrefix != tt.pathPrefix || version != tt.version || !ok {
			t.Errorf("SplitPathVersion(%q) = %q, %q, %v, want %q, %q, true", tt.pathPrefix+tt.version, pathPrefix, version, ok, tt.pathPrefix, tt.version)
		}
	}

	// Whatever SplitPathVersion reports, prefix+version must
	// reconstruct the input exactly.
	for _, tt := range checkPathTests {
		pathPrefix, version, ok := SplitPathVersion(tt.path)
		if pathPrefix+version != tt.path {
			t.Errorf("SplitPathVersion(%q) = %q, %q, %v, doesn't add to input", tt.path, pathPrefix, version, ok)
		}
	}
}

// escapeTests pairs a path with its escaped form; an empty esc means
// the path escapes to itself.
var escapeTests = []struct {
	path string
	esc  string // empty means same as path
}{
	{path: "ascii.com/abcdefghijklmnopqrstuvwxyz.-/~_0123456789"},
	{path: "github.com/GoogleCloudPlatform/omega", esc: "github.com/!google!cloud!platform/omega"},
}

func TestEscapePath(t *testing.T) {
	// Check invalid paths.
	for _, tt := range checkPathTests {
		if !tt.ok {
			_, err := EscapePath(tt.path)
			if err == nil {
				t.Errorf("EscapePath(%q): succeeded, want error (invalid path)", tt.path)
			}
		}
	}

	// Check encodings.
	for _, tt := range escapeTests {
		esc, err := EscapePath(tt.path)
		if err != nil {
			t.Errorf("EscapePath(%q): unexpected error: %v", tt.path, err)
			continue
		}
		want := tt.esc
		if want == "" {
			want = tt.path
		}
		if esc != want {
			t.Errorf("EscapePath(%q) = %q, want %q", tt.path, esc, want)
		}
	}
}

// badUnescape lists escaped forms that must be rejected by UnescapePath.
var badUnescape = []string{
	"github.com/GoogleCloudPlatform/omega",
	"github.com/!google!cloud!platform!/omega",
	"github.com/!0google!cloud!platform/omega",
	"github.com/!_google!cloud!platform/omega",
	"github.com/!!google!cloud!platform/omega",
	"",
}

func TestUnescapePath(t *testing.T) {
	// Check invalid decodings.
	for _, bad := range badUnescape {
		_, err := UnescapePath(bad)
		if err == nil {
			t.Errorf("UnescapePath(%q): succeeded, want error (invalid decoding)", bad)
		}
	}

	// Check invalid paths (or maybe decodings).
	for _, tt := range checkPathTests {
		if !tt.ok {
			path, err := UnescapePath(tt.path)
			if err == nil {
				t.Errorf("UnescapePath(%q) = %q, want error (invalid path)", tt.path, path)
			}
		}
	}

	// Check encodings.
	for _, tt := range escapeTests {
		esc := tt.esc
		if esc == "" {
			esc = tt.path
		}
		path, err := UnescapePath(esc)
		if err != nil {
			t.Errorf("UnescapePath(%q): unexpected error: %v", esc, err)
			continue
		}
		if path != tt.path {
			t.Errorf("UnescapePath(%q) = %q, want %q", esc, path, tt.path)
		}
	}
}

func TestMatchPathMajor(t *testing.T) {
	for _, test := range []struct {
		v, pathMajor string
		want         bool
	}{
		{"v0.0.0", "", true},
		{"v0.0.0", "/v2", false},
		{"v0.0.0", ".v0", true},
		{"v0.0.0-20190510104115-cbcb75029529", ".v1", true},
		{"v1.0.0", "/v2", false},
		{"v1.0.0", ".v1", true},
		{"v1.0.0", ".v1-unstable", true},
		{"v2.0.0+incompatible", "", true},
		{"v2.0.0", "", false},
		{"v2.0.0", "/v2", true},
		{"v2.0.0", ".v2", true},
	} {
		if got := MatchPathMajor(test.v, test.pathMajor); got != test.want {
			t.Errorf("MatchPathMajor(%q, %q) = %v, want %v", test.v, test.pathMajor, got, test.want)
		}
	}
}

func TestMatchPrefixPatterns(t *testing.T) {
	for _, test := range []struct {
		globs, target string
		want          bool
	}{
		{"", "rsc.io/quote", false},
		{"/", "rsc.io/quote", false},
		{"*/quote", "rsc.io/quote", true},
		{"*/quo", "rsc.io/quote", false},
		{"*/quo??", "rsc.io/quote", true},
		{"*/quo*", "rsc.io/quote", true},
		{"*quo*", "rsc.io/quote", false},
		{"rsc.io", "rsc.io/quote", true},
		{"*.io", "rsc.io/quote", true},
		{"rsc.io/", "rsc.io/quote", true},
		{"rsc", "rsc.io/quote", false},
		{"rsc*", "rsc.io/quote", true},
		{"rsc.io", "rsc.io/quote/v3", true},
		{"*/quote", "rsc.io/quote/v3", true},
		{"*/quote/", "rsc.io/quote/v3", true},
		{"*/quote/*", "rsc.io/quote/v3", true},
		{"*/quote/*/", "rsc.io/quote/v3", true},
		{"*/v3", "rsc.io/quote/v3", false},
		{"*/*/v3", "rsc.io/quote/v3", true},
		{"*/*/*", "rsc.io/quote/v3", true},
		{"*/*/*/", "rsc.io/quote/v3", true},
		{"*/*/*", "rsc.io/quote", false},
		{"*/*/*/", "rsc.io/quote", false},
		{"*/*/*,,", "rsc.io/quote", false},
		{"*/*/*,,*/quote", "rsc.io/quote", true},
		{",,*/quote", "rsc.io/quote", true},
	} {
		if got := MatchPrefixPatterns(test.globs, test.target); got != test.want {
			t.Errorf("MatchPrefixPatterns(%q, %q) = %t, want %t", test.globs, test.target, got, test.want)
		}
	}
}
mod-0.19.0/module/pseudo.go000066400000000000000000000206661463702072700155110ustar00rootroot00000000000000// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Pseudo-versions
//
// Code authors are expected to tag the revisions they want users to use,
// including prereleases. However, not all authors tag versions at all,
// and not all commits a user might want to try will have tags.
// A pseudo-version is a version with a special form that allows us to
// address an untagged commit and order that version with respect to
// other versions we might encounter.
//
// A pseudo-version takes one of the general forms:
//
//	(1) vX.0.0-yyyymmddhhmmss-abcdef123456
//	(2) vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456
//	(3) vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456+incompatible
//	(4) vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456
//	(5) vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456+incompatible
//
// If there is no recently tagged version with the right major version vX,
// then form (1) is used, creating a space of pseudo-versions at the bottom
// of the vX version range, less than any tagged version, including the unlikely v0.0.0.
//
// If the most recent tagged version before the target commit is vX.Y.Z or vX.Y.Z+incompatible,
// then the pseudo-version uses form (2) or (3), making it a prerelease for the next
// possible semantic version after vX.Y.Z. The leading 0 segment in the prerelease string
// ensures that the pseudo-version compares less than possible future explicit prereleases
// like vX.Y.(Z+1)-rc1 or vX.Y.(Z+1)-1.
//
// If the most recent tagged version before the target commit is vX.Y.Z-pre or vX.Y.Z-pre+incompatible,
// then the pseudo-version uses form (4) or (5), making it a slightly later prerelease.
package module

import (
	"errors"
	"fmt"
	"strings"
	"time"

	"golang.org/x/mod/internal/lazyregexp"
	"golang.org/x/mod/semver"
)

var pseudoVersionRE = lazyregexp.New(`^v[0-9]+\.(0\.0-|\d+\.\d+-([^+]*\.)?0\.)\d{14}-[A-Za-z0-9]+(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$`)

const PseudoVersionTimestampFormat = "20060102150405"

// PseudoVersion returns a pseudo-version for the given major version ("v1")
// preexisting older tagged version ("" or "v1.2.3" or "v1.2.3-pre"), revision time,
// and revision identifier (usually a 12-byte commit hash prefix).
func PseudoVersion(major, older string, t time.Time, rev string) string {
	if major == "" {
		major = "v0"
	}
	// The trailing "yyyymmddhhmmss-rev" segment shared by all forms.
	segment := t.UTC().Format(PseudoVersionTimestampFormat) + "-" + rev

	build := semver.Build(older)
	older = semver.Canonical(older)
	switch {
	case older == "":
		// No usable base tag: form (1).
		return major + ".0.0-" + segment
	case semver.Prerelease(older) != "":
		// Base tag is itself a prerelease: forms (4), (5).
		return older + ".0." + segment + build
	}

	// Base tag is a release: forms (2), (3).
	// Split vMAJOR.MINOR.PATCH after the final dot and bump PATCH.
	i := strings.LastIndex(older, ".") + 1
	v, patch := older[:i], older[i:]
	return v + incDecimal(patch) + "-0." + segment + build
}

// ZeroPseudoVersion returns a pseudo-version with a zero timestamp and
// revision, which may be used as a placeholder.
func ZeroPseudoVersion(major string) string {
	return PseudoVersion(major, "", time.Time{}, "000000000000")
}

// incDecimal returns the decimal string incremented by 1.
func incDecimal(decimal string) string {
	// Scan right to left turning 9s to 0s until you find a digit to increment.
	digits := []byte(decimal)
	i := len(digits) - 1
	for ; i >= 0 && digits[i] == '9'; i-- {
		digits[i] = '0'
	}
	if i < 0 {
		// digits was all nines, now all zeros: 999 -> 1000.
		digits[0] = '1'
		digits = append(digits, '0')
	} else {
		digits[i]++
	}
	return string(digits)
}

// decDecimal returns the decimal string decremented by 1, or the empty string
// if the decimal is all zeroes.
func decDecimal(decimal string) string { // Scan right to left turning 0s to 9s until you find a digit to decrement. digits := []byte(decimal) i := len(digits) - 1 for ; i >= 0 && digits[i] == '0'; i-- { digits[i] = '9' } if i < 0 { // decimal is all zeros return "" } if i == 0 && digits[i] == '1' && len(digits) > 1 { digits = digits[1:] } else { digits[i]-- } return string(digits) } // IsPseudoVersion reports whether v is a pseudo-version. func IsPseudoVersion(v string) bool { return strings.Count(v, "-") >= 2 && semver.IsValid(v) && pseudoVersionRE.MatchString(v) } // IsZeroPseudoVersion returns whether v is a pseudo-version with a zero base, // timestamp, and revision, as returned by [ZeroPseudoVersion]. func IsZeroPseudoVersion(v string) bool { return v == ZeroPseudoVersion(semver.Major(v)) } // PseudoVersionTime returns the time stamp of the pseudo-version v. // It returns an error if v is not a pseudo-version or if the time stamp // embedded in the pseudo-version is not a valid time. func PseudoVersionTime(v string) (time.Time, error) { _, timestamp, _, _, err := parsePseudoVersion(v) if err != nil { return time.Time{}, err } t, err := time.Parse("20060102150405", timestamp) if err != nil { return time.Time{}, &InvalidVersionError{ Version: v, Pseudo: true, Err: fmt.Errorf("malformed time %q", timestamp), } } return t, nil } // PseudoVersionRev returns the revision identifier of the pseudo-version v. // It returns an error if v is not a pseudo-version. func PseudoVersionRev(v string) (rev string, err error) { _, _, rev, _, err = parsePseudoVersion(v) return } // PseudoVersionBase returns the canonical parent version, if any, upon which // the pseudo-version v is based. // // If v has no parent version (that is, if it is "vX.0.0-[…]"), // PseudoVersionBase returns the empty string and a nil error. 
func PseudoVersionBase(v string) (string, error) { base, _, _, build, err := parsePseudoVersion(v) if err != nil { return "", err } switch pre := semver.Prerelease(base); pre { case "": // vX.0.0-yyyymmddhhmmss-abcdef123456 → "" if build != "" { // Pseudo-versions of the form vX.0.0-yyyymmddhhmmss-abcdef123456+incompatible // are nonsensical: the "vX.0.0-" prefix implies that there is no parent tag, // but the "+incompatible" suffix implies that the major version of // the parent tag is not compatible with the module's import path. // // There are a few such entries in the index generated by proxy.golang.org, // but we believe those entries were generated by the proxy itself. return "", &InvalidVersionError{ Version: v, Pseudo: true, Err: fmt.Errorf("lacks base version, but has build metadata %q", build), } } return "", nil case "-0": // vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456 → vX.Y.Z // vX.Y.(Z+1)-0.yyyymmddhhmmss-abcdef123456+incompatible → vX.Y.Z+incompatible base = strings.TrimSuffix(base, pre) i := strings.LastIndexByte(base, '.') if i < 0 { panic("base from parsePseudoVersion missing patch number: " + base) } patch := decDecimal(base[i+1:]) if patch == "" { // vX.0.0-0 is invalid, but has been observed in the wild in the index // generated by requests to proxy.golang.org. // // NOTE(bcmills): I cannot find a historical bug that accounts for // pseudo-versions of this form, nor have I seen such versions in any // actual go.mod files. If we find actual examples of this form and a // reasonable theory of how they came into existence, it seems fine to // treat them as equivalent to vX.0.0 (especially since the invalid // pseudo-versions have lower precedence than the real ones). For now, we // reject them. 
return "", &InvalidVersionError{ Version: v, Pseudo: true, Err: fmt.Errorf("version before %s would have negative patch number", base), } } return base[:i+1] + patch + build, nil default: // vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456 → vX.Y.Z-pre // vX.Y.Z-pre.0.yyyymmddhhmmss-abcdef123456+incompatible → vX.Y.Z-pre+incompatible if !strings.HasSuffix(base, ".0") { panic(`base from parsePseudoVersion missing ".0" before date: ` + base) } return strings.TrimSuffix(base, ".0") + build, nil } } var errPseudoSyntax = errors.New("syntax error") func parsePseudoVersion(v string) (base, timestamp, rev, build string, err error) { if !IsPseudoVersion(v) { return "", "", "", "", &InvalidVersionError{ Version: v, Pseudo: true, Err: errPseudoSyntax, } } build = semver.Build(v) v = strings.TrimSuffix(v, build) j := strings.LastIndex(v, "-") v, rev = v[:j], v[j+1:] i := strings.LastIndex(v, "-") if j := strings.LastIndex(v, "."); j > i { base = v[:j] // "vX.Y.Z-pre.0" or "vX.Y.(Z+1)-0" timestamp = v[j+1:] } else { base = v[:i] // "vX.0.0" timestamp = v[i+1:] } return base, timestamp, rev, build, nil } mod-0.19.0/module/pseudo_test.go000066400000000000000000000105431463702072700165450ustar00rootroot00000000000000// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package module import ( "testing" "time" ) var pseudoTests = []struct { major string older string version string }{ {"", "", "v0.0.0-20060102150405-hash"}, {"v0", "", "v0.0.0-20060102150405-hash"}, {"v1", "", "v1.0.0-20060102150405-hash"}, {"v2", "", "v2.0.0-20060102150405-hash"}, {"unused", "v0.0.0", "v0.0.1-0.20060102150405-hash"}, {"unused", "v1.2.3", "v1.2.4-0.20060102150405-hash"}, {"unused", "v1.2.99999999999999999", "v1.2.100000000000000000-0.20060102150405-hash"}, {"unused", "v1.2.3-pre", "v1.2.3-pre.0.20060102150405-hash"}, {"unused", "v1.3.0-pre", "v1.3.0-pre.0.20060102150405-hash"}, {"unused", "v0.0.0--", "v0.0.0--.0.20060102150405-hash"}, {"unused", "v1.0.0+metadata", "v1.0.1-0.20060102150405-hash+metadata"}, {"unused", "v2.0.0+incompatible", "v2.0.1-0.20060102150405-hash+incompatible"}, {"unused", "v2.3.0-pre+incompatible", "v2.3.0-pre.0.20060102150405-hash+incompatible"}, } var pseudoTime = time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC) func TestPseudoVersion(t *testing.T) { for _, tt := range pseudoTests { v := PseudoVersion(tt.major, tt.older, pseudoTime, "hash") if v != tt.version { t.Errorf("PseudoVersion(%q, %q, ...) 
= %v, want %v", tt.major, tt.older, v, tt.version) } } } func TestIsPseudoVersion(t *testing.T) { for _, tt := range pseudoTests { if !IsPseudoVersion(tt.version) { t.Errorf("IsPseudoVersion(%q) = false, want true", tt.version) } if IsPseudoVersion(tt.older) { t.Errorf("IsPseudoVersion(%q) = true, want false", tt.older) } } } func TestPseudoVersionTime(t *testing.T) { for _, tt := range pseudoTests { tm, err := PseudoVersionTime(tt.version) if tm != pseudoTime || err != nil { t.Errorf("PseudoVersionTime(%q) = %v, %v, want %v, nil", tt.version, tm.Format(time.RFC3339), err, pseudoTime.Format(time.RFC3339)) } tm, err = PseudoVersionTime(tt.older) if tm != (time.Time{}) || err == nil { t.Errorf("PseudoVersionTime(%q) = %v, %v, want %v, error", tt.older, tm.Format(time.RFC3339), err, time.Time{}.Format(time.RFC3339)) } } } func TestInvalidPseudoVersionTime(t *testing.T) { const v = "---" if _, err := PseudoVersionTime(v); err == nil { t.Error("expected error, got nil instead") } } func TestPseudoVersionRev(t *testing.T) { for _, tt := range pseudoTests { rev, err := PseudoVersionRev(tt.version) if rev != "hash" || err != nil { t.Errorf("PseudoVersionRev(%q) = %q, %v, want %q, nil", tt.older, rev, err, "hash") } rev, err = PseudoVersionRev(tt.older) if rev != "" || err == nil { t.Errorf("PseudoVersionRev(%q) = %q, %v, want %q, error", tt.older, rev, err, "") } } } func TestPseudoVersionBase(t *testing.T) { for _, tt := range pseudoTests { base, err := PseudoVersionBase(tt.version) if err != nil { t.Errorf("PseudoVersionBase(%q): %v", tt.version, err) } else if base != tt.older { t.Errorf("PseudoVersionBase(%q) = %q; want %q", tt.version, base, tt.older) } } } func TestInvalidPseudoVersionBase(t *testing.T) { for _, in := range []string{ "v0.0.0", "v0.0.0-", // malformed: empty prerelease "v0.0.0-0.20060102150405-hash", // Z+1 == 0 "v0.1.0-0.20060102150405-hash", // Z+1 == 0 "v1.0.0-0.20060102150405-hash", // Z+1 == 0 "v0.0.0-20060102150405-hash+incompatible", // 
"+incompatible without base version "v0.0.0-20060102150405-hash+metadata", // other metadata without base version } { base, err := PseudoVersionBase(in) if err == nil || base != "" { t.Errorf(`PseudoVersionBase(%q) = %q, %v; want "", error`, in, base, err) } } } func TestIncDecimal(t *testing.T) { cases := []struct { in, want string }{ {"0", "1"}, {"1", "2"}, {"99", "100"}, {"100", "101"}, {"101", "102"}, } for _, tc := range cases { got := incDecimal(tc.in) if got != tc.want { t.Fatalf("incDecimal(%q) = %q; want %q", tc.in, tc.want, got) } } } func TestDecDecimal(t *testing.T) { cases := []struct { in, want string }{ {"", ""}, {"0", ""}, {"00", ""}, {"1", "0"}, {"2", "1"}, {"99", "98"}, {"100", "99"}, {"101", "100"}, } for _, tc := range cases { got := decDecimal(tc.in) if got != tc.want { t.Fatalf("decDecimal(%q) = %q; want %q", tc.in, tc.want, got) } } } mod-0.19.0/semver/000077500000000000000000000000001463702072700136715ustar00rootroot00000000000000mod-0.19.0/semver/semver.go000066400000000000000000000215501463702072700155240ustar00rootroot00000000000000// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package semver implements comparison of semantic version strings. // In this package, semantic version strings must begin with a leading "v", // as in "v1.0.0". // // The general form of a semantic version string accepted by this package is // // vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]] // // where square brackets indicate optional parts of the syntax; // MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros; // PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers // using only alphanumeric characters and hyphens; and // all-numeric PRERELEASE identifiers must not have leading zeros. // // This package follows Semantic Versioning 2.0.0 (see semver.org) // with two exceptions. 
First, it requires the "v" prefix. Second, it recognizes // vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes) // as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. package semver import "sort" // parsed returns the parsed form of a semantic version string. type parsed struct { major string minor string patch string short string prerelease string build string } // IsValid reports whether v is a valid semantic version string. func IsValid(v string) bool { _, ok := parse(v) return ok } // Canonical returns the canonical formatting of the semantic version v. // It fills in any missing .MINOR or .PATCH and discards build metadata. // Two semantic versions compare equal only if their canonical formattings // are identical strings. // The canonical invalid semantic version is the empty string. func Canonical(v string) string { p, ok := parse(v) if !ok { return "" } if p.build != "" { return v[:len(v)-len(p.build)] } if p.short != "" { return v + p.short } return v } // Major returns the major version prefix of the semantic version v. // For example, Major("v2.1.0") == "v2". // If v is an invalid semantic version string, Major returns the empty string. func Major(v string) string { pv, ok := parse(v) if !ok { return "" } return v[:1+len(pv.major)] } // MajorMinor returns the major.minor version prefix of the semantic version v. // For example, MajorMinor("v2.1.0") == "v2.1". // If v is an invalid semantic version string, MajorMinor returns the empty string. func MajorMinor(v string) string { pv, ok := parse(v) if !ok { return "" } i := 1 + len(pv.major) if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor { return v[:j] } return v[:i] + "." + pv.minor } // Prerelease returns the prerelease suffix of the semantic version v. // For example, Prerelease("v2.1.0-pre+meta") == "-pre". // If v is an invalid semantic version string, Prerelease returns the empty string. 
func Prerelease(v string) string { pv, ok := parse(v) if !ok { return "" } return pv.prerelease } // Build returns the build suffix of the semantic version v. // For example, Build("v2.1.0+meta") == "+meta". // If v is an invalid semantic version string, Build returns the empty string. func Build(v string) string { pv, ok := parse(v) if !ok { return "" } return pv.build } // Compare returns an integer comparing two versions according to // semantic version precedence. // The result will be 0 if v == w, -1 if v < w, or +1 if v > w. // // An invalid semantic version string is considered less than a valid one. // All invalid semantic version strings compare equal to each other. func Compare(v, w string) int { pv, ok1 := parse(v) pw, ok2 := parse(w) if !ok1 && !ok2 { return 0 } if !ok1 { return -1 } if !ok2 { return +1 } if c := compareInt(pv.major, pw.major); c != 0 { return c } if c := compareInt(pv.minor, pw.minor); c != 0 { return c } if c := compareInt(pv.patch, pw.patch); c != 0 { return c } return comparePrerelease(pv.prerelease, pw.prerelease) } // Max canonicalizes its arguments and then returns the version string // that compares greater. // // Deprecated: use [Compare] instead. In most cases, returning a canonicalized // version is not expected or desired. func Max(v, w string) string { v = Canonical(v) w = Canonical(w) if Compare(v, w) > 0 { return v } return w } // ByVersion implements [sort.Interface] for sorting semantic version strings. type ByVersion []string func (vs ByVersion) Len() int { return len(vs) } func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } func (vs ByVersion) Less(i, j int) bool { cmp := Compare(vs[i], vs[j]) if cmp != 0 { return cmp < 0 } return vs[i] < vs[j] } // Sort sorts a list of semantic version strings using [ByVersion]. 
func Sort(list []string) { sort.Sort(ByVersion(list)) } func parse(v string) (p parsed, ok bool) { if v == "" || v[0] != 'v' { return } p.major, v, ok = parseInt(v[1:]) if !ok { return } if v == "" { p.minor = "0" p.patch = "0" p.short = ".0.0" return } if v[0] != '.' { ok = false return } p.minor, v, ok = parseInt(v[1:]) if !ok { return } if v == "" { p.patch = "0" p.short = ".0" return } if v[0] != '.' { ok = false return } p.patch, v, ok = parseInt(v[1:]) if !ok { return } if len(v) > 0 && v[0] == '-' { p.prerelease, v, ok = parsePrerelease(v) if !ok { return } } if len(v) > 0 && v[0] == '+' { p.build, v, ok = parseBuild(v) if !ok { return } } if v != "" { ok = false return } ok = true return } func parseInt(v string) (t, rest string, ok bool) { if v == "" { return } if v[0] < '0' || '9' < v[0] { return } i := 1 for i < len(v) && '0' <= v[i] && v[i] <= '9' { i++ } if v[0] == '0' && i != 1 { return } return v[:i], v[i:], true } func parsePrerelease(v string) (t, rest string, ok bool) { // "A pre-release version MAY be denoted by appending a hyphen and // a series of dot separated identifiers immediately following the patch version. // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-]. // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes." if v == "" || v[0] != '-' { return } i := 1 start := 1 for i < len(v) && v[i] != '+' { if !isIdentChar(v[i]) && v[i] != '.' { return } if v[i] == '.' { if start == i || isBadNum(v[start:i]) { return } start = i + 1 } i++ } if start == i || isBadNum(v[start:i]) { return } return v[:i], v[i:], true } func parseBuild(v string) (t, rest string, ok bool) { if v == "" || v[0] != '+' { return } i := 1 start := 1 for i < len(v) { if !isIdentChar(v[i]) && v[i] != '.' { return } if v[i] == '.' 
{ if start == i { return } start = i + 1 } i++ } if start == i { return } return v[:i], v[i:], true } func isIdentChar(c byte) bool { return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-' } func isBadNum(v string) bool { i := 0 for i < len(v) && '0' <= v[i] && v[i] <= '9' { i++ } return i == len(v) && i > 1 && v[0] == '0' } func isNum(v string) bool { i := 0 for i < len(v) && '0' <= v[i] && v[i] <= '9' { i++ } return i == len(v) } func compareInt(x, y string) int { if x == y { return 0 } if len(x) < len(y) { return -1 } if len(x) > len(y) { return +1 } if x < y { return -1 } else { return +1 } } func comparePrerelease(x, y string) int { // "When major, minor, and patch are equal, a pre-release version has // lower precedence than a normal version. // Example: 1.0.0-alpha < 1.0.0. // Precedence for two pre-release versions with the same major, minor, // and patch version MUST be determined by comparing each dot separated // identifier from left to right until a difference is found as follows: // identifiers consisting of only digits are compared numerically and // identifiers with letters or hyphens are compared lexically in ASCII // sort order. Numeric identifiers always have lower precedence than // non-numeric identifiers. A larger set of pre-release fields has a // higher precedence than a smaller set, if all of the preceding // identifiers are equal. // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0." if x == y { return 0 } if x == "" { return +1 } if y == "" { return -1 } for x != "" && y != "" { x = x[1:] // skip - or . y = y[1:] // skip - or . 
var dx, dy string dx, x = nextIdent(x) dy, y = nextIdent(y) if dx != dy { ix := isNum(dx) iy := isNum(dy) if ix != iy { if ix { return -1 } else { return +1 } } if ix { if len(dx) < len(dy) { return -1 } if len(dx) > len(dy) { return +1 } } if dx < dy { return -1 } else { return +1 } } } if x == "" { return -1 } else { return +1 } } func nextIdent(x string) (dx, rest string) { i := 0 for i < len(x) && x[i] != '.' { i++ } return x[:i], x[i:] } mod-0.19.0/semver/semver_test.go000066400000000000000000000100601463702072700165550ustar00rootroot00000000000000// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package semver import ( "math/rand" "sort" "strings" "testing" ) var tests = []struct { in string out string }{ {"bad", ""}, {"v1-alpha.beta.gamma", ""}, {"v1-pre", ""}, {"v1+meta", ""}, {"v1-pre+meta", ""}, {"v1.2-pre", ""}, {"v1.2+meta", ""}, {"v1.2-pre+meta", ""}, {"v1.0.0-alpha", "v1.0.0-alpha"}, {"v1.0.0-alpha.1", "v1.0.0-alpha.1"}, {"v1.0.0-alpha.beta", "v1.0.0-alpha.beta"}, {"v1.0.0-beta", "v1.0.0-beta"}, {"v1.0.0-beta.2", "v1.0.0-beta.2"}, {"v1.0.0-beta.11", "v1.0.0-beta.11"}, {"v1.0.0-rc.1", "v1.0.0-rc.1"}, {"v1", "v1.0.0"}, {"v1.0", "v1.0.0"}, {"v1.0.0", "v1.0.0"}, {"v1.2", "v1.2.0"}, {"v1.2.0", "v1.2.0"}, {"v1.2.3-456", "v1.2.3-456"}, {"v1.2.3-456.789", "v1.2.3-456.789"}, {"v1.2.3-456-789", "v1.2.3-456-789"}, {"v1.2.3-456a", "v1.2.3-456a"}, {"v1.2.3-pre", "v1.2.3-pre"}, {"v1.2.3-pre+meta", "v1.2.3-pre"}, {"v1.2.3-pre.1", "v1.2.3-pre.1"}, {"v1.2.3-zzz", "v1.2.3-zzz"}, {"v1.2.3", "v1.2.3"}, {"v1.2.3+meta", "v1.2.3"}, {"v1.2.3+meta-pre", "v1.2.3"}, {"v1.2.3+meta-pre.sha.256a", "v1.2.3"}, } func TestIsValid(t *testing.T) { for _, tt := range tests { ok := IsValid(tt.in) if ok != (tt.out != "") { t.Errorf("IsValid(%q) = %v, want %v", tt.in, ok, !ok) } } } func TestCanonical(t *testing.T) { for _, tt := range tests { out := Canonical(tt.in) if out != 
tt.out { t.Errorf("Canonical(%q) = %q, want %q", tt.in, out, tt.out) } } } func TestMajor(t *testing.T) { for _, tt := range tests { out := Major(tt.in) want := "" if i := strings.Index(tt.out, "."); i >= 0 { want = tt.out[:i] } if out != want { t.Errorf("Major(%q) = %q, want %q", tt.in, out, want) } } } func TestMajorMinor(t *testing.T) { for _, tt := range tests { out := MajorMinor(tt.in) var want string if tt.out != "" { want = tt.in if i := strings.Index(want, "+"); i >= 0 { want = want[:i] } if i := strings.Index(want, "-"); i >= 0 { want = want[:i] } switch strings.Count(want, ".") { case 0: want += ".0" case 1: // ok case 2: want = want[:strings.LastIndex(want, ".")] } } if out != want { t.Errorf("MajorMinor(%q) = %q, want %q", tt.in, out, want) } } } func TestPrerelease(t *testing.T) { for _, tt := range tests { pre := Prerelease(tt.in) var want string if tt.out != "" { if i := strings.Index(tt.out, "-"); i >= 0 { want = tt.out[i:] } } if pre != want { t.Errorf("Prerelease(%q) = %q, want %q", tt.in, pre, want) } } } func TestBuild(t *testing.T) { for _, tt := range tests { build := Build(tt.in) var want string if tt.out != "" { if i := strings.Index(tt.in, "+"); i >= 0 { want = tt.in[i:] } } if build != want { t.Errorf("Build(%q) = %q, want %q", tt.in, build, want) } } } func TestCompare(t *testing.T) { for i, ti := range tests { for j, tj := range tests { cmp := Compare(ti.in, tj.in) var want int if ti.out == tj.out { want = 0 } else if i < j { want = -1 } else { want = +1 } if cmp != want { t.Errorf("Compare(%q, %q) = %d, want %d", ti.in, tj.in, cmp, want) } } } } func TestSort(t *testing.T) { versions := make([]string, len(tests)) for i, test := range tests { versions[i] = test.in } rand.Shuffle(len(versions), func(i, j int) { versions[i], versions[j] = versions[j], versions[i] }) Sort(versions) if !sort.IsSorted(ByVersion(versions)) { t.Errorf("list is not sorted:\n%s", strings.Join(versions, "\n")) } } func TestMax(t *testing.T) { for i, ti := range 
tests { for j, tj := range tests { max := Max(ti.in, tj.in) want := Canonical(ti.in) if i < j { want = Canonical(tj.in) } if max != want { t.Errorf("Max(%q, %q) = %q, want %q", ti.in, tj.in, max, want) } } } } var ( v1 = "v1.0.0+metadata-dash" v2 = "v1.0.0+metadata-dash1" ) func BenchmarkCompare(b *testing.B) { for i := 0; i < b.N; i++ { if Compare(v1, v2) != 0 { b.Fatalf("bad compare") } } } mod-0.19.0/sumdb/000077500000000000000000000000001463702072700135025ustar00rootroot00000000000000mod-0.19.0/sumdb/cache.go000066400000000000000000000027231463702072700151000ustar00rootroot00000000000000// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Parallel cache. // This file is copied from cmd/go/internal/par. package sumdb import ( "sync" "sync/atomic" ) // parCache runs an action once per key and caches the result. type parCache struct { m sync.Map } type cacheEntry struct { done uint32 mu sync.Mutex result interface{} } // Do calls the function f if and only if Do is being called for the first time with this key. // No call to Do with a given key returns until the one call to f returns. // Do returns the value returned by the one call to f. func (c *parCache) Do(key interface{}, f func() interface{}) interface{} { entryIface, ok := c.m.Load(key) if !ok { entryIface, _ = c.m.LoadOrStore(key, new(cacheEntry)) } e := entryIface.(*cacheEntry) if atomic.LoadUint32(&e.done) == 0 { e.mu.Lock() if atomic.LoadUint32(&e.done) == 0 { e.result = f() atomic.StoreUint32(&e.done, 1) } e.mu.Unlock() } return e.result } // Get returns the cached result associated with key. // It returns nil if there is no such result. // If the result for key is being computed, Get does not wait for the computation to finish. 
func (c *parCache) Get(key interface{}) interface{} { entryIface, ok := c.m.Load(key) if !ok { return nil } e := entryIface.(*cacheEntry) if atomic.LoadUint32(&e.done) == 0 { return nil } return e.result } mod-0.19.0/sumdb/client.go000066400000000000000000000463151463702072700153200ustar00rootroot00000000000000// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package sumdb import ( "bytes" "errors" "fmt" "strings" "sync" "sync/atomic" "golang.org/x/mod/module" "golang.org/x/mod/sumdb/note" "golang.org/x/mod/sumdb/tlog" ) // A ClientOps provides the external operations // (file caching, HTTP fetches, and so on) needed by the [Client]. // The methods must be safe for concurrent use by multiple goroutines. type ClientOps interface { // ReadRemote reads and returns the content served at the given path // on the remote database server. The path begins with "/lookup" or "/tile/", // and there is no need to parse the path in any way. // It is the implementation's responsibility to turn that path into a full URL // and make the HTTP request. ReadRemote should return an error for // any non-200 HTTP response status. ReadRemote(path string) ([]byte, error) // ReadConfig reads and returns the content of the named configuration file. // There are only a fixed set of configuration files. // // "key" returns a file containing the verifier key for the server. // // serverName + "/latest" returns a file containing the latest known // signed tree from the server. // To signal that the client wishes to start with an "empty" signed tree, // ReadConfig can return a successful empty result (0 bytes of data). ReadConfig(file string) ([]byte, error) // WriteConfig updates the content of the named configuration file, // changing it from the old []byte to the new []byte. // If the old []byte does not match the stored configuration, // WriteConfig must return ErrWriteConflict. 
// Otherwise, WriteConfig should atomically replace old with new. // The "key" configuration file is never written using WriteConfig. WriteConfig(file string, old, new []byte) error // ReadCache reads and returns the content of the named cache file. // Any returned error will be treated as equivalent to the file not existing. // There can be arbitrarily many cache files, such as: // serverName/lookup/pkg@version // serverName/tile/8/1/x123/456 ReadCache(file string) ([]byte, error) // WriteCache writes the named cache file. WriteCache(file string, data []byte) // Log prints the given log message (such as with log.Print) Log(msg string) // SecurityError prints the given security error log message. // The Client returns ErrSecurity from any operation that invokes SecurityError, // but the return value is mainly for testing. In a real program, // SecurityError should typically print the message and call log.Fatal or os.Exit. SecurityError(msg string) } // ErrWriteConflict signals a write conflict during Client.WriteConfig. var ErrWriteConflict = errors.New("write conflict") // ErrSecurity is returned by [Client] operations that invoke Client.SecurityError. var ErrSecurity = errors.New("security error: misbehaving server") // A Client is a client connection to a checksum database. // All the methods are safe for simultaneous use by multiple goroutines. 
type Client struct { ops ClientOps // access to operations in the external world didLookup uint32 // one-time initialized data initOnce sync.Once initErr error // init error, if any name string // name of accepted verifier verifiers note.Verifiers // accepted verifiers (just one, but Verifiers for note.Open) tileReader tileReader tileHeight int nosumdb string record parCache // cache of record lookup, keyed by path@vers tileCache parCache // cache of c.readTile, keyed by tile latestMu sync.Mutex latest tlog.Tree // latest known tree head latestMsg []byte // encoded signed note for latest tileSavedMu sync.Mutex tileSaved map[tlog.Tile]bool // which tiles have been saved using c.ops.WriteCache already } // NewClient returns a new [Client] using the given [ClientOps]. func NewClient(ops ClientOps) *Client { return &Client{ ops: ops, } } // init initializes the client (if not already initialized) // and returns any initialization error. func (c *Client) init() error { c.initOnce.Do(c.initWork) return c.initErr } // initWork does the actual initialization work. func (c *Client) initWork() { defer func() { if c.initErr != nil { c.initErr = fmt.Errorf("initializing sumdb.Client: %v", c.initErr) } }() c.tileReader.c = c if c.tileHeight == 0 { c.tileHeight = 8 } c.tileSaved = make(map[tlog.Tile]bool) vkey, err := c.ops.ReadConfig("key") if err != nil { c.initErr = err return } verifier, err := note.NewVerifier(strings.TrimSpace(string(vkey))) if err != nil { c.initErr = err return } c.verifiers = note.VerifierList(verifier) c.name = verifier.Name() if c.latest.N == 0 { c.latest.Hash, err = tlog.TreeHash(0, nil) if err != nil { c.initErr = err return } } data, err := c.ops.ReadConfig(c.name + "/latest") if err != nil { c.initErr = err return } if err := c.mergeLatest(data); err != nil { c.initErr = err return } } // SetTileHeight sets the tile height for the Client. // Any call to SetTileHeight must happen before the first call to [Client.Lookup]. 
// If SetTileHeight is not called, the Client defaults to tile height 8. // SetTileHeight can be called at most once, // and if so it must be called before the first call to Lookup. func (c *Client) SetTileHeight(height int) { if atomic.LoadUint32(&c.didLookup) != 0 { panic("SetTileHeight used after Lookup") } if height <= 0 { panic("invalid call to SetTileHeight") } if c.tileHeight != 0 { panic("multiple calls to SetTileHeight") } c.tileHeight = height } // SetGONOSUMDB sets the list of comma-separated GONOSUMDB patterns for the Client. // For any module path matching one of the patterns, // [Client.Lookup] will return ErrGONOSUMDB. // SetGONOSUMDB can be called at most once, // and if so it must be called before the first call to Lookup. func (c *Client) SetGONOSUMDB(list string) { if atomic.LoadUint32(&c.didLookup) != 0 { panic("SetGONOSUMDB used after Lookup") } if c.nosumdb != "" { panic("multiple calls to SetGONOSUMDB") } c.nosumdb = list } // ErrGONOSUMDB is returned by [Client.Lookup] for paths that match // a pattern listed in the GONOSUMDB list (set by [Client.SetGONOSUMDB], // usually from the environment variable). var ErrGONOSUMDB = errors.New("skipped (listed in GONOSUMDB)") func (c *Client) skip(target string) bool { return module.MatchPrefixPatterns(c.nosumdb, target) } // Lookup returns the go.sum lines for the given module path and version. // The version may end in a /go.mod suffix, in which case Lookup returns // the go.sum lines for the module's go.mod-only hash. func (c *Client) Lookup(path, vers string) (lines []string, err error) { atomic.StoreUint32(&c.didLookup, 1) if c.skip(path) { return nil, ErrGONOSUMDB } defer func() { if err != nil { err = fmt.Errorf("%s@%s: %v", path, vers, err) } }() if err := c.init(); err != nil { return nil, err } // Prepare encoded cache filename / URL. 
epath, err := module.EscapePath(path) if err != nil { return nil, err } evers, err := module.EscapeVersion(strings.TrimSuffix(vers, "/go.mod")) if err != nil { return nil, err } remotePath := "/lookup/" + epath + "@" + evers file := c.name + remotePath // Fetch the data. // The lookupCache avoids redundant ReadCache/GetURL operations // (especially since go.sum lines tend to come in pairs for a given // path and version) and also avoids having multiple of the same // request in flight at once. type cached struct { data []byte err error } result := c.record.Do(file, func() interface{} { // Try the on-disk cache, or else get from web. writeCache := false data, err := c.ops.ReadCache(file) if err != nil { data, err = c.ops.ReadRemote(remotePath) if err != nil { return cached{nil, err} } writeCache = true } // Validate the record before using it for anything. id, text, treeMsg, err := tlog.ParseRecord(data) if err != nil { return cached{nil, err} } if err := c.mergeLatest(treeMsg); err != nil { return cached{nil, err} } if err := c.checkRecord(id, text); err != nil { return cached{nil, err} } // Now that we've validated the record, // save it to the on-disk cache (unless that's where it came from). if writeCache { c.ops.WriteCache(file, data) } return cached{data, nil} }).(cached) if result.err != nil { return nil, result.err } // Extract the lines for the specific version we want // (with or without /go.mod). prefix := path + " " + vers + " " var hashes []string for _, line := range strings.Split(string(result.data), "\n") { if strings.HasPrefix(line, prefix) { hashes = append(hashes, line) } } return hashes, nil } // mergeLatest merges the tree head in msg // with the Client's current latest tree head, // ensuring the result is a consistent timeline. // If the result is inconsistent, mergeLatest calls c.ops.SecurityError // with a detailed security error message and then // (only if c.ops.SecurityError does not exit the program) returns ErrSecurity. 
// If the Client's current latest tree head moves forward, // mergeLatest updates the underlying configuration file as well, // taking care to merge any independent updates to that configuration. func (c *Client) mergeLatest(msg []byte) error { // Merge msg into our in-memory copy of the latest tree head. when, err := c.mergeLatestMem(msg) if err != nil { return err } if when != msgFuture { // msg matched our present or was in the past. // No change to our present, so no update of config file. return nil } // Flush our extended timeline back out to the configuration file. // If the configuration file has been updated in the interim, // we need to merge any updates made there as well. // Note that writeConfig is an atomic compare-and-swap. for { msg, err := c.ops.ReadConfig(c.name + "/latest") if err != nil { return err } when, err := c.mergeLatestMem(msg) if err != nil { return err } if when != msgPast { // msg matched our present or was from the future, // and now our in-memory copy matches. return nil } // msg (== config) is in the past, so we need to update it. c.latestMu.Lock() latestMsg := c.latestMsg c.latestMu.Unlock() if err := c.ops.WriteConfig(c.name+"/latest", msg, latestMsg); err != ErrWriteConflict { // Success or a non-write-conflict error. return err } } } const ( msgPast = 1 + iota msgNow msgFuture ) // mergeLatestMem is like mergeLatest but is only concerned with // updating the in-memory copy of the latest tree head (c.latest) // not the configuration file. // The when result explains when msg happened relative to our // previous idea of c.latest: // msgPast means msg was from before c.latest, // msgNow means msg was exactly c.latest, and // msgFuture means msg was from after c.latest, which has now been updated. func (c *Client) mergeLatestMem(msg []byte) (when int, err error) { if len(msg) == 0 { // Accept empty msg as the unsigned, empty timeline. 
c.latestMu.Lock() latest := c.latest c.latestMu.Unlock() if latest.N == 0 { return msgNow, nil } return msgPast, nil } note, err := note.Open(msg, c.verifiers) if err != nil { return 0, fmt.Errorf("reading tree note: %v\nnote:\n%s", err, msg) } tree, err := tlog.ParseTree([]byte(note.Text)) if err != nil { return 0, fmt.Errorf("reading tree: %v\ntree:\n%s", err, note.Text) } // Other lookups may be calling mergeLatest with other heads, // so c.latest is changing underfoot. We don't want to hold the // c.mu lock during tile fetches, so loop trying to update c.latest. c.latestMu.Lock() latest := c.latest latestMsg := c.latestMsg c.latestMu.Unlock() for { // If the tree head looks old, check that it is on our timeline. if tree.N <= latest.N { if err := c.checkTrees(tree, msg, latest, latestMsg); err != nil { return 0, err } if tree.N < latest.N { return msgPast, nil } return msgNow, nil } // The tree head looks new. Check that we are on its timeline and try to move our timeline forward. if err := c.checkTrees(latest, latestMsg, tree, msg); err != nil { return 0, err } // Install our msg if possible. // Otherwise we will go around again. c.latestMu.Lock() installed := false if c.latest == latest { installed = true c.latest = tree c.latestMsg = msg } else { latest = c.latest latestMsg = c.latestMsg } c.latestMu.Unlock() if installed { return msgFuture, nil } } } // checkTrees checks that older (from olderNote) is contained in newer (from newerNote). // If an error occurs, such as malformed data or a network problem, checkTrees returns that error. // If on the other hand checkTrees finds evidence of misbehavior, it prepares a detailed // message and calls log.Fatal. 
func (c *Client) checkTrees(older tlog.Tree, olderNote []byte, newer tlog.Tree, newerNote []byte) error { thr := tlog.TileHashReader(newer, &c.tileReader) h, err := tlog.TreeHash(older.N, thr) if err != nil { if older.N == newer.N { return fmt.Errorf("checking tree#%d: %v", older.N, err) } return fmt.Errorf("checking tree#%d against tree#%d: %v", older.N, newer.N, err) } if h == older.Hash { return nil } // Detected a fork in the tree timeline. // Start by reporting the inconsistent signed tree notes. var buf bytes.Buffer fmt.Fprintf(&buf, "SECURITY ERROR\n") fmt.Fprintf(&buf, "go.sum database server misbehavior detected!\n\n") indent := func(b []byte) []byte { return bytes.Replace(b, []byte("\n"), []byte("\n\t"), -1) } fmt.Fprintf(&buf, "old database:\n\t%s\n", indent(olderNote)) fmt.Fprintf(&buf, "new database:\n\t%s\n", indent(newerNote)) // The notes alone are not enough to prove the inconsistency. // We also need to show that the newer note's tree hash for older.N // does not match older.Hash. The consumer of this report could // of course consult the server to try to verify the inconsistency, // but we are holding all the bits we need to prove it right now, // so we might as well print them and make the report not depend // on the continued availability of the misbehaving server. // Preparing this data only reuses the tiled hashes needed for // tlog.TreeHash(older.N, thr) above, so assuming thr is caching tiles, // there are no new access to the server here, and these operations cannot fail. 
fmt.Fprintf(&buf, "proof of misbehavior:\n\t%v", h) if p, err := tlog.ProveTree(newer.N, older.N, thr); err != nil { fmt.Fprintf(&buf, "\tinternal error: %v\n", err) } else if err := tlog.CheckTree(p, newer.N, newer.Hash, older.N, h); err != nil { fmt.Fprintf(&buf, "\tinternal error: generated inconsistent proof\n") } else { for _, h := range p { fmt.Fprintf(&buf, "\n\t%v", h) } } c.ops.SecurityError(buf.String()) return ErrSecurity } // checkRecord checks that record #id's hash matches data. func (c *Client) checkRecord(id int64, data []byte) error { c.latestMu.Lock() latest := c.latest c.latestMu.Unlock() if id >= latest.N { return fmt.Errorf("cannot validate record %d in tree of size %d", id, latest.N) } hashes, err := tlog.TileHashReader(latest, &c.tileReader).ReadHashes([]int64{tlog.StoredHashIndex(0, id)}) if err != nil { return err } if hashes[0] == tlog.RecordHash(data) { return nil } return fmt.Errorf("cannot authenticate record data in server response") } // tileReader is a *Client wrapper that implements tlog.TileReader. // The separate type avoids exposing the ReadTiles and SaveTiles // methods on Client itself. type tileReader struct { c *Client } func (r *tileReader) Height() int { return r.c.tileHeight } // ReadTiles reads and returns the requested tiles, // either from the on-disk cache or the server. func (r *tileReader) ReadTiles(tiles []tlog.Tile) ([][]byte, error) { // Read all the tiles in parallel. data := make([][]byte, len(tiles)) errs := make([]error, len(tiles)) var wg sync.WaitGroup for i, tile := range tiles { wg.Add(1) go func(i int, tile tlog.Tile) { defer wg.Done() defer func() { if e := recover(); e != nil { errs[i] = fmt.Errorf("panic: %v", e) } }() data[i], errs[i] = r.c.readTile(tile) }(i, tile) } wg.Wait() for _, err := range errs { if err != nil { return nil, err } } return data, nil } // tileCacheKey returns the cache key for the tile. 
func (c *Client) tileCacheKey(tile tlog.Tile) string { return c.name + "/" + tile.Path() } // tileRemotePath returns the remote path for the tile. func (c *Client) tileRemotePath(tile tlog.Tile) string { return "/" + tile.Path() } // readTile reads a single tile, either from the on-disk cache or the server. func (c *Client) readTile(tile tlog.Tile) ([]byte, error) { type cached struct { data []byte err error } result := c.tileCache.Do(tile, func() interface{} { // Try the requested tile in on-disk cache. data, err := c.ops.ReadCache(c.tileCacheKey(tile)) if err == nil { c.markTileSaved(tile) return cached{data, nil} } // Try the full tile in on-disk cache (if requested tile not already full). // We only save authenticated tiles to the on-disk cache, // so the recreated prefix is equally authenticated. full := tile full.W = 1 << uint(tile.H) if tile != full { data, err := c.ops.ReadCache(c.tileCacheKey(full)) if err == nil { c.markTileSaved(tile) // don't save tile later; we already have full return cached{data[:len(data)/full.W*tile.W], nil} } } // Try requested tile from server. data, err = c.ops.ReadRemote(c.tileRemotePath(tile)) if err == nil { return cached{data, nil} } // Try full tile on server. // If the partial tile does not exist, it should be because // the tile has been completed and only the complete one // is available. if tile != full { data, err := c.ops.ReadRemote(c.tileRemotePath(full)) if err == nil { // Note: We could save the full tile in the on-disk cache here, // but we don't know if it is valid yet, and we will only find out // about the partial data, not the full data. So let SaveTiles // save the partial tile, and we'll just refetch the full tile later // once we can validate more (or all) of it. return cached{data[:len(data)/full.W*tile.W], nil} } } // Nothing worked. // Return the error from the server fetch for the requested (not full) tile. 
return cached{nil, err} }).(cached) return result.data, result.err } // markTileSaved records that tile is already present in the on-disk cache, // so that a future SaveTiles for that tile can be ignored. func (c *Client) markTileSaved(tile tlog.Tile) { c.tileSavedMu.Lock() c.tileSaved[tile] = true c.tileSavedMu.Unlock() } // SaveTiles saves the now validated tiles. func (r *tileReader) SaveTiles(tiles []tlog.Tile, data [][]byte) { c := r.c // Determine which tiles need saving. // (Tiles that came from the cache need not be saved back.) save := make([]bool, len(tiles)) c.tileSavedMu.Lock() for i, tile := range tiles { if !c.tileSaved[tile] { save[i] = true c.tileSaved[tile] = true } } c.tileSavedMu.Unlock() for i, tile := range tiles { if save[i] { // If WriteCache fails here (out of disk space? i/o error?), // c.tileSaved[tile] is still true and we will not try to write it again. // Next time we run maybe we'll redownload it again and be // more successful. c.ops.WriteCache(c.name+"/"+tile.Path(), data[i]) } } } mod-0.19.0/sumdb/client_test.go000066400000000000000000000326061463702072700163550ustar00rootroot00000000000000// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package sumdb import ( "bytes" "fmt" "strings" "sync" "testing" "golang.org/x/mod/sumdb/note" "golang.org/x/mod/sumdb/tlog" ) const ( testName = "localhost.localdev/sumdb" testVerifierKey = "localhost.localdev/sumdb+00000c67+AcTrnkbUA+TU4heY3hkjiSES/DSQniBqIeQ/YppAUtK6" testSignerKey = "PRIVATE+KEY+localhost.localdev/sumdb+00000c67+AXu6+oaVaOYuQOFrf1V59JK1owcFlJcHwwXHDfDGxSPk" ) func TestClientLookup(t *testing.T) { tc := newTestClient(t) tc.mustHaveLatest(1) // Basic lookup. tc.mustLookup("rsc.io/sampler", "v1.3.0", "rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=") tc.mustHaveLatest(3) // Everything should now be cached, both for the original package and its /go.mod. 
tc.getOK = false tc.mustLookup("rsc.io/sampler", "v1.3.0", "rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=") tc.mustLookup("rsc.io/sampler", "v1.3.0/go.mod", "rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=") tc.mustHaveLatest(3) tc.getOK = true tc.getTileOK = false // the cache has what we need // Lookup with multiple returned lines. tc.mustLookup("rsc.io/quote", "v1.5.2", "rsc.io/quote v1.5.2 h1:w5fcysjrx7yqtD/aO+QwRjYZOKnaM9Uh2b40tElTs3Y=\nrsc.io/quote v1.5.2 h2:xyzzy") tc.mustHaveLatest(3) // Lookup with need for !-encoding. // rsc.io/Quote is the only record written after rsc.io/samper, // so it is the only one that should need more tiles. tc.getTileOK = true tc.mustLookup("rsc.io/Quote", "v1.5.2", "rsc.io/Quote v1.5.2 h1:uppercase!=") tc.mustHaveLatest(4) } func TestClientBadTiles(t *testing.T) { tc := newTestClient(t) flipBits := func() { for url, data := range tc.remote { if strings.Contains(url, "/tile/") { for i := range data { data[i] ^= 0x80 } } } } // Bad tiles in initial download. tc.mustHaveLatest(1) flipBits() _, err := tc.client.Lookup("rsc.io/sampler", "v1.3.0") tc.mustError(err, "rsc.io/sampler@v1.3.0: initializing sumdb.Client: checking tree#1: downloaded inconsistent tile") flipBits() tc.newClient() tc.mustLookup("rsc.io/sampler", "v1.3.0", "rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=") // Bad tiles after initial download. flipBits() _, err = tc.client.Lookup("rsc.io/Quote", "v1.5.2") tc.mustError(err, "rsc.io/Quote@v1.5.2: checking tree#3 against tree#4: downloaded inconsistent tile") flipBits() tc.newClient() tc.mustLookup("rsc.io/Quote", "v1.5.2", "rsc.io/Quote v1.5.2 h1:uppercase!=") // Bad starting tree hash looks like bad tiles. 
tc.newClient() text := tlog.FormatTree(tlog.Tree{N: 1, Hash: tlog.Hash{}}) data, err := note.Sign(¬e.Note{Text: string(text)}, tc.signer) if err != nil { tc.t.Fatal(err) } tc.config[testName+"/latest"] = data _, err = tc.client.Lookup("rsc.io/sampler", "v1.3.0") tc.mustError(err, "rsc.io/sampler@v1.3.0: initializing sumdb.Client: checking tree#1: downloaded inconsistent tile") } func TestClientFork(t *testing.T) { tc := newTestClient(t) tc2 := tc.fork() tc.addRecord("rsc.io/pkg1@v1.5.2", `rsc.io/pkg1 v1.5.2 h1:hash!= `) tc.addRecord("rsc.io/pkg1@v1.5.4", `rsc.io/pkg1 v1.5.4 h1:hash!= `) tc.mustLookup("rsc.io/pkg1", "v1.5.2", "rsc.io/pkg1 v1.5.2 h1:hash!=") tc2.addRecord("rsc.io/pkg1@v1.5.3", `rsc.io/pkg1 v1.5.3 h1:hash!= `) tc2.addRecord("rsc.io/pkg1@v1.5.4", `rsc.io/pkg1 v1.5.4 h1:hash!= `) tc2.mustLookup("rsc.io/pkg1", "v1.5.4", "rsc.io/pkg1 v1.5.4 h1:hash!=") key := "/lookup/rsc.io/pkg1@v1.5.2" tc2.remote[key] = tc.remote[key] _, err := tc2.client.Lookup("rsc.io/pkg1", "v1.5.2") tc2.mustError(err, ErrSecurity.Error()) /* SECURITY ERROR go.sum database server misbehavior detected! old database: go.sum database tree! 
5 nWzN20+pwMt62p7jbv1/NlN95ePTlHijabv5zO/s36w= — localhost.localdev/sumdb AAAMZ5/2FVAdMH58kmnz/0h299pwyskEbzDzoa2/YaPdhvLya4YWDFQQxu2TQb5GpwAH4NdWnTwuhILafisyf3CNbgg= new database: go.sum database tree 6 wc4SkQt52o5W2nQ8To2ARs+mWuUJjss+sdleoiqxMmM= — localhost.localdev/sumdb AAAMZ6oRNswlEZ6ZZhxrCvgl1MBy+nusq4JU+TG6Fe2NihWLqOzb+y2c2kzRLoCr4tvw9o36ucQEnhc20e4nA4Qc/wc= proof of misbehavior: T7i+H/8ER4nXOiw4Bj0koZOkGjkxoNvlI34GpvhHhQg= Nsuejv72de9hYNM5bqFv8rv3gm3zJQwv/DT/WNbLDLA= mOmqqZ1aI/lzS94oq/JSbj7pD8Rv9S+xDyi12BtVSHo= /7Aw5jVSMM9sFjQhaMg+iiDYPMk6decH7QLOGrL9Lx0= */ wants := []string{ "SECURITY ERROR", "go.sum database server misbehavior detected!", "old database:\n\tgo.sum database tree\n\t5\n", "— localhost.localdev/sumdb AAAMZ5/2FVAd", "new database:\n\tgo.sum database tree\n\t6\n", "— localhost.localdev/sumdb AAAMZ6oRNswl", "proof of misbehavior:\n\tT7i+H/8ER4nXOiw4Bj0k", } text := tc2.security.String() for _, want := range wants { if !strings.Contains(text, want) { t.Fatalf("cannot find %q in security text:\n%s", want, text) } } } func TestClientGONOSUMDB(t *testing.T) { tc := newTestClient(t) tc.client.SetGONOSUMDB("p,*/q") tc.client.Lookup("rsc.io/sampler", "v1.3.0") // initialize before we turn off network tc.getOK = false ok := []string{ "abc", "a/p", "pq", "q", "n/o/p/q", } skip := []string{ "p", "p/x", "x/q", "x/q/z", } for _, path := range ok { _, err := tc.client.Lookup(path, "v1.0.0") if err == ErrGONOSUMDB { t.Errorf("Lookup(%q): ErrGONOSUMDB, wanted failed actual lookup", path) } } for _, path := range skip { _, err := tc.client.Lookup(path, "v1.0.0") if err != ErrGONOSUMDB { t.Errorf("Lookup(%q): %v, wanted ErrGONOSUMDB", path, err) } } } // A testClient is a self-contained client-side testing environment. type testClient struct { t *testing.T // active test client *Client // client being tested tileHeight int // tile height to use (default 2) getOK bool // should tc.GetURL succeed? getTileOK bool // should tc.GetURL of tiles succeed? 
treeSize int64 hashes []tlog.Hash remote map[string][]byte signer note.Signer // mu protects config, cache, log, security // during concurrent use of the exported methods // by the client itself (testClient is the Client's ClientOps, // and the Client methods can both read and write these fields). // Unexported methods invoked directly by the test // (for example, addRecord) need not hold the mutex: // for proper test execution those methods should only // be called when the Client is idle and not using its ClientOps. // Not holding the mutex in those methods ensures // that if a mistake is made, go test -race will report it. // (Holding the mutex would eliminate the race report but // not the underlying problem.) // Similarly, the get map is not protected by the mutex, // because the Client methods only read it. mu sync.Mutex // prot config map[string][]byte cache map[string][]byte security bytes.Buffer } // newTestClient returns a new testClient that will call t.Fatal on error // and has a few records already available on the remote server. 
func newTestClient(t *testing.T) *testClient { tc := &testClient{ t: t, tileHeight: 2, getOK: true, getTileOK: true, config: make(map[string][]byte), cache: make(map[string][]byte), remote: make(map[string][]byte), } tc.config["key"] = []byte(testVerifierKey + "\n") var err error tc.signer, err = note.NewSigner(testSignerKey) if err != nil { t.Fatal(err) } tc.newClient() tc.addRecord("rsc.io/quote@v1.5.2", `rsc.io/quote v1.5.2 h1:w5fcysjrx7yqtD/aO+QwRjYZOKnaM9Uh2b40tElTs3Y= rsc.io/quote v1.5.2/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0= rsc.io/quote v1.5.2 h2:xyzzy `) tc.addRecord("golang.org/x/text@v0.0.0-20170915032832-14c0d48ead0c", `golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c h1:qgOY6WgZOaTkIIMiVjBQcw93ERBE4m30iBm00nkL0i8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= `) tc.addRecord("rsc.io/sampler@v1.3.0", `rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= `) tc.config[testName+"/latest"] = tc.signTree(1) tc.addRecord("rsc.io/!quote@v1.5.2", `rsc.io/Quote v1.5.2 h1:uppercase!= `) return tc } // newClient resets the Client associated with tc. // This clears any in-memory cache from the Client // but not tc's on-disk cache. func (tc *testClient) newClient() { tc.client = NewClient(tc) tc.client.SetTileHeight(tc.tileHeight) } // mustLookup does a lookup for path@vers and checks that the lines that come back match want. func (tc *testClient) mustLookup(path, vers, want string) { tc.t.Helper() lines, err := tc.client.Lookup(path, vers) if err != nil { tc.t.Fatal(err) } if strings.Join(lines, "\n") != want { tc.t.Fatalf("Lookup(%q, %q):\n\t%s\nwant:\n\t%s", path, vers, strings.Join(lines, "\n\t"), strings.Replace(want, "\n", "\n\t", -1)) } } // mustHaveLatest checks that the on-disk configuration // for latest is a tree of size n. 
func (tc *testClient) mustHaveLatest(n int64) { tc.t.Helper() latest := tc.config[testName+"/latest"] lines := strings.Split(string(latest), "\n") if len(lines) < 2 || lines[1] != fmt.Sprint(n) { tc.t.Fatalf("/latest should have tree %d, but has:\n%s", n, latest) } } // mustError checks that err's error string contains the text. func (tc *testClient) mustError(err error, text string) { tc.t.Helper() if err == nil || !strings.Contains(err.Error(), text) { tc.t.Fatalf("err = %v, want %q", err, text) } } // fork returns a copy of tc. // Changes made to the new copy or to tc are not reflected in the other. func (tc *testClient) fork() *testClient { tc2 := &testClient{ t: tc.t, getOK: tc.getOK, getTileOK: tc.getTileOK, tileHeight: tc.tileHeight, treeSize: tc.treeSize, hashes: append([]tlog.Hash{}, tc.hashes...), signer: tc.signer, config: copyMap(tc.config), cache: copyMap(tc.cache), remote: copyMap(tc.remote), } tc2.newClient() return tc2 } func copyMap(m map[string][]byte) map[string][]byte { m2 := make(map[string][]byte) for k, v := range m { m2[k] = v } return m2 } // ReadHashes is tc's implementation of tlog.HashReader, for use with // tlog.TreeHash and so on. func (tc *testClient) ReadHashes(indexes []int64) ([]tlog.Hash, error) { var list []tlog.Hash for _, id := range indexes { list = append(list, tc.hashes[id]) } return list, nil } // addRecord adds a log record using the given (!-encoded) key and data. func (tc *testClient) addRecord(key, data string) { tc.t.Helper() // Create record, add hashes to log tree. id := tc.treeSize tc.treeSize++ rec, err := tlog.FormatRecord(id, []byte(data)) if err != nil { tc.t.Fatal(err) } hashes, err := tlog.StoredHashesForRecordHash(id, tlog.RecordHash([]byte(data)), tc) if err != nil { tc.t.Fatal(err) } tc.hashes = append(tc.hashes, hashes...) // Create lookup result. tc.remote["/lookup/"+key] = append(rec, tc.signTree(tc.treeSize)...) // Create new tiles. 
tiles := tlog.NewTiles(tc.tileHeight, id, tc.treeSize) for _, tile := range tiles { data, err := tlog.ReadTileData(tile, tc) if err != nil { tc.t.Fatal(err) } tc.remote["/"+tile.Path()] = data // TODO delete old partial tiles } } // signTree returns the signed head for the tree of the given size. func (tc *testClient) signTree(size int64) []byte { h, err := tlog.TreeHash(size, tc) if err != nil { tc.t.Fatal(err) } text := tlog.FormatTree(tlog.Tree{N: size, Hash: h}) data, err := note.Sign(¬e.Note{Text: string(text)}, tc.signer) if err != nil { tc.t.Fatal(err) } return data } // ReadRemote is for tc's implementation of Client. func (tc *testClient) ReadRemote(path string) ([]byte, error) { // No mutex here because only the Client should be running // and the Client cannot change tc.get. if !tc.getOK { return nil, fmt.Errorf("disallowed remote read %s", path) } if strings.Contains(path, "/tile/") && !tc.getTileOK { return nil, fmt.Errorf("disallowed remote tile read %s", path) } data, ok := tc.remote[path] if !ok { return nil, fmt.Errorf("no remote path %s", path) } return data, nil } // ReadConfig is for tc's implementation of Client. func (tc *testClient) ReadConfig(file string) ([]byte, error) { tc.mu.Lock() defer tc.mu.Unlock() data, ok := tc.config[file] if !ok { return nil, fmt.Errorf("no config %s", file) } return data, nil } // WriteConfig is for tc's implementation of Client. func (tc *testClient) WriteConfig(file string, old, new []byte) error { tc.mu.Lock() defer tc.mu.Unlock() data := tc.config[file] if !bytes.Equal(old, data) { return ErrWriteConflict } tc.config[file] = new return nil } // ReadCache is for tc's implementation of Client. func (tc *testClient) ReadCache(file string) ([]byte, error) { tc.mu.Lock() defer tc.mu.Unlock() data, ok := tc.cache[file] if !ok { return nil, fmt.Errorf("no cache %s", file) } return data, nil } // WriteCache is for tc's implementation of Client. 
func (tc *testClient) WriteCache(file string, data []byte) { tc.mu.Lock() defer tc.mu.Unlock() tc.cache[file] = data } // Log is for tc's implementation of Client. func (tc *testClient) Log(msg string) { tc.t.Log(msg) } // SecurityError is for tc's implementation of Client. func (tc *testClient) SecurityError(msg string) { tc.mu.Lock() defer tc.mu.Unlock() fmt.Fprintf(&tc.security, "%s\n", strings.TrimRight(msg, "\n")) } mod-0.19.0/sumdb/dirhash/000077500000000000000000000000001463702072700151245ustar00rootroot00000000000000mod-0.19.0/sumdb/dirhash/hash.go000066400000000000000000000077741463702072700164150ustar00rootroot00000000000000// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package dirhash defines hashes over directory trees. // These hashes are recorded in go.sum files and in the Go checksum database, // to allow verifying that a newly-downloaded module has the expected content. package dirhash import ( "archive/zip" "crypto/sha256" "encoding/base64" "errors" "fmt" "io" "os" "path/filepath" "sort" "strings" ) // DefaultHash is the default hash function used in new go.sum entries. var DefaultHash Hash = Hash1 // A Hash is a directory hash function. // It accepts a list of files along with a function that opens the content of each file. // It opens, reads, hashes, and closes each file and returns the overall directory hash. type Hash func(files []string, open func(string) (io.ReadCloser, error)) (string, error) // Hash1 is the "h1:" directory hash function, using SHA-256. // // Hash1 is "h1:" followed by the base64-encoded SHA-256 hash of a summary // prepared as if by the Unix command: // // sha256sum $(find . 
-type f | sort) | sha256sum // // More precisely, the hashed summary contains a single line for each file in the list, // ordered by sort.Strings applied to the file names, where each line consists of // the hexadecimal SHA-256 hash of the file content, // two spaces (U+0020), the file name, and a newline (U+000A). // // File names with newlines (U+000A) are disallowed. func Hash1(files []string, open func(string) (io.ReadCloser, error)) (string, error) { h := sha256.New() files = append([]string(nil), files...) sort.Strings(files) for _, file := range files { if strings.Contains(file, "\n") { return "", errors.New("dirhash: filenames with newlines are not supported") } r, err := open(file) if err != nil { return "", err } hf := sha256.New() _, err = io.Copy(hf, r) r.Close() if err != nil { return "", err } fmt.Fprintf(h, "%x %s\n", hf.Sum(nil), file) } return "h1:" + base64.StdEncoding.EncodeToString(h.Sum(nil)), nil } // HashDir returns the hash of the local file system directory dir, // replacing the directory name itself with prefix in the file names // used in the hash function. func HashDir(dir, prefix string, hash Hash) (string, error) { files, err := DirFiles(dir, prefix) if err != nil { return "", err } osOpen := func(name string) (io.ReadCloser, error) { return os.Open(filepath.Join(dir, strings.TrimPrefix(name, prefix))) } return hash(files, osOpen) } // DirFiles returns the list of files in the tree rooted at dir, // replacing the directory name dir with prefix in each name. // The resulting names always use forward slashes. func DirFiles(dir, prefix string) ([]string, error) { var files []string dir = filepath.Clean(dir) err := filepath.Walk(dir, func(file string, info os.FileInfo, err error) error { if err != nil { return err } if info.IsDir() { return nil } else if file == dir { return fmt.Errorf("%s is not a directory", dir) } rel := file if dir != "." 
{ rel = file[len(dir)+1:] } f := filepath.Join(prefix, rel) files = append(files, filepath.ToSlash(f)) return nil }) if err != nil { return nil, err } return files, nil } // HashZip returns the hash of the file content in the named zip file. // Only the file names and their contents are included in the hash: // the exact zip file format encoding, compression method, // per-file modification times, and other metadata are ignored. func HashZip(zipfile string, hash Hash) (string, error) { z, err := zip.OpenReader(zipfile) if err != nil { return "", err } defer z.Close() var files []string zfiles := make(map[string]*zip.File) for _, file := range z.File { files = append(files, file.Name) zfiles[file.Name] = file } zipOpen := func(name string) (io.ReadCloser, error) { f := zfiles[name] if f == nil { return nil, fmt.Errorf("file %q not found in zip", name) // should never happen } return f.Open() } return hash(files, zipOpen) } mod-0.19.0/sumdb/dirhash/hash_test.go000066400000000000000000000071221463702072700174370ustar00rootroot00000000000000// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package dirhash import ( "archive/zip" "crypto/sha256" "encoding/base64" "fmt" "io" "os" "path/filepath" "strings" "testing" ) func h(s string) string { return fmt.Sprintf("%x", sha256.Sum256([]byte(s))) } func htop(k string, s string) string { sum := sha256.Sum256([]byte(s)) return k + ":" + base64.StdEncoding.EncodeToString(sum[:]) } func TestHash1(t *testing.T) { files := []string{"xyz", "abc"} open := func(name string) (io.ReadCloser, error) { return io.NopCloser(strings.NewReader("data for " + name)), nil } want := htop("h1", fmt.Sprintf("%s %s\n%s %s\n", h("data for abc"), "abc", h("data for xyz"), "xyz")) out, err := Hash1(files, open) if err != nil { t.Fatal(err) } if out != want { t.Errorf("Hash1(...) 
= %s, want %s", out, want) } _, err = Hash1([]string{"xyz", "a\nbc"}, open) if err == nil { t.Error("Hash1: expected error on newline in filenames") } } func TestHashDir(t *testing.T) { dir := t.TempDir() if err := os.WriteFile(filepath.Join(dir, "xyz"), []byte("data for xyz"), 0666); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(dir, "abc"), []byte("data for abc"), 0666); err != nil { t.Fatal(err) } want := htop("h1", fmt.Sprintf("%s %s\n%s %s\n", h("data for abc"), "prefix/abc", h("data for xyz"), "prefix/xyz")) out, err := HashDir(dir, "prefix", Hash1) if err != nil { t.Fatalf("HashDir: %v", err) } if out != want { t.Errorf("HashDir(...) = %s, want %s", out, want) } } func TestHashZip(t *testing.T) { f, err := os.CreateTemp(t.TempDir(), "dirhash-test-") if err != nil { t.Fatal(err) } defer f.Close() z := zip.NewWriter(f) w, err := z.Create("prefix/xyz") if err != nil { t.Fatal(err) } w.Write([]byte("data for xyz")) w, err = z.Create("prefix/abc") if err != nil { t.Fatal(err) } w.Write([]byte("data for abc")) if err := z.Close(); err != nil { t.Fatal(err) } if err := f.Close(); err != nil { t.Fatal(err) } want := htop("h1", fmt.Sprintf("%s %s\n%s %s\n", h("data for abc"), "prefix/abc", h("data for xyz"), "prefix/xyz")) out, err := HashZip(f.Name(), Hash1) if err != nil { t.Fatalf("HashDir: %v", err) } if out != want { t.Errorf("HashDir(...) 
= %s, want %s", out, want) } } func TestDirFiles(t *testing.T) { t.Run("valid directory with files", func(t *testing.T) { dir := t.TempDir() if err := os.WriteFile(filepath.Join(dir, "xyz"), []byte("data for xyz"), 0666); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(dir, "abc"), []byte("data for abc"), 0666); err != nil { t.Fatal(err) } if err := os.Mkdir(filepath.Join(dir, "subdir"), 0777); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(dir, "subdir", "xyz"), []byte("data for subdir xyz"), 0666); err != nil { t.Fatal(err) } prefix := "foo/bar@v2.3.4" out, err := DirFiles(dir, prefix) if err != nil { t.Fatalf("DirFiles: %v", err) } for _, file := range out { if !strings.HasPrefix(file, prefix) { t.Errorf("Dir file = %s, want prefix %s", file, prefix) } } }) t.Run("invalid directory", func(t *testing.T) { path := filepath.Join(t.TempDir(), "not-a-directory.txt") if err := os.WriteFile(path, []byte("This is a file."), 0644); err != nil { t.Fatal(err) } defer os.RemoveAll(path) out, err := DirFiles(path, "") if err == nil { t.Errorf("DirFiles(...) = %v, expected an error", err) } if len(out) > 0 { t.Errorf("DirFiles(...) = unexpected files %s", out) } }) } mod-0.19.0/sumdb/note/000077500000000000000000000000001463702072700144475ustar00rootroot00000000000000mod-0.19.0/sumdb/note/example_test.go000066400000000000000000000061531463702072700174750ustar00rootroot00000000000000// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package note_test import ( "fmt" "io" "os" "golang.org/x/mod/sumdb/note" ) func ExampleSign() { skey := "PRIVATE+KEY+PeterNeumann+c74f20a3+AYEKFALVFGyNhPJEMzD1QIDr+Y7hfZx09iUvxdXHKDFz" text := "If you think cryptography is the answer to your problem,\n" + "then you don't know what your problem is.\n" signer, err := note.NewSigner(skey) if err != nil { fmt.Println(err) return } msg, err := note.Sign(¬e.Note{Text: text}, signer) if err != nil { fmt.Println(err) return } os.Stdout.Write(msg) // Output: // If you think cryptography is the answer to your problem, // then you don't know what your problem is. // // — PeterNeumann x08go/ZJkuBS9UG/SffcvIAQxVBtiFupLLr8pAcElZInNIuGUgYN1FFYC2pZSNXgKvqfqdngotpRZb6KE6RyyBwJnAM= } func ExampleOpen() { vkey := "PeterNeumann+c74f20a3+ARpc2QcUPDhMQegwxbzhKqiBfsVkmqq/LDE4izWy10TW" msg := []byte("If you think cryptography is the answer to your problem,\n" + "then you don't know what your problem is.\n" + "\n" + "— PeterNeumann x08go/ZJkuBS9UG/SffcvIAQxVBtiFupLLr8pAcElZInNIuGUgYN1FFYC2pZSNXgKvqfqdngotpRZb6KE6RyyBwJnAM=\n") verifier, err := note.NewVerifier(vkey) if err != nil { fmt.Println(err) return } verifiers := note.VerifierList(verifier) n, err := note.Open(msg, verifiers) if err != nil { fmt.Println(err) return } fmt.Printf("%s (%08x):\n%s", n.Sigs[0].Name, n.Sigs[0].Hash, n.Text) // Output: // PeterNeumann (c74f20a3): // If you think cryptography is the answer to your problem, // then you don't know what your problem is. 
} var rand = struct { Reader io.Reader }{ zeroReader{}, } type zeroReader struct{} func (zeroReader) Read(buf []byte) (int, error) { for i := range buf { buf[i] = 0 } return len(buf), nil } func ExampleSign_add_signatures() { vkey := "PeterNeumann+c74f20a3+ARpc2QcUPDhMQegwxbzhKqiBfsVkmqq/LDE4izWy10TW" msg := []byte("If you think cryptography is the answer to your problem,\n" + "then you don't know what your problem is.\n" + "\n" + "— PeterNeumann x08go/ZJkuBS9UG/SffcvIAQxVBtiFupLLr8pAcElZInNIuGUgYN1FFYC2pZSNXgKvqfqdngotpRZb6KE6RyyBwJnAM=\n") verifier, err := note.NewVerifier(vkey) if err != nil { fmt.Println(err) return } verifiers := note.VerifierList(verifier) n, err := note.Open(msg, verifiers) if err != nil { fmt.Println(err) return } skey, vkey, err := note.GenerateKey(rand.Reader, "EnochRoot") if err != nil { fmt.Println(err) return } _ = vkey // give to verifiers me, err := note.NewSigner(skey) if err != nil { fmt.Println(err) return } msg, err = note.Sign(n, me) if err != nil { fmt.Println(err) return } os.Stdout.Write(msg) // Output: // If you think cryptography is the answer to your problem, // then you don't know what your problem is. // // — PeterNeumann x08go/ZJkuBS9UG/SffcvIAQxVBtiFupLLr8pAcElZInNIuGUgYN1FFYC2pZSNXgKvqfqdngotpRZb6KE6RyyBwJnAM= // — EnochRoot rwz+eBzmZa0SO3NbfRGzPCpDckykFXSdeX+MNtCOXm2/5n2tiOHp+vAF1aGrQ5ovTG01oOTGwnWLox33WWd1RvMc+QQ= } mod-0.19.0/sumdb/note/note.go000066400000000000000000000502101463702072700157410ustar00rootroot00000000000000// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package note defines the notes signed by the Go module database server. // // A note is text signed by one or more server keys. // The text should be ignored unless the note is signed by // a trusted server key and the signature has been verified // using the server's public key. 
// // A server's public key is identified by a name, typically the "host[/path]" // giving the base URL of the server's transparency log. // The syntactic restrictions on a name are that it be non-empty, // well-formed UTF-8 containing neither Unicode spaces nor plus (U+002B). // // A Go module database server signs texts using public key cryptography. // A given server may have multiple public keys, each // identified by a 32-bit hash of the public key. // // # Verifying Notes // // A [Verifier] allows verification of signatures by one server public key. // It can report the name of the server and the uint32 hash of the key, // and it can verify a purported signature by that key. // // The standard implementation of a Verifier is constructed // by [NewVerifier] starting from a verifier key, which is a // plain text string of the form "++". // // A [Verifiers] allows looking up a Verifier by the combination // of server name and key hash. // // The standard implementation of a Verifiers is constructed // by VerifierList from a list of known verifiers. // // A [Note] represents a text with one or more signatures. // An implementation can reject a note with too many signatures // (for example, more than 100 signatures). // // A [Signature] represents a signature on a note, verified or not. // // The [Open] function takes as input a signed message // and a set of known verifiers. It decodes and verifies // the message signatures and returns a [Note] structure // containing the message text and (verified or unverified) signatures. // // # Signing Notes // // A [Signer] allows signing a text with a given key. // It can report the name of the server and the hash of the key // and can sign a raw text using that key. // // The standard implementation of a Signer is constructed // by [NewSigner] starting from an encoded signer key, which is a // plain text string of the form "PRIVATE+KEY+++". 
// Anyone with an encoded signer key can sign messages using that key, // so it must be kept secret. The encoding begins with the literal text // "PRIVATE+KEY" to avoid confusion with the public server key. // // The [Sign] function takes as input a Note and a list of Signers // and returns an encoded, signed message. // // # Signed Note Format // // A signed note consists of a text ending in newline (U+000A), // followed by a blank line (only a newline), // followed by one or more signature lines of this form: // em dash (U+2014), space (U+0020), // server name, space, base64-encoded signature, newline. // // Signed notes must be valid UTF-8 and must not contain any // ASCII control characters (those below U+0020) other than newline. // // A signature is a base64 encoding of 4+n bytes. // // The first four bytes in the signature are the uint32 key hash // stored in big-endian order. // // The remaining n bytes are the result of using the specified key // to sign the note text (including the final newline but not the // separating blank line). // // # Generating Keys // // There is only one key type, Ed25519 with algorithm identifier 1. // New key types may be introduced in the future as needed, // although doing so will require deploying the new algorithms to all clients // before starting to depend on them for signatures. // // The [GenerateKey] function generates and returns a new signer // and corresponding verifier. // // # Example // // Here is a well-formed signed note: // // If you think cryptography is the answer to your problem, // then you don't know what your problem is. 
// // — PeterNeumann x08go/ZJkuBS9UG/SffcvIAQxVBtiFupLLr8pAcElZInNIuGUgYN1FFYC2pZSNXgKvqfqdngotpRZb6KE6RyyBwJnAM= // // It can be constructed and displayed using: // // skey := "PRIVATE+KEY+PeterNeumann+c74f20a3+AYEKFALVFGyNhPJEMzD1QIDr+Y7hfZx09iUvxdXHKDFz" // text := "If you think cryptography is the answer to your problem,\n" + // "then you don't know what your problem is.\n" // // signer, err := note.NewSigner(skey) // if err != nil { // log.Fatal(err) // } // // msg, err := note.Sign(¬e.Note{Text: text}, signer) // if err != nil { // log.Fatal(err) // } // os.Stdout.Write(msg) // // The note's text is two lines, including the final newline, // and the text is purportedly signed by a server named // "PeterNeumann". (Although server names are canonically // base URLs, the only syntactic requirement is that they // not contain spaces or newlines). // // If [Open] is given access to a [Verifiers] including the // [Verifier] for this key, then it will succeed at verifying // the encoded message and returning the parsed [Note]: // // vkey := "PeterNeumann+c74f20a3+ARpc2QcUPDhMQegwxbzhKqiBfsVkmqq/LDE4izWy10TW" // msg := []byte("If you think cryptography is the answer to your problem,\n" + // "then you don't know what your problem is.\n" + // "\n" + // "— PeterNeumann x08go/ZJkuBS9UG/SffcvIAQxVBtiFupLLr8pAcElZInNIuGUgYN1FFYC2pZSNXgKvqfqdngotpRZb6KE6RyyBwJnAM=\n") // // verifier, err := note.NewVerifier(vkey) // if err != nil { // log.Fatal(err) // } // verifiers := note.VerifierList(verifier) // // n, err := note.Open([]byte(msg), verifiers) // if err != nil { // log.Fatal(err) // } // fmt.Printf("%s (%08x):\n%s", n.Sigs[0].Name, n.Sigs[0].Hash, n.Text) // // You can add your own signature to this message by re-signing the note: // // skey, vkey, err := note.GenerateKey(rand.Reader, "EnochRoot") // if err != nil { // log.Fatal(err) // } // _ = vkey // give to verifiers // // me, err := note.NewSigner(skey) // if err != nil { // log.Fatal(err) // } // // msg, err := 
note.Sign(n, me) // if err != nil { // log.Fatal(err) // } // os.Stdout.Write(msg) // // This will print a doubly-signed message, like: // // If you think cryptography is the answer to your problem, // then you don't know what your problem is. // // — PeterNeumann x08go/ZJkuBS9UG/SffcvIAQxVBtiFupLLr8pAcElZInNIuGUgYN1FFYC2pZSNXgKvqfqdngotpRZb6KE6RyyBwJnAM= // — EnochRoot rwz+eBzmZa0SO3NbfRGzPCpDckykFXSdeX+MNtCOXm2/5n2tiOHp+vAF1aGrQ5ovTG01oOTGwnWLox33WWd1RvMc+QQ= package note import ( "bytes" "crypto/ed25519" "crypto/sha256" "encoding/base64" "encoding/binary" "errors" "fmt" "io" "strconv" "strings" "unicode" "unicode/utf8" ) // A Verifier verifies messages signed with a specific key. type Verifier interface { // Name returns the server name associated with the key. Name() string // KeyHash returns the key hash. KeyHash() uint32 // Verify reports whether sig is a valid signature of msg. Verify(msg, sig []byte) bool } // A Signer signs messages using a specific key. type Signer interface { // Name returns the server name associated with the key. Name() string // KeyHash returns the key hash. KeyHash() uint32 // Sign returns a signature for the given message. Sign(msg []byte) ([]byte, error) } // keyHash computes the key hash for the given server name and encoded public key. func keyHash(name string, key []byte) uint32 { h := sha256.New() h.Write([]byte(name)) h.Write([]byte("\n")) h.Write(key) sum := h.Sum(nil) return binary.BigEndian.Uint32(sum) } var ( errVerifierID = errors.New("malformed verifier id") errVerifierAlg = errors.New("unknown verifier algorithm") errVerifierHash = errors.New("invalid verifier hash") ) const ( algEd25519 = 1 ) // isValidName reports whether name is valid. // It must be non-empty and not have any Unicode spaces or pluses. 
func isValidName(name string) bool { return name != "" && utf8.ValidString(name) && strings.IndexFunc(name, unicode.IsSpace) < 0 && !strings.Contains(name, "+") } // NewVerifier construct a new [Verifier] from an encoded verifier key. func NewVerifier(vkey string) (Verifier, error) { name, vkey := chop(vkey, "+") hash16, key64 := chop(vkey, "+") hash, err1 := strconv.ParseUint(hash16, 16, 32) key, err2 := base64.StdEncoding.DecodeString(key64) if len(hash16) != 8 || err1 != nil || err2 != nil || !isValidName(name) || len(key) == 0 { return nil, errVerifierID } if uint32(hash) != keyHash(name, key) { return nil, errVerifierHash } v := &verifier{ name: name, hash: uint32(hash), } alg, key := key[0], key[1:] switch alg { default: return nil, errVerifierAlg case algEd25519: if len(key) != 32 { return nil, errVerifierID } v.verify = func(msg, sig []byte) bool { return ed25519.Verify(key, msg, sig) } } return v, nil } // chop chops s at the first instance of sep, if any, // and returns the text before and after sep. // If sep is not present, chop returns before is s and after is empty. func chop(s, sep string) (before, after string) { i := strings.Index(s, sep) if i < 0 { return s, "" } return s[:i], s[i+len(sep):] } // verifier is a trivial Verifier implementation. type verifier struct { name string hash uint32 verify func([]byte, []byte) bool } func (v *verifier) Name() string { return v.name } func (v *verifier) KeyHash() uint32 { return v.hash } func (v *verifier) Verify(msg, sig []byte) bool { return v.verify(msg, sig) } // NewSigner constructs a new [Signer] from an encoded signer key. 
func NewSigner(skey string) (Signer, error) { priv1, skey := chop(skey, "+") priv2, skey := chop(skey, "+") name, skey := chop(skey, "+") hash16, key64 := chop(skey, "+") hash, err1 := strconv.ParseUint(hash16, 16, 32) key, err2 := base64.StdEncoding.DecodeString(key64) if priv1 != "PRIVATE" || priv2 != "KEY" || len(hash16) != 8 || err1 != nil || err2 != nil || !isValidName(name) || len(key) == 0 { return nil, errSignerID } // Note: hash is the hash of the public key and we have the private key. // Must verify hash after deriving public key. s := &signer{ name: name, hash: uint32(hash), } var pubkey []byte alg, key := key[0], key[1:] switch alg { default: return nil, errSignerAlg case algEd25519: if len(key) != 32 { return nil, errSignerID } key = ed25519.NewKeyFromSeed(key) pubkey = append([]byte{algEd25519}, key[32:]...) s.sign = func(msg []byte) ([]byte, error) { return ed25519.Sign(key, msg), nil } } if uint32(hash) != keyHash(name, pubkey) { return nil, errSignerHash } return s, nil } var ( errSignerID = errors.New("malformed verifier id") errSignerAlg = errors.New("unknown verifier algorithm") errSignerHash = errors.New("invalid verifier hash") ) // signer is a trivial Signer implementation. type signer struct { name string hash uint32 sign func([]byte) ([]byte, error) } func (s *signer) Name() string { return s.name } func (s *signer) KeyHash() uint32 { return s.hash } func (s *signer) Sign(msg []byte) ([]byte, error) { return s.sign(msg) } // GenerateKey generates a signer and verifier key pair for a named server. // The signer key skey is private and must be kept secret. func GenerateKey(rand io.Reader, name string) (skey, vkey string, err error) { pub, priv, err := ed25519.GenerateKey(rand) if err != nil { return "", "", err } pubkey := append([]byte{algEd25519}, pub...) privkey := append([]byte{algEd25519}, priv.Seed()...) 
h := keyHash(name, pubkey) skey = fmt.Sprintf("PRIVATE+KEY+%s+%08x+%s", name, h, base64.StdEncoding.EncodeToString(privkey)) vkey = fmt.Sprintf("%s+%08x+%s", name, h, base64.StdEncoding.EncodeToString(pubkey)) return skey, vkey, nil } // NewEd25519VerifierKey returns an encoded verifier key using the given name // and Ed25519 public key. func NewEd25519VerifierKey(name string, key ed25519.PublicKey) (string, error) { if len(key) != ed25519.PublicKeySize { return "", fmt.Errorf("invalid public key size %d, expected %d", len(key), ed25519.PublicKeySize) } pubkey := append([]byte{algEd25519}, key...) hash := keyHash(name, pubkey) b64Key := base64.StdEncoding.EncodeToString(pubkey) return fmt.Sprintf("%s+%08x+%s", name, hash, b64Key), nil } // A Verifiers is a collection of known verifier keys. type Verifiers interface { // Verifier returns the Verifier associated with the key // identified by the name and hash. // If the name, hash pair is unknown, Verifier should return // an UnknownVerifierError. Verifier(name string, hash uint32) (Verifier, error) } // An UnknownVerifierError indicates that the given key is not known. // The Open function records signatures without associated verifiers as // unverified signatures. type UnknownVerifierError struct { Name string KeyHash uint32 } func (e *UnknownVerifierError) Error() string { return fmt.Sprintf("unknown key %s+%08x", e.Name, e.KeyHash) } // An ambiguousVerifierError indicates that the given name and hash // match multiple keys passed to [VerifierList]. // (If this happens, some malicious actor has taken control of the // verifier list, at which point we may as well give up entirely, // but we diagnose the problem instead.) type ambiguousVerifierError struct { name string hash uint32 } func (e *ambiguousVerifierError) Error() string { return fmt.Sprintf("ambiguous key %s+%08x", e.name, e.hash) } // VerifierList returns a [Verifiers] implementation that uses the given list of verifiers. 
func VerifierList(list ...Verifier) Verifiers { m := make(verifierMap) for _, v := range list { k := nameHash{v.Name(), v.KeyHash()} m[k] = append(m[k], v) } return m } type nameHash struct { name string hash uint32 } type verifierMap map[nameHash][]Verifier func (m verifierMap) Verifier(name string, hash uint32) (Verifier, error) { v, ok := m[nameHash{name, hash}] if !ok { return nil, &UnknownVerifierError{name, hash} } if len(v) > 1 { return nil, &ambiguousVerifierError{name, hash} } return v[0], nil } // A Note is a text and signatures. type Note struct { Text string // text of note Sigs []Signature // verified signatures UnverifiedSigs []Signature // unverified signatures } // A Signature is a single signature found in a note. type Signature struct { // Name and Hash give the name and key hash // for the key that generated the signature. Name string Hash uint32 // Base64 records the base64-encoded signature bytes. Base64 string } // An UnverifiedNoteError indicates that the note // successfully parsed but had no verifiable signatures. type UnverifiedNoteError struct { Note *Note } func (e *UnverifiedNoteError) Error() string { return "note has no verifiable signatures" } // An InvalidSignatureError indicates that the given key was known // and the associated Verifier rejected the signature. type InvalidSignatureError struct { Name string Hash uint32 } func (e *InvalidSignatureError) Error() string { return fmt.Sprintf("invalid signature for key %s+%08x", e.Name, e.Hash) } var ( errMalformedNote = errors.New("malformed note") errInvalidSigner = errors.New("invalid signer") errMismatchedVerifier = errors.New("verifier name or hash doesn't match signature") sigSplit = []byte("\n\n") sigPrefix = []byte("— ") ) // Open opens and parses the message msg, checking signatures from the known verifiers. // // For each signature in the message, Open calls known.Verifier to find a verifier. 
// If known.Verifier returns a verifier and the verifier accepts the signature, // Open records the signature in the returned note's Sigs field. // If known.Verifier returns a verifier but the verifier rejects the signature, // Open returns an InvalidSignatureError. // If known.Verifier returns an UnknownVerifierError, // Open records the signature in the returned note's UnverifiedSigs field. // If known.Verifier returns any other error, Open returns that error. // // If no known verifier has signed an otherwise valid note, // Open returns an [UnverifiedNoteError]. // In this case, the unverified note can be fetched from inside the error. func Open(msg []byte, known Verifiers) (*Note, error) { if known == nil { // Treat nil Verifiers as empty list, to produce useful error instead of crash. known = VerifierList() } // Must have valid UTF-8 with no non-newline ASCII control characters. for i := 0; i < len(msg); { r, size := utf8.DecodeRune(msg[i:]) if r < 0x20 && r != '\n' || r == utf8.RuneError && size == 1 { return nil, errMalformedNote } i += size } // Must end with signature block preceded by blank line. split := bytes.LastIndex(msg, sigSplit) if split < 0 { return nil, errMalformedNote } text, sigs := msg[:split+1], msg[split+2:] if len(sigs) == 0 || sigs[len(sigs)-1] != '\n' { return nil, errMalformedNote } n := &Note{ Text: string(text), } // Parse and verify signatures. // Ignore duplicate signatures. seen := make(map[nameHash]bool) seenUnverified := make(map[string]bool) numSig := 0 for len(sigs) > 0 { // Pull out next signature line. // We know sigs[len(sigs)-1] == '\n', so IndexByte always finds one. 
i := bytes.IndexByte(sigs, '\n') line := sigs[:i] sigs = sigs[i+1:] if !bytes.HasPrefix(line, sigPrefix) { return nil, errMalformedNote } line = line[len(sigPrefix):] name, b64 := chop(string(line), " ") sig, err := base64.StdEncoding.DecodeString(b64) if err != nil || !isValidName(name) || b64 == "" || len(sig) < 5 { return nil, errMalformedNote } hash := binary.BigEndian.Uint32(sig[0:4]) sig = sig[4:] if numSig++; numSig > 100 { // Avoid spending forever parsing a note with many signatures. return nil, errMalformedNote } v, err := known.Verifier(name, hash) if _, ok := err.(*UnknownVerifierError); ok { // Drop repeated identical unverified signatures. if seenUnverified[string(line)] { continue } seenUnverified[string(line)] = true n.UnverifiedSigs = append(n.UnverifiedSigs, Signature{Name: name, Hash: hash, Base64: b64}) continue } if err != nil { return nil, err } // Check that known.Verifier returned the right verifier. if v.Name() != name || v.KeyHash() != hash { return nil, errMismatchedVerifier } // Drop repeated signatures by a single verifier. if seen[nameHash{name, hash}] { continue } seen[nameHash{name, hash}] = true ok := v.Verify(text, sig) if !ok { return nil, &InvalidSignatureError{name, hash} } n.Sigs = append(n.Sigs, Signature{Name: name, Hash: hash, Base64: b64}) } // Parsed and verified all the signatures. if len(n.Sigs) == 0 { return nil, &UnverifiedNoteError{n} } return n, nil } // Sign signs the note with the given signers and returns the encoded message. // The new signatures from signers are listed in the encoded message after // the existing signatures already present in n.Sigs. // If any signer uses the same key as an existing signature, // the existing signature is elided from the output. func Sign(n *Note, signers ...Signer) ([]byte, error) { var buf bytes.Buffer if !strings.HasSuffix(n.Text, "\n") { return nil, errMalformedNote } buf.WriteString(n.Text) // Prepare signatures. 
var sigs bytes.Buffer have := make(map[nameHash]bool) for _, s := range signers { name := s.Name() hash := s.KeyHash() have[nameHash{name, hash}] = true if !isValidName(name) { return nil, errInvalidSigner } sig, err := s.Sign(buf.Bytes()) // buf holds n.Text if err != nil { return nil, err } var hbuf [4]byte binary.BigEndian.PutUint32(hbuf[:], hash) b64 := base64.StdEncoding.EncodeToString(append(hbuf[:], sig...)) sigs.WriteString("— ") sigs.WriteString(name) sigs.WriteString(" ") sigs.WriteString(b64) sigs.WriteString("\n") } buf.WriteString("\n") // Emit existing signatures not replaced by new ones. for _, list := range [][]Signature{n.Sigs, n.UnverifiedSigs} { for _, sig := range list { name, hash := sig.Name, sig.Hash if !isValidName(name) { return nil, errMalformedNote } if have[nameHash{name, hash}] { continue } // Double-check hash against base64. raw, err := base64.StdEncoding.DecodeString(sig.Base64) if err != nil || len(raw) < 4 || binary.BigEndian.Uint32(raw) != hash { return nil, errMalformedNote } buf.WriteString("— ") buf.WriteString(sig.Name) buf.WriteString(" ") buf.WriteString(sig.Base64) buf.WriteString("\n") } } buf.Write(sigs.Bytes()) return buf.Bytes(), nil } mod-0.19.0/sumdb/note/note_test.go000066400000000000000000000363411463702072700170110ustar00rootroot00000000000000// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package note import ( "crypto/ed25519" "crypto/rand" "errors" "strings" "testing" "testing/iotest" ) func TestNewVerifier(t *testing.T) { vkey := "PeterNeumann+c74f20a3+ARpc2QcUPDhMQegwxbzhKqiBfsVkmqq/LDE4izWy10TW" _, err := NewVerifier(vkey) if err != nil { t.Fatal(err) } // Check various manglings are not accepted. 
badKey := func(k string) { _, err := NewVerifier(k) if err == nil { t.Errorf("NewVerifier(%q) succeeded, should have failed", k) } } b := []byte(vkey) for i := 0; i <= len(b); i++ { for j := i + 1; j <= len(b); j++ { if i != 0 || j != len(b) { badKey(string(b[i:j])) } } } for i := 0; i < len(b); i++ { b[i]++ badKey(string(b)) b[i]-- } badKey("PeterNeumann+cc469956+ARpc2QcUPDhMQegwxbzhKqiBfsVkmqq/LDE4izWy10TWBADKEY==") // wrong length key, with adjusted key hash badKey("PeterNeumann+173116ae+ZRpc2QcUPDhMQegwxbzhKqiBfsVkmqq/LDE4izWy10TW") // unknown algorithm, with adjusted key hash } func TestNewSigner(t *testing.T) { skey := "PRIVATE+KEY+PeterNeumann+c74f20a3+AYEKFALVFGyNhPJEMzD1QIDr+Y7hfZx09iUvxdXHKDFz" _, err := NewSigner(skey) if err != nil { t.Fatal(err) } // Check various manglings are not accepted. b := []byte(skey) for i := 0; i <= len(b); i++ { for j := i + 1; j <= len(b); j++ { if i == 0 && j == len(b) { continue } _, err := NewSigner(string(b[i:j])) if err == nil { t.Errorf("NewSigner(%q) succeeded, should have failed", b[i:j]) } } } for i := 0; i < len(b); i++ { b[i]++ _, err := NewSigner(string(b)) if err == nil { t.Errorf("NewSigner(%q) succeeded, should have failed", b) } b[i]-- } } func testSignerAndVerifier(t *testing.T, Name string, signer Signer, verifier Verifier) { if name := signer.Name(); name != Name { t.Errorf("signer.Name() = %q, want %q", name, Name) } if name := verifier.Name(); name != Name { t.Errorf("verifier.Name() = %q, want %q", name, Name) } shash := signer.KeyHash() vhash := verifier.KeyHash() if shash != vhash { t.Errorf("signer.KeyHash() = %#08x != verifier.KeyHash() = %#08x", shash, vhash) } msg := []byte("hi") sig, err := signer.Sign(msg) if err != nil { t.Fatalf("signer.Sign: %v", err) } if !verifier.Verify(msg, sig) { t.Fatalf("verifier.Verify failed on signature returned by signer.Sign") } sig[0]++ if verifier.Verify(msg, sig) { t.Fatalf("verifier.Verify succeeded on corrupt signature") } sig[0]-- msg[0]++ if 
verifier.Verify(msg, sig) { t.Fatalf("verifier.Verify succeeded on corrupt message") } } func TestGenerateKey(t *testing.T) { // Generate key pair, make sure it is all self-consistent. const Name = "EnochRoot" skey, vkey, err := GenerateKey(rand.Reader, Name) if err != nil { t.Fatalf("GenerateKey: %v", err) } signer, err := NewSigner(skey) if err != nil { t.Fatalf("NewSigner: %v", err) } verifier, err := NewVerifier(vkey) if err != nil { t.Fatalf("NewVerifier: %v", err) } testSignerAndVerifier(t, Name, signer, verifier) // Check that GenerateKey returns error from rand reader. _, _, err = GenerateKey(iotest.TimeoutReader(iotest.OneByteReader(rand.Reader)), Name) if err == nil { t.Fatalf("GenerateKey succeeded with error-returning rand reader") } } func TestFromEd25519(t *testing.T) { const Name = "EnochRoot" pub, priv, err := ed25519.GenerateKey(rand.Reader) if err != nil { t.Fatalf("GenerateKey: %v", err) } signer, err := newSignerFromEd25519Seed(Name, priv.Seed()) if err != nil { t.Fatalf("newSignerFromEd25519Seed: %v", err) } vkey, err := NewEd25519VerifierKey(Name, pub) if err != nil { t.Fatalf("NewEd25519VerifierKey: %v", err) } verifier, err := NewVerifier(vkey) if err != nil { t.Fatalf("NewVerifier: %v", err) } testSignerAndVerifier(t, Name, signer, verifier) // Check that wrong key sizes return errors. _, err = NewEd25519VerifierKey(Name, pub[:len(pub)-1]) if err == nil { t.Errorf("NewEd25519VerifierKey succeeded with a seed of the wrong size") } } // newSignerFromEd25519Seed constructs a new signer from a verifier name and a // crypto/ed25519 private key seed. func newSignerFromEd25519Seed(name string, seed []byte) (Signer, error) { if len(seed) != ed25519.SeedSize { return nil, errors.New("invalid seed size") } priv := ed25519.NewKeyFromSeed(seed) pub := priv[32:] pubkey := append([]byte{algEd25519}, pub...) 
hash := keyHash(name, pubkey) s := &signer{ name: name, hash: hash, sign: func(msg []byte) ([]byte, error) { return ed25519.Sign(priv, msg), nil }, } return s, nil } func TestSign(t *testing.T) { skey := "PRIVATE+KEY+PeterNeumann+c74f20a3+AYEKFALVFGyNhPJEMzD1QIDr+Y7hfZx09iUvxdXHKDFz" text := "If you think cryptography is the answer to your problem,\n" + "then you don't know what your problem is.\n" signer, err := NewSigner(skey) if err != nil { t.Fatal(err) } msg, err := Sign(&Note{Text: text}, signer) if err != nil { t.Fatal(err) } want := `If you think cryptography is the answer to your problem, then you don't know what your problem is. — PeterNeumann x08go/ZJkuBS9UG/SffcvIAQxVBtiFupLLr8pAcElZInNIuGUgYN1FFYC2pZSNXgKvqfqdngotpRZb6KE6RyyBwJnAM= ` if string(msg) != want { t.Errorf("Sign: wrong output\nhave:\n%s\nwant:\n%s", msg, want) } // Check that existing signature is replaced by new one. msg, err = Sign(&Note{Text: text, Sigs: []Signature{{Name: "PeterNeumann", Hash: 0xc74f20a3, Base64: "BADSIGN="}}}, signer) if err != nil { t.Fatal(err) } if string(msg) != want { t.Errorf("Sign replacing signature: wrong output\nhave:\n%s\nwant:\n%s", msg, want) } // Check various bad inputs. 
_, err = Sign(&Note{Text: "abc"}, signer) if err == nil || err.Error() != "malformed note" { t.Fatalf("Sign with short text: %v, want malformed note error", err) } _, err = Sign(&Note{Text: text, Sigs: []Signature{{Name: "a+b", Base64: "ABCD"}}}) if err == nil || err.Error() != "malformed note" { t.Fatalf("Sign with bad name: %v, want malformed note error", err) } _, err = Sign(&Note{Text: text, Sigs: []Signature{{Name: "PeterNeumann", Hash: 0xc74f20a3, Base64: "BADHASH="}}}) if err == nil || err.Error() != "malformed note" { t.Fatalf("Sign with bad pre-filled signature: %v, want malformed note error", err) } _, err = Sign(&Note{Text: text}, &badSigner{signer}) if err == nil || err.Error() != "invalid signer" { t.Fatalf("Sign with bad signer: %v, want invalid signer error", err) } _, err = Sign(&Note{Text: text}, &errSigner{signer}) if err != errSurprise { t.Fatalf("Sign with failing signer: %v, want errSurprise", err) } } func TestVerifierList(t *testing.T) { peterKey := "PeterNeumann+c74f20a3+ARpc2QcUPDhMQegwxbzhKqiBfsVkmqq/LDE4izWy10TW" peterVerifier, err := NewVerifier(peterKey) if err != nil { t.Fatal(err) } enochKey := "EnochRoot+af0cfe78+ATtqJ7zOtqQtYqOo0CpvDXNlMhV3HeJDpjrASKGLWdop" enochVerifier, err := NewVerifier(enochKey) if err != nil { t.Fatal(err) } list := VerifierList(peterVerifier, enochVerifier, enochVerifier) v, err := list.Verifier("PeterNeumann", 0xc74f20a3) if v != peterVerifier || err != nil { t.Fatalf("list.Verifier(peter) = %v, %v, want %v, nil", v, err, peterVerifier) } v, err = list.Verifier("PeterNeumann", 0xc74f20a4) if v != nil || err == nil || err.Error() != "unknown key PeterNeumann+c74f20a4" { t.Fatalf("list.Verifier(peter bad hash) = %v, %v, want nil, unknown key error", v, err) } v, err = list.Verifier("PeterNeuman", 0xc74f20a3) if v != nil || err == nil || err.Error() != "unknown key PeterNeuman+c74f20a3" { t.Fatalf("list.Verifier(peter bad name) = %v, %v, want nil, unknown key error", v, err) } v, err = 
list.Verifier("EnochRoot", 0xaf0cfe78) if v != nil || err == nil || err.Error() != "ambiguous key EnochRoot+af0cfe78" { t.Fatalf("list.Verifier(enoch) = %v, %v, want nil, ambiguous key error", v, err) } } type badSigner struct { Signer } func (b *badSigner) Name() string { return "bad name" } var errSurprise = errors.New("surprise!") type errSigner struct { Signer } func (e *errSigner) Sign([]byte) ([]byte, error) { return nil, errSurprise } type fixedVerifier struct{ v Verifier } func (v fixedVerifier) Verifier(name string, hash uint32) (Verifier, error) { return v.v, nil } func TestOpen(t *testing.T) { peterKey := "PeterNeumann+c74f20a3+ARpc2QcUPDhMQegwxbzhKqiBfsVkmqq/LDE4izWy10TW" peterVerifier, err := NewVerifier(peterKey) if err != nil { t.Fatal(err) } enochKey := "EnochRoot+af0cfe78+ATtqJ7zOtqQtYqOo0CpvDXNlMhV3HeJDpjrASKGLWdop" enochVerifier, err := NewVerifier(enochKey) if err != nil { t.Fatal(err) } text := `If you think cryptography is the answer to your problem, then you don't know what your problem is. ` peterSig := "— PeterNeumann x08go/ZJkuBS9UG/SffcvIAQxVBtiFupLLr8pAcElZInNIuGUgYN1FFYC2pZSNXgKvqfqdngotpRZb6KE6RyyBwJnAM=\n" enochSig := "— EnochRoot rwz+eBzmZa0SO3NbfRGzPCpDckykFXSdeX+MNtCOXm2/5n2tiOHp+vAF1aGrQ5ovTG01oOTGwnWLox33WWd1RvMc+QQ=\n" peter := Signature{"PeterNeumann", 0xc74f20a3, "x08go/ZJkuBS9UG/SffcvIAQxVBtiFupLLr8pAcElZInNIuGUgYN1FFYC2pZSNXgKvqfqdngotpRZb6KE6RyyBwJnAM="} enoch := Signature{"EnochRoot", 0xaf0cfe78, "rwz+eBzmZa0SO3NbfRGzPCpDckykFXSdeX+MNtCOXm2/5n2tiOHp+vAF1aGrQ5ovTG01oOTGwnWLox33WWd1RvMc+QQ="} // Check one signature verified, one not. 
n, err := Open([]byte(text+"\n"+peterSig+enochSig), VerifierList(peterVerifier)) if err != nil { t.Fatal(err) } if n.Text != text { t.Errorf("n.Text = %q, want %q", n.Text, text) } if len(n.Sigs) != 1 || n.Sigs[0] != peter { t.Errorf("n.Sigs:\nhave %v\nwant %v", n.Sigs, []Signature{peter}) } if len(n.UnverifiedSigs) != 1 || n.UnverifiedSigs[0] != enoch { t.Errorf("n.UnverifiedSigs:\nhave %v\nwant %v", n.Sigs, []Signature{peter}) } // Check both verified. n, err = Open([]byte(text+"\n"+peterSig+enochSig), VerifierList(peterVerifier, enochVerifier)) if err != nil { t.Fatal(err) } if len(n.Sigs) != 2 || n.Sigs[0] != peter || n.Sigs[1] != enoch { t.Errorf("n.Sigs:\nhave %v\nwant %v", n.Sigs, []Signature{peter, enoch}) } if len(n.UnverifiedSigs) != 0 { t.Errorf("n.UnverifiedSigs:\nhave %v\nwant %v", n.Sigs, []Signature{}) } // Check both unverified. n, err = Open([]byte(text+"\n"+peterSig+enochSig), VerifierList()) if n != nil || err == nil { t.Fatalf("Open unverified = %v, %v, want nil, error", n, err) } e, ok := err.(*UnverifiedNoteError) if !ok { t.Fatalf("Open unverified: err is %T, want *UnverifiedNoteError", err) } if err.Error() != "note has no verifiable signatures" { t.Fatalf("Open unverified: err.Error() = %q, want %q", err.Error(), "note has no verifiable signatures") } n = e.Note if n == nil { t.Fatalf("Open unverified: missing note in UnverifiedNoteError") } if len(n.Sigs) != 0 { t.Errorf("n.Sigs:\nhave %v\nwant %v", n.Sigs, []Signature{}) } if len(n.UnverifiedSigs) != 2 || n.UnverifiedSigs[0] != peter || n.UnverifiedSigs[1] != enoch { t.Errorf("n.UnverifiedSigs:\nhave %v\nwant %v", n.Sigs, []Signature{peter, enoch}) } // Check duplicated verifier. _, err = Open([]byte(text+"\n"+enochSig), VerifierList(enochVerifier, peterVerifier, enochVerifier)) if err == nil || err.Error() != "ambiguous key EnochRoot+af0cfe78" { t.Fatalf("Open with duplicated verifier: err=%v, want ambiguous key", err) } // Check unused duplicated verifier. 
_, err = Open([]byte(text+"\n"+peterSig), VerifierList(enochVerifier, peterVerifier, enochVerifier)) if err != nil { t.Fatal(err) } // Check too many signatures. n, err = Open([]byte(text+"\n"+strings.Repeat(peterSig, 101)), VerifierList(peterVerifier)) if n != nil || err == nil || err.Error() != "malformed note" { t.Fatalf("Open too many verified signatures = %v, %v, want nil, malformed note error", n, err) } n, err = Open([]byte(text+"\n"+strings.Repeat(peterSig, 101)), VerifierList()) if n != nil || err == nil || err.Error() != "malformed note" { t.Fatalf("Open too many verified signatures = %v, %v, want nil, malformed note error", n, err) } // Invalid signature. n, err = Open([]byte(text+"\n"+peterSig[:60]+"ABCD"+peterSig[60:]), VerifierList(peterVerifier)) if n != nil || err == nil || err.Error() != "invalid signature for key PeterNeumann+c74f20a3" { t.Fatalf("Open too many verified signatures = %v, %v, want nil, invalid signature error", n, err) } // Duplicated verified and unverified signatures. enochABCD := Signature{"EnochRoot", 0xaf0cfe78, "rwz+eBzmZa0SO3NbfRGzPCpDckykFXSdeX+MNtCOXm2/5n" + "ABCD" + "2tiOHp+vAF1aGrQ5ovTG01oOTGwnWLox33WWd1RvMc+QQ="} n, err = Open([]byte(text+"\n"+peterSig+peterSig+enochSig+enochSig+enochSig[:60]+"ABCD"+enochSig[60:]), VerifierList(peterVerifier)) if err != nil { t.Fatal(err) } if len(n.Sigs) != 1 || n.Sigs[0] != peter { t.Errorf("n.Sigs:\nhave %v\nwant %v", n.Sigs, []Signature{peter}) } if len(n.UnverifiedSigs) != 2 || n.UnverifiedSigs[0] != enoch || n.UnverifiedSigs[1] != enochABCD { t.Errorf("n.UnverifiedSigs:\nhave %v\nwant %v", n.UnverifiedSigs, []Signature{enoch, enochABCD}) } // Invalid encoded message syntax. 
badMsgs := []string{ text, text + "\n", text + "\n" + peterSig[:len(peterSig)-1], "\x01" + text + "\n" + peterSig, "\xff" + text + "\n" + peterSig, text + "\n" + "— Bad Name x08go/ZJkuBS9UG/SffcvIAQxVBtiFupLLr8pAcElZInNIuGUgYN1FFYC2pZSNXgKvqfqdngotpRZb6KE6RyyBwJnAM=", text + "\n" + peterSig + "Unexpected line.\n", } for _, msg := range badMsgs { n, err := Open([]byte(msg), VerifierList(peterVerifier)) if n != nil || err == nil || err.Error() != "malformed note" { t.Fatalf("Open bad msg = %v, %v, want nil, malformed note error\nmsg:\n%s", n, err, msg) } } // Verifiers returns a Verifier for the wrong name or hash. misnamedSig := strings.Replace(peterSig, "PeterNeumann", "CarmenSandiego", -1) _, err = Open([]byte(text+"\n"+misnamedSig), fixedVerifier{peterVerifier}) if err != errMismatchedVerifier { t.Fatalf("Open with wrong Verifier, err=%v, want errMismatchedVerifier", err) } wrongHash := strings.Replace(peterSig, "x08g", "xxxx", -1) _, err = Open([]byte(text+"\n"+wrongHash), fixedVerifier{peterVerifier}) if err != errMismatchedVerifier { t.Fatalf("Open with wrong Verifier, err=%v, want errMismatchedVerifier", err) } } func BenchmarkOpen(b *testing.B) { vkey := "PeterNeumann+c74f20a3+ARpc2QcUPDhMQegwxbzhKqiBfsVkmqq/LDE4izWy10TW" msg := []byte("If you think cryptography is the answer to your problem,\n" + "then you don't know what your problem is.\n" + "\n" + "— PeterNeumann x08go/ZJkuBS9UG/SffcvIAQxVBtiFupLLr8pAcElZInNIuGUgYN1FFYC2pZSNXgKvqfqdngotpRZb6KE6RyyBwJnAM=\n") verifier, err := NewVerifier(vkey) if err != nil { b.Fatal(err) } verifiers := VerifierList(verifier) verifiers0 := VerifierList() // Try with 0 signatures and 1 signature so we can tell how much each signature adds. 
b.Run("Sig0", func(b *testing.B) { for i := 0; i < b.N; i++ { _, err := Open(msg, verifiers0) e, ok := err.(*UnverifiedNoteError) if !ok { b.Fatal("expected UnverifiedNoteError") } n := e.Note if len(n.Sigs) != 0 || len(n.UnverifiedSigs) != 1 { b.Fatal("wrong signature count") } } }) b.Run("Sig1", func(b *testing.B) { for i := 0; i < b.N; i++ { n, err := Open(msg, verifiers) if err != nil { b.Fatal(err) } if len(n.Sigs) != 1 || len(n.UnverifiedSigs) != 0 { b.Fatal("wrong signature count") } } }) } mod-0.19.0/sumdb/server.go000066400000000000000000000115751463702072700153500ustar00rootroot00000000000000// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package sumdb implements the HTTP protocols for serving or accessing a module checksum database. package sumdb import ( "context" "net/http" "os" "strings" "golang.org/x/mod/internal/lazyregexp" "golang.org/x/mod/module" "golang.org/x/mod/sumdb/tlog" ) // A ServerOps provides the external operations // (underlying database access and so on) needed by the [Server]. type ServerOps interface { // Signed returns the signed hash of the latest tree. Signed(ctx context.Context) ([]byte, error) // ReadRecords returns the content for the n records id through id+n-1. ReadRecords(ctx context.Context, id, n int64) ([][]byte, error) // Lookup looks up a record for the given module, // returning the record ID. Lookup(ctx context.Context, m module.Version) (int64, error) // ReadTileData reads the content of tile t. // It is only invoked for hash tiles (t.L ≥ 0). ReadTileData(ctx context.Context, t tlog.Tile) ([]byte, error) } // A Server is the checksum database HTTP server, // which implements http.Handler and should be invoked // to serve the paths listed in [ServerPaths]. type Server struct { ops ServerOps } // NewServer returns a new Server using the given operations. 
func NewServer(ops ServerOps) *Server { return &Server{ops: ops} } // ServerPaths are the URL paths the Server can (and should) serve. // // Typically a server will do: // // srv := sumdb.NewServer(ops) // for _, path := range sumdb.ServerPaths { // http.Handle(path, srv) // } var ServerPaths = []string{ "/lookup/", "/latest", "/tile/", } var modVerRE = lazyregexp.New(`^[^@]+@v[0-9]+\.[0-9]+\.[0-9]+(-[^@]*)?(\+incompatible)?$`) func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { ctx := r.Context() switch { default: http.NotFound(w, r) case strings.HasPrefix(r.URL.Path, "/lookup/"): mod := strings.TrimPrefix(r.URL.Path, "/lookup/") if !modVerRE.MatchString(mod) { http.Error(w, "invalid module@version syntax", http.StatusBadRequest) return } i := strings.Index(mod, "@") escPath, escVers := mod[:i], mod[i+1:] path, err := module.UnescapePath(escPath) if err != nil { reportError(w, err) return } vers, err := module.UnescapeVersion(escVers) if err != nil { reportError(w, err) return } id, err := s.ops.Lookup(ctx, module.Version{Path: path, Version: vers}) if err != nil { reportError(w, err) return } records, err := s.ops.ReadRecords(ctx, id, 1) if err != nil { // This should never happen - the lookup says the record exists. 
http.Error(w, err.Error(), http.StatusInternalServerError) return } if len(records) != 1 { http.Error(w, "invalid record count returned by ReadRecords", http.StatusInternalServerError) return } msg, err := tlog.FormatRecord(id, records[0]) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } signed, err := s.ops.Signed(ctx) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } w.Header().Set("Content-Type", "text/plain; charset=UTF-8") w.Write(msg) w.Write(signed) case r.URL.Path == "/latest": data, err := s.ops.Signed(ctx) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } w.Header().Set("Content-Type", "text/plain; charset=UTF-8") w.Write(data) case strings.HasPrefix(r.URL.Path, "/tile/"): t, err := tlog.ParseTilePath(r.URL.Path[1:]) if err != nil { http.Error(w, "invalid tile syntax", http.StatusBadRequest) return } if t.L == -1 { // Record data. start := t.N << uint(t.H) records, err := s.ops.ReadRecords(ctx, start, int64(t.W)) if err != nil { reportError(w, err) return } if len(records) != t.W { http.Error(w, "invalid record count returned by ReadRecords", http.StatusInternalServerError) return } var data []byte for i, text := range records { msg, err := tlog.FormatRecord(start+int64(i), text) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } data = append(data, msg...) } w.Header().Set("Content-Type", "text/plain; charset=UTF-8") w.Write(data) return } data, err := s.ops.ReadTileData(ctx, t) if err != nil { reportError(w, err) return } w.Header().Set("Content-Type", "application/octet-stream") w.Write(data) } } // reportError reports err to w. // If it's a not-found, the reported error is 404. // Otherwise it is an internal server error. // The caller must only call reportError in contexts where // a not-found err should be reported as 404. 
func reportError(w http.ResponseWriter, err error) { if os.IsNotExist(err) { http.Error(w, err.Error(), http.StatusNotFound) return } http.Error(w, err.Error(), http.StatusInternalServerError) } mod-0.19.0/sumdb/storage/000077500000000000000000000000001463702072700151465ustar00rootroot00000000000000mod-0.19.0/sumdb/storage/mem.go000066400000000000000000000052211463702072700162530ustar00rootroot00000000000000// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package storage import ( "context" "errors" "math/rand" "sync" ) // Mem is an in-memory implementation of [Storage]. // It is meant for tests and does not store any data to persistent storage. // // The zero value is an empty Mem ready for use. type Mem struct { mu sync.RWMutex table map[string]string } // A memTx is a transaction in a Mem. type memTx struct { m *Mem writes []Write } // errRetry is an internal sentinel indicating that the transaction should be retried. // It is never returned to the caller. var errRetry = errors.New("retry") // ReadOnly runs f in a read-only transaction. func (m *Mem) ReadOnly(ctx context.Context, f func(context.Context, Transaction) error) error { tx := &memTx{m: m} for { err := func() error { m.mu.Lock() defer m.mu.Unlock() if err := f(ctx, tx); err != nil { return err } // Spurious retry with 10% probability. if rand.Intn(10) == 0 { return errRetry } return nil }() if err != errRetry { return err } } } // ReadWrite runs f in a read-write transaction. func (m *Mem) ReadWrite(ctx context.Context, f func(context.Context, Transaction) error) error { tx := &memTx{m: m} for { err := func() error { m.mu.Lock() defer m.mu.Unlock() tx.writes = []Write{} if err := f(ctx, tx); err != nil { return err } // Spurious retry with 10% probability. 
if rand.Intn(10) == 0 { return errRetry } if m.table == nil { m.table = make(map[string]string) } for _, w := range tx.writes { if w.Value == "" { delete(m.table, w.Key) } else { m.table[w.Key] = w.Value } } return nil }() if err != errRetry { return err } } } // ReadValues returns the values associated with the given keys. func (tx *memTx) ReadValues(ctx context.Context, keys []string) ([]string, error) { vals := make([]string, len(keys)) for i, key := range keys { vals[i] = tx.m.table[key] } return vals, nil } // ReadValue returns the value associated with the single key. func (tx *memTx) ReadValue(ctx context.Context, key string) (string, error) { return tx.m.table[key], nil } // BufferWrites buffers a list of writes to be applied // to the table when the transaction commits. // The changes are not visible to reads within the transaction. // The map argument is not used after the call returns. func (tx *memTx) BufferWrites(list []Write) error { if tx.writes == nil { panic("BufferWrite on read-only transaction") } tx.writes = append(tx.writes, list...) return nil } mod-0.19.0/sumdb/storage/mem_test.go000066400000000000000000000004421463702072700173120ustar00rootroot00000000000000// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package storage import ( "context" "testing" ) func TestMem(t *testing.T) { TestStorage(t, context.Background(), new(Mem)) } mod-0.19.0/sumdb/storage/storage.go000066400000000000000000000051241463702072700171430ustar00rootroot00000000000000// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package storage defines storage interfaces for and a basic implementation of a checksum database. package storage import "context" // A Storage is a transaction key-value storage system. 
type Storage interface { // ReadOnly runs f in a read-only transaction. // It is equivalent to ReadWrite except that the // transaction's BufferWrite method will fail unconditionally. // (The implementation may be able to optimize the // transaction if it knows at the start that no writes will happen.) ReadOnly(ctx context.Context, f func(context.Context, Transaction) error) error // ReadWrite runs f in a read-write transaction. // If f returns an error, the transaction aborts and returns that error. // If f returns nil, the transaction attempts to commit and then return nil. // Otherwise it tries again. Note that f may be called multiple times and that // the result only describes the effect of the final call to f. // The caller must take care not to use any state computed during // earlier calls to f, or even the last call to f when an error is returned. ReadWrite(ctx context.Context, f func(context.Context, Transaction) error) error } // A Transaction provides read and write operations within a transaction, // as executed by [Storage]'s ReadOnly or ReadWrite methods. type Transaction interface { // ReadValue reads the value associated with a single key. // If there is no value associated with that key, ReadKey returns an empty value. // An error is only returned for problems accessing the storage. ReadValue(ctx context.Context, key string) (value string, err error) // ReadValues reads the values associated with the given keys. // If there is no value stored for a given key, ReadValues returns an empty value for that key. // An error is only returned for problems accessing the storage. ReadValues(ctx context.Context, keys []string) (values []string, err error) // BufferWrites buffers the given writes, // to be applied at the end of the transaction. // BufferWrites panics if this is a ReadOnly transaction. // It returns an error if it detects any other problems. 
// The behavior of multiple writes buffered using the same key // is undefined: it may return an error or not. BufferWrites(writes []Write) error } // A Write is a single change to be applied at the end of a read-write transaction. // A Write with an empty value deletes the value associated with the given key. type Write struct { Key string Value string } mod-0.19.0/sumdb/storage/test.go000066400000000000000000000033211463702072700164530ustar00rootroot00000000000000// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package storage import ( "context" "fmt" "io" "testing" ) // TestStorage tests a Storage implementation. func TestStorage(t *testing.T, ctx context.Context, storage Storage) { s := storage // Insert records. err := s.ReadWrite(ctx, func(ctx context.Context, tx Transaction) error { for i := 0; i < 10; i++ { err := tx.BufferWrites([]Write{ {Key: fmt.Sprint(i), Value: fmt.Sprint(-i)}, {Key: fmt.Sprint(1000 + i), Value: fmt.Sprint(-1000 - i)}, }) if err != nil { t.Fatal(err) } } return nil }) if err != nil { t.Fatal(err) } // Read the records back. testRead := func() { err := s.ReadOnly(ctx, func(ctx context.Context, tx Transaction) error { for i := int64(0); i < 1010; i++ { if i == 10 { i = 1000 } val, err := tx.ReadValue(ctx, fmt.Sprint(i)) if err != nil { t.Fatalf("reading %v: %v", i, err) } if want := fmt.Sprint(-i); val != want { t.Fatalf("ReadValue %v = %q, want %v", i, val, want) } } return nil }) if err != nil { t.Fatal(err) } } testRead() // Buffered writes in failed transaction should not be applied. 
err = s.ReadWrite(ctx, func(ctx context.Context, tx Transaction) error { tx.BufferWrites([]Write{ {Key: fmt.Sprint(0), Value: ""}, // delete {Key: fmt.Sprint(1), Value: "overwrite"}, // overwrite }) if err != nil { t.Fatal(err) } return io.ErrUnexpectedEOF }) if err != io.ErrUnexpectedEOF { t.Fatalf("ReadWrite returned %v, want ErrUnexpectedEOF", err) } // All same values should still be there. testRead() } mod-0.19.0/sumdb/test.go000066400000000000000000000055131463702072700150140ustar00rootroot00000000000000// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package sumdb import ( "context" "fmt" "sync" "golang.org/x/mod/module" "golang.org/x/mod/sumdb/note" "golang.org/x/mod/sumdb/tlog" ) // NewTestServer constructs a new [TestServer] // that will sign its tree with the given signer key // (see [golang.org/x/mod/sumdb/note]) // and fetch new records as needed by calling gosum. func NewTestServer(signer string, gosum func(path, vers string) ([]byte, error)) *TestServer { return &TestServer{signer: signer, gosum: gosum} } // A TestServer is an in-memory implementation of [ServerOps] for testing. type TestServer struct { signer string gosum func(path, vers string) ([]byte, error) mu sync.Mutex hashes testHashes records [][]byte lookup map[string]int64 } // testHashes implements tlog.HashReader, reading from a slice. 
type testHashes []tlog.Hash func (h testHashes) ReadHashes(indexes []int64) ([]tlog.Hash, error) { var list []tlog.Hash for _, id := range indexes { list = append(list, h[id]) } return list, nil } func (s *TestServer) Signed(ctx context.Context) ([]byte, error) { s.mu.Lock() defer s.mu.Unlock() size := int64(len(s.records)) h, err := tlog.TreeHash(size, s.hashes) if err != nil { return nil, err } text := tlog.FormatTree(tlog.Tree{N: size, Hash: h}) signer, err := note.NewSigner(s.signer) if err != nil { return nil, err } return note.Sign(¬e.Note{Text: string(text)}, signer) } func (s *TestServer) ReadRecords(ctx context.Context, id, n int64) ([][]byte, error) { s.mu.Lock() defer s.mu.Unlock() var list [][]byte for i := int64(0); i < n; i++ { if id+i >= int64(len(s.records)) { return nil, fmt.Errorf("missing records") } list = append(list, s.records[id+i]) } return list, nil } func (s *TestServer) Lookup(ctx context.Context, m module.Version) (int64, error) { key := m.String() s.mu.Lock() id, ok := s.lookup[key] s.mu.Unlock() if ok { return id, nil } // Look up module and compute go.sum lines. data, err := s.gosum(m.Path, m.Version) if err != nil { return 0, err } s.mu.Lock() defer s.mu.Unlock() // We ran the fetch without the lock. // If another fetch happened and committed, use it instead. id, ok = s.lookup[key] if ok { return id, nil } // Add record. id = int64(len(s.records)) s.records = append(s.records, data) if s.lookup == nil { s.lookup = make(map[string]int64) } s.lookup[key] = id hashes, err := tlog.StoredHashesForRecordHash(id, tlog.RecordHash(data), s.hashes) if err != nil { panic(err) } s.hashes = append(s.hashes, hashes...) 
return id, nil } func (s *TestServer) ReadTileData(ctx context.Context, t tlog.Tile) ([]byte, error) { s.mu.Lock() defer s.mu.Unlock() return tlog.ReadTileData(t, s.hashes) } mod-0.19.0/sumdb/tlog/000077500000000000000000000000001463702072700144475ustar00rootroot00000000000000mod-0.19.0/sumdb/tlog/ct_test.go000066400000000000000000000042451463702072700164500ustar00rootroot00000000000000// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package tlog import ( "encoding/json" "fmt" "io" "net/http" "net/url" "os" "testing" ) func TestCertificateTransparency(t *testing.T) { // Test that we can verify actual Certificate Transparency proofs. // (The other tests check that we can verify our own proofs; // this is a test that the two are compatible.) if testing.Short() { t.Skip("skipping in -short mode") } var root ctTree httpGET(t, "http://ct.googleapis.com/logs/argon2020/ct/v1/get-sth", &root) var leaf ctEntries httpGET(t, "http://ct.googleapis.com/logs/argon2020/ct/v1/get-entries?start=10000&end=10000", &leaf) hash := RecordHash(leaf.Entries[0].Data) var rp ctRecordProof httpGET(t, "http://ct.googleapis.com/logs/argon2020/ct/v1/get-proof-by-hash?tree_size="+fmt.Sprint(root.Size)+"&hash="+url.QueryEscape(hash.String()), &rp) err := CheckRecord(rp.Proof, root.Size, root.Hash, 10000, hash) if err != nil { t.Fatal(err) } var tp ctTreeProof httpGET(t, "http://ct.googleapis.com/logs/argon2020/ct/v1/get-sth-consistency?first=3654490&second="+fmt.Sprint(root.Size), &tp) oh, _ := ParseHash("AuIZ5V6sDUj1vn3Y1K85oOaQ7y+FJJKtyRTl1edIKBQ=") err = CheckTree(tp.Proof, root.Size, root.Hash, 3654490, oh) if err != nil { t.Fatal(err) } } type ctTree struct { Size int64 `json:"tree_size"` Hash Hash `json:"sha256_root_hash"` } type ctEntries struct { Entries []*ctEntry } type ctEntry struct { Data []byte `json:"leaf_input"` } type ctRecordProof struct { Index int64 `json:"leaf_index"` 
Proof RecordProof `json:"audit_path"` } type ctTreeProof struct { Proof TreeProof `json:"consistency"` } func httpGET(t *testing.T, url string, targ interface{}) { if testing.Verbose() { println() println(url) } resp, err := http.Get(url) if err != nil { t.Fatal(err) } defer resp.Body.Close() data, err := io.ReadAll(resp.Body) if err != nil { t.Fatal(err) } if testing.Verbose() { os.Stdout.Write(data) } err = json.Unmarshal(data, targ) if err != nil { println(url) os.Stdout.Write(data) t.Fatal(err) } } mod-0.19.0/sumdb/tlog/note.go000066400000000000000000000073401463702072700157470ustar00rootroot00000000000000// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package tlog import ( "bytes" "encoding/base64" "errors" "fmt" "strconv" "strings" "unicode/utf8" ) // A Tree is a tree description, to be signed by a go.sum database server. type Tree struct { N int64 Hash Hash } // FormatTree formats a tree description for inclusion in a note. // // The encoded form is three lines, each ending in a newline (U+000A): // // go.sum database tree // N // Hash // // where N is in decimal and Hash is in base64. // // A future backwards-compatible encoding may add additional lines, // which the parser can ignore. // A future backwards-incompatible encoding would use a different // first line (for example, "go.sum database tree v2"). func FormatTree(tree Tree) []byte { return []byte(fmt.Sprintf("go.sum database tree\n%d\n%s\n", tree.N, tree.Hash)) } var errMalformedTree = errors.New("malformed tree note") var treePrefix = []byte("go.sum database tree\n") // ParseTree parses a formatted tree root description. func ParseTree(text []byte) (tree Tree, err error) { // The message looks like: // // go.sum database tree // 2 // nND/nri/U0xuHUrYSy0HtMeal2vzD9V4k/BO79C+QeI= // // For forwards compatibility, extra text lines after the encoding are ignored. 
if !bytes.HasPrefix(text, treePrefix) || bytes.Count(text, []byte("\n")) < 3 || len(text) > 1e6 { return Tree{}, errMalformedTree } lines := strings.SplitN(string(text), "\n", 4) n, err := strconv.ParseInt(lines[1], 10, 64) if err != nil || n < 0 || lines[1] != strconv.FormatInt(n, 10) { return Tree{}, errMalformedTree } h, err := base64.StdEncoding.DecodeString(lines[2]) if err != nil || len(h) != HashSize { return Tree{}, errMalformedTree } var hash Hash copy(hash[:], h) return Tree{n, hash}, nil } var errMalformedRecord = errors.New("malformed record data") // FormatRecord formats a record for serving to a client // in a lookup response or data tile. // // The encoded form is the record ID as a single number, // then the text of the record, and then a terminating blank line. // Record text must be valid UTF-8 and must not contain any ASCII control // characters (those below U+0020) other than newline (U+000A). // It must end in a terminating newline and not contain any blank lines. func FormatRecord(id int64, text []byte) (msg []byte, err error) { if !isValidRecordText(text) { return nil, errMalformedRecord } msg = []byte(fmt.Sprintf("%d\n", id)) msg = append(msg, text...) msg = append(msg, '\n') return msg, nil } // isValidRecordText reports whether text is syntactically valid record text. func isValidRecordText(text []byte) bool { var last rune for i := 0; i < len(text); { r, size := utf8.DecodeRune(text[i:]) if r < 0x20 && r != '\n' || r == utf8.RuneError && size == 1 || last == '\n' && r == '\n' { return false } i += size last = r } if last != '\n' { return false } return true } // ParseRecord parses a record description at the start of text, // stopping immediately after the terminating blank line. // It returns the record id, the record text, and the remainder of text. func ParseRecord(msg []byte) (id int64, text, rest []byte, err error) { // Leading record id. 
i := bytes.IndexByte(msg, '\n') if i < 0 { return 0, nil, nil, errMalformedRecord } id, err = strconv.ParseInt(string(msg[:i]), 10, 64) if err != nil { return 0, nil, nil, errMalformedRecord } msg = msg[i+1:] // Record text. i = bytes.Index(msg, []byte("\n\n")) if i < 0 { return 0, nil, nil, errMalformedRecord } text, rest = msg[:i+1], msg[i+2:] if !isValidRecordText(text) { return 0, nil, nil, errMalformedRecord } return id, text, rest, nil } mod-0.19.0/sumdb/tlog/note_test.go000066400000000000000000000076041463702072700170110ustar00rootroot00000000000000// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package tlog import ( "strings" "testing" ) func TestFormatTree(t *testing.T) { n := int64(123456789012) h := RecordHash([]byte("hello world")) golden := "go.sum database tree\n123456789012\nTszzRgjTG6xce+z2AG31kAXYKBgQVtCSCE40HmuwBb0=\n" b := FormatTree(Tree{n, h}) if string(b) != golden { t.Errorf("FormatTree(...) = %q, want %q", b, golden) } } func TestParseTree(t *testing.T) { in := "go.sum database tree\n123456789012\nTszzRgjTG6xce+z2AG31kAXYKBgQVtCSCE40HmuwBb0=\n" goldH := RecordHash([]byte("hello world")) goldN := int64(123456789012) tree, err := ParseTree([]byte(in)) if tree.N != goldN || tree.Hash != goldH || err != nil { t.Fatalf("ParseTree(...) = Tree{%d, %v}, %v, want Tree{%d, %v}, nil", tree.N, tree.Hash, err, goldN, goldH) } // Check invalid trees. var badTrees = []string{ "not-" + in, "go.sum database tree\n0xabcdef\nTszzRgjTG6xce+z2AG31kAXYKBgQVtCSCE40HmuwBb0=\n", "go.sum database tree\n123456789012\nTszzRgjTG6xce+z2AG31kAXYKBgQVtCSCE40HmuwBTOOBIG=\n", } for _, bad := range badTrees { _, err := ParseTree([]byte(bad)) if err == nil { t.Fatalf("ParseTree(%q) succeeded, want failure", in) } } // Check junk on end is ignored. 
var goodTrees = []string{ in + "JOE", in + "JOE\n", in + strings.Repeat("JOE\n", 1000), } for _, good := range goodTrees { _, err := ParseTree([]byte(good)) if tree.N != goldN || tree.Hash != goldH || err != nil { t.Fatalf("ParseTree(...+%q) = Tree{%d, %v}, %v, want Tree{%d, %v}, nil", good[len(in):], tree.N, tree.Hash, err, goldN, goldH) } } } func TestFormatRecord(t *testing.T) { id := int64(123456789012) text := "hello, world\n" golden := "123456789012\nhello, world\n\n" msg, err := FormatRecord(id, []byte(text)) if err != nil { t.Fatalf("FormatRecord: %v", err) } if string(msg) != golden { t.Fatalf("FormatRecord(...) = %q, want %q", msg, golden) } var badTexts = []string{ "", "hello\nworld", "hello\n\nworld\n", "hello\x01world\n", } for _, bad := range badTexts { msg, err := FormatRecord(id, []byte(bad)) if err == nil { t.Errorf("FormatRecord(id, %q) = %q, want error", bad, msg) } } } func TestParseRecord(t *testing.T) { in := "123456789012\nhello, world\n\njunk on end\x01\xff" goldID := int64(123456789012) goldText := "hello, world\n" goldRest := "junk on end\x01\xff" id, text, rest, err := ParseRecord([]byte(in)) if id != goldID || string(text) != goldText || string(rest) != goldRest || err != nil { t.Fatalf("ParseRecord(%q) = %d, %q, %q, %v, want %d, %q, %q, nil", in, id, text, rest, err, goldID, goldText, goldRest) } in = "123456789012\nhello, world\n\n" id, text, rest, err = ParseRecord([]byte(in)) if id != goldID || string(text) != goldText || len(rest) != 0 || err != nil { t.Fatalf("ParseRecord(%q) = %d, %q, %q, %v, want %d, %q, %q, nil", in, id, text, rest, err, goldID, goldText, "") } if rest == nil { t.Fatalf("ParseRecord(%q): rest = []byte(nil), want []byte{}", in) } // Check invalid records. 
var badRecords = []string{ "not-" + in, "123\nhello\x01world\n\n", "123\nhello\xffworld\n\n", "123\nhello world\n", "0x123\nhello world\n\n", } for _, bad := range badRecords { id, text, rest, err := ParseRecord([]byte(bad)) if err == nil { t.Fatalf("ParseRecord(%q) = %d, %q, %q, nil, want error", in, id, text, rest) } } } // FuzzParseTree tests that ParseTree never crashes func FuzzParseTree(f *testing.F) { f.Add([]byte("go.sum database tree\n123456789012\nTszzRgjTG6xce+z2AG31kAXYKBgQVtCSCE40HmuwBb0=\n")) f.Fuzz(func(t *testing.T, text []byte) { ParseTree(text) }) } // FuzzParseRecord tests that ParseRecord never crashes func FuzzParseRecord(f *testing.F) { f.Add([]byte("12345\nhello\n\n")) f.Fuzz(func(t *testing.T, msg []byte) { ParseRecord(msg) }) } mod-0.19.0/sumdb/tlog/tile.go000066400000000000000000000320441463702072700157360ustar00rootroot00000000000000// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package tlog import ( "fmt" "strconv" "strings" ) // A Tile is a description of a transparency log tile. // A tile of height H at level L offset N lists W consecutive hashes // at level H*L of the tree starting at offset N*(2**H). // A complete tile lists 2**H hashes; a partial tile lists fewer. // Note that a tile represents the entire subtree of height H // with those hashes as the leaves. The levels above H*L // can be reconstructed by hashing the leaves. // // Each Tile can be encoded as a “tile coordinate path†// of the form tile/H/L/NNN[.p/W]. // The .p/W suffix is present only for partial tiles, meaning W < 2**H. // The NNN element is an encoding of N into 3-digit path elements. // All but the last path element begins with an "x". // For example, // Tile{H: 3, L: 4, N: 1234067, W: 1}'s path // is tile/3/4/x001/x234/067.p/1, and // Tile{H: 3, L: 4, N: 1234067, W: 8}'s path // is tile/3/4/x001/x234/067. 
// See the [Tile.Path] method and the [ParseTilePath] function. // // The special level L=-1 holds raw record data instead of hashes. // In this case, the level encodes into a tile path as the path element // "data" instead of "-1". // // See also https://golang.org/design/25530-sumdb#checksum-database // and https://research.swtch.com/tlog#tiling_a_log. type Tile struct { H int // height of tile (1 ≤ H ≤ 30) L int // level in tiling (-1 ≤ L ≤ 63) N int64 // number within level (0 ≤ N, unbounded) W int // width of tile (1 ≤ W ≤ 2**H; 2**H is complete tile) } // TileForIndex returns the tile of fixed height h ≥ 1 // and least width storing the given hash storage index. // // If h ≤ 0, [TileForIndex] panics. func TileForIndex(h int, index int64) Tile { if h <= 0 { panic(fmt.Sprintf("TileForIndex: invalid height %d", h)) } t, _, _ := tileForIndex(h, index) return t } // tileForIndex returns the tile of height h ≥ 1 // storing the given hash index, which can be // reconstructed using tileHash(data[start:end]). func tileForIndex(h int, index int64) (t Tile, start, end int) { level, n := SplitStoredHashIndex(index) t.H = h t.L = level / h level -= t.L * h // now level within tile t.N = n << uint(level) >> uint(t.H) n -= t.N << uint(t.H) >> uint(level) // now n within tile at level t.W = int((n + 1) << uint(level)) return t, int(n< 30 || t.L < 0 || t.L >= 64 || t.W < 1 || t.W > 1<>(H*level) > 0; level++ { oldN := oldTreeSize >> (H * level) newN := newTreeSize >> (H * level) if oldN == newN { continue } for n := oldN >> H; n < newN>>H; n++ { tiles = append(tiles, Tile{H: h, L: int(level), N: n, W: 1 << H}) } n := newN >> H if w := int(newN - n< 0 { tiles = append(tiles, Tile{H: h, L: int(level), N: n, W: w}) } } return tiles } // ReadTileData reads the hashes for tile t from r // and returns the corresponding tile data. 
func ReadTileData(t Tile, r HashReader) ([]byte, error) { size := t.W if size == 0 { size = 1 << uint(t.H) } start := t.N << uint(t.H) indexes := make([]int64, size) for i := 0; i < size; i++ { indexes[i] = StoredHashIndex(t.H*t.L, start+int64(i)) } hashes, err := r.ReadHashes(indexes) if err != nil { return nil, err } if len(hashes) != len(indexes) { return nil, fmt.Errorf("tlog: ReadHashes(%d indexes) = %d hashes", len(indexes), len(hashes)) } tile := make([]byte, size*HashSize) for i := 0; i < size; i++ { copy(tile[i*HashSize:], hashes[i][:]) } return tile, nil } // To limit the size of any particular directory listing, // we encode the (possibly very large) number N // by encoding three digits at a time. // For example, 123456789 encodes as x123/x456/789. // Each directory has at most 1000 each xNNN, NNN, and NNN.p children, // so there are at most 3000 entries in any one directory. const pathBase = 1000 // Path returns a tile coordinate path describing t. func (t Tile) Path() string { n := t.N nStr := fmt.Sprintf("%03d", n%pathBase) for n >= pathBase { n /= pathBase nStr = fmt.Sprintf("x%03d/%s", n%pathBase, nStr) } pStr := "" if t.W != 1< 30 { return Tile{}, &badPathError{path} } w := 1 << uint(h) if dotP := f[len(f)-2]; strings.HasSuffix(dotP, ".p") { ww, err := strconv.Atoi(f[len(f)-1]) if err != nil || ww <= 0 || ww >= w { return Tile{}, &badPathError{path} } w = ww f[len(f)-2] = dotP[:len(dotP)-len(".p")] f = f[:len(f)-1] } f = f[3:] n := int64(0) for _, s := range f { nn, err := strconv.Atoi(strings.TrimPrefix(s, "x")) if err != nil || nn < 0 || nn >= pathBase { return Tile{}, &badPathError{path} } n = n*pathBase + int64(nn) } if isData { l = -1 } t := Tile{H: h, L: l, N: n, W: w} if path != t.Path() { return Tile{}, &badPathError{path} } return t, nil } type badPathError struct { path string } func (e *badPathError) Error() string { return fmt.Sprintf("malformed tile path %q", e.path) } // A TileReader reads tiles from a go.sum database log. 
type TileReader interface { // Height returns the height of the available tiles. Height() int // ReadTiles returns the data for each requested tile. // If ReadTiles returns err == nil, it must also return // a data record for each tile (len(data) == len(tiles)) // and each data record must be the correct length // (len(data[i]) == tiles[i].W*HashSize). // // An implementation of ReadTiles typically reads // them from an on-disk cache or else from a remote // tile server. Tile data downloaded from a server should // be considered suspect and not saved into a persistent // on-disk cache before returning from ReadTiles. // When the client confirms the validity of the tile data, // it will call SaveTiles to signal that they can be safely // written to persistent storage. // See also https://research.swtch.com/tlog#authenticating_tiles. ReadTiles(tiles []Tile) (data [][]byte, err error) // SaveTiles informs the TileReader that the tile data // returned by ReadTiles has been confirmed as valid // and can be saved in persistent storage (on disk). SaveTiles(tiles []Tile, data [][]byte) } // TileHashReader returns a HashReader that satisfies requests // by loading tiles of the given tree. // // The returned [HashReader] checks that loaded tiles are // valid for the given tree. Therefore, any hashes returned // by the HashReader are already proven to be in the tree. func TileHashReader(tree Tree, tr TileReader) HashReader { return &tileHashReader{tree: tree, tr: tr} } type tileHashReader struct { tree Tree tr TileReader } // tileParent returns t's k'th tile parent in the tiles for a tree of size n. // If there is no such parent, tileParent returns Tile{}. 
func tileParent(t Tile, k int, n int64) Tile { t.L += k t.N >>= uint(k * t.H) t.W = 1 << uint(t.H) if max := n >> uint(t.L*t.H); t.N<= max { if t.N<= max { return Tile{} } t.W = int(max - t.N<= StoredHashIndex(0, r.tree.N) { return nil, fmt.Errorf("indexes not in tree") } tile, _, _ := tileForIndex(h, x) // Walk up parent tiles until we find one we've requested. // That one will be authenticated. k := 0 for ; ; k++ { p := tileParent(tile, k, r.tree.N) if j, ok := tileOrder[p]; ok { if k == 0 { indexTileOrder[i] = j } break } } // Walk back down recording child tiles after parents. // This loop ends by revisiting the tile for this index // (tileParent(tile, 0, r.tree.N)) unless k == 0, in which // case the previous loop did it. for k--; k >= 0; k-- { p := tileParent(tile, k, r.tree.N) if p.W != 1<= 0; i-- { h, err := HashFromTile(tiles[stxTileOrder[i]], data[stxTileOrder[i]], stx[i]) if err != nil { return nil, err } th = NodeHash(h, th) } if th != r.tree.Hash { // The tiles do not support the tree hash. // We know at least one is wrong, but not which one. return nil, fmt.Errorf("downloaded inconsistent tile") } // Authenticate full tiles against their parents. for i := len(stx); i < len(tiles); i++ { tile := tiles[i] p := tileParent(tile, 1, r.tree.N) j, ok := tileOrder[p] if !ok { return nil, fmt.Errorf("bad math in tileHashReader %d %v: lost parent of %v", r.tree.N, indexes, tile) } h, err := HashFromTile(p, data[j], StoredHashIndex(p.L*p.H, tile.N)) if err != nil { return nil, fmt.Errorf("bad math in tileHashReader %d %v: lost hash of %v: %v", r.tree.N, indexes, tile, err) } if h != tileHash(data[i]) { return nil, fmt.Errorf("downloaded inconsistent tile") } } // Now we have all the tiles needed for the requested hashes, // and we've authenticated the full tile set against the trusted tree hash. r.tr.SaveTiles(tiles, data) // Pull out the requested hashes. 
hashes := make([]Hash, len(indexes)) for i, x := range indexes { j := indexTileOrder[i] h, err := HashFromTile(tiles[j], data[j], x) if err != nil { return nil, fmt.Errorf("bad math in tileHashReader %d %v: lost hash %v: %v", r.tree.N, indexes, x, err) } hashes[i] = h } return hashes, nil } mod-0.19.0/sumdb/tlog/tile_test.go000066400000000000000000000021751463702072700167770ustar00rootroot00000000000000// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package tlog import ( "fmt" "testing" ) // FuzzParseTilePath tests that ParseTilePath never crashes func FuzzParseTilePath(f *testing.F) { f.Add("tile/4/0/001") f.Add("tile/4/0/001.p/5") f.Add("tile/3/5/x123/x456/078") f.Add("tile/3/5/x123/x456/078.p/2") f.Add("tile/1/0/x003/x057/500") f.Add("tile/3/5/123/456/078") f.Add("tile/3/-1/123/456/078") f.Add("tile/1/data/x003/x057/500") f.Fuzz(func(t *testing.T, path string) { ParseTilePath(path) }) } func TestNewTilesForSize(t *testing.T) { for _, tt := range []struct { old, new int64 want int }{ {1, 1, 0}, {100, 101, 1}, {1023, 1025, 3}, {1024, 1030, 1}, {1030, 2000, 1}, {1030, 10000, 10}, {49516517, 49516586, 3}, } { t.Run(fmt.Sprintf("%d-%d", tt.old, tt.new), func(t *testing.T) { tiles := NewTiles(10, tt.old, tt.new) if got := len(tiles); got != tt.want { t.Errorf("got %d, want %d", got, tt.want) for _, tile := range tiles { t.Logf("%+v", tile) } } }) } } mod-0.19.0/sumdb/tlog/tlog.go000066400000000000000000000442041463702072700157470ustar00rootroot00000000000000// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package tlog implements a tamper-evident log // used in the Go module go.sum database server. // // This package follows the design of Certificate Transparency (RFC 6962) // and its proofs are compatible with that system. 
// See TestCertificateTransparency. package tlog import ( "crypto/sha256" "encoding/base64" "errors" "fmt" "math/bits" ) // A Hash is a hash identifying a log record or tree root. type Hash [HashSize]byte // HashSize is the size of a Hash in bytes. const HashSize = 32 // String returns a base64 representation of the hash for printing. func (h Hash) String() string { return base64.StdEncoding.EncodeToString(h[:]) } // MarshalJSON marshals the hash as a JSON string containing the base64-encoded hash. func (h Hash) MarshalJSON() ([]byte, error) { return []byte(`"` + h.String() + `"`), nil } // UnmarshalJSON unmarshals a hash from JSON string containing the a base64-encoded hash. func (h *Hash) UnmarshalJSON(data []byte) error { if len(data) != 1+44+1 || data[0] != '"' || data[len(data)-2] != '=' || data[len(data)-1] != '"' { return errors.New("cannot decode hash") } // As of Go 1.12, base64.StdEncoding.Decode insists on // slicing into target[33:] even when it only writes 32 bytes. // Since we already checked that the hash ends in = above, // we can use base64.RawStdEncoding with the = removed; // RawStdEncoding does not exhibit the same bug. // We decode into a temporary to avoid writing anything to *h // unless the entire input is well-formed. var tmp Hash n, err := base64.RawStdEncoding.Decode(tmp[:], data[1:len(data)-2]) if err != nil || n != HashSize { return errors.New("cannot decode hash") } *h = tmp return nil } // ParseHash parses the base64-encoded string form of a hash. func ParseHash(s string) (Hash, error) { data, err := base64.StdEncoding.DecodeString(s) if err != nil || len(data) != HashSize { return Hash{}, fmt.Errorf("malformed hash") } var h Hash copy(h[:], data) return h, nil } // maxpow2 returns k, the maximum power of 2 smaller than n, // as well as l = logâ‚‚ k (so k = 1< 0; l-- { n = 2*n + 1 } // Level 0's n'th hash is written at n+n/2+n/4+... (eventually n/2â± hits zero). 
i := int64(0) for ; n > 0; n >>= 1 { i += n } return i + int64(level) } // SplitStoredHashIndex is the inverse of [StoredHashIndex]. // That is, SplitStoredHashIndex(StoredHashIndex(level, n)) == level, n. func SplitStoredHashIndex(index int64) (level int, n int64) { // Determine level 0 record before index. // StoredHashIndex(0, n) < 2*n, // so the n we want is in [index/2, index/2+logâ‚‚(index)]. n = index / 2 indexN := StoredHashIndex(0, n) if indexN > index { panic("bad math") } for { // Each new record n adds 1 + trailingZeros(n) hashes. x := indexN + 1 + int64(bits.TrailingZeros64(uint64(n+1))) if x > index { break } n++ indexN = x } // The hash we want was committed with record n, // meaning it is one of (0, n), (1, n/2), (2, n/4), ... level = int(index - indexN) return level, n >> uint(level) } // StoredHashCount returns the number of stored hashes // that are expected for a tree with n records. func StoredHashCount(n int64) int64 { if n == 0 { return 0 } // The tree will have the hashes up to the last leaf hash. numHash := StoredHashIndex(0, n-1) + 1 // And it will have any hashes for subtrees completed by that leaf. for i := uint64(n - 1); i&1 != 0; i >>= 1 { numHash++ } return numHash } // StoredHashes returns the hashes that must be stored when writing // record n with the given data. The hashes should be stored starting // at StoredHashIndex(0, n). The result will have at most 1 + logâ‚‚ n hashes, // but it will average just under two per call for a sequence of calls for n=1..k. // // StoredHashes may read up to log n earlier hashes from r // in order to compute hashes for completed subtrees. func StoredHashes(n int64, data []byte, r HashReader) ([]Hash, error) { return StoredHashesForRecordHash(n, RecordHash(data), r) } // StoredHashesForRecordHash is like [StoredHashes] but takes // as its second argument RecordHash(data) instead of data itself. 
func StoredHashesForRecordHash(n int64, h Hash, r HashReader) ([]Hash, error) { // Start with the record hash. hashes := []Hash{h} // Build list of indexes needed for hashes for completed subtrees. // Each trailing 1 bit in the binary representation of n completes a subtree // and consumes a hash from an adjacent subtree. m := int(bits.TrailingZeros64(uint64(n + 1))) indexes := make([]int64, m) for i := 0; i < m; i++ { // We arrange indexes in sorted order. // Note that n>>i is always odd. indexes[m-1-i] = StoredHashIndex(i, n>>uint(i)-1) } // Fetch hashes. old, err := r.ReadHashes(indexes) if err != nil { return nil, err } if len(old) != len(indexes) { return nil, fmt.Errorf("tlog: ReadHashes(%d indexes) = %d hashes", len(indexes), len(old)) } // Build new hashes. for i := 0; i < m; i++ { h = NodeHash(old[m-1-i], h) hashes = append(hashes, h) } return hashes, nil } // A HashReader can read hashes for nodes in the log's tree structure. type HashReader interface { // ReadHashes returns the hashes with the given stored hash indexes // (see StoredHashIndex and SplitStoredHashIndex). // ReadHashes must return a slice of hashes the same length as indexes, // or else it must return a non-nil error. // ReadHashes may run faster if indexes is sorted in increasing order. ReadHashes(indexes []int64) ([]Hash, error) } // A HashReaderFunc is a function implementing [HashReader]. type HashReaderFunc func([]int64) ([]Hash, error) func (f HashReaderFunc) ReadHashes(indexes []int64) ([]Hash, error) { return f(indexes) } // emptyHash is the hash of the empty tree, per RFC 6962, Section 2.1. // It is the hash of the empty string. 
var emptyHash = Hash{ 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55, } // TreeHash computes the hash for the root of the tree with n records, // using the HashReader to obtain previously stored hashes // (those returned by StoredHashes during the writes of those n records). // TreeHash makes a single call to ReadHash requesting at most 1 + logâ‚‚ n hashes. func TreeHash(n int64, r HashReader) (Hash, error) { if n == 0 { return emptyHash, nil } indexes := subTreeIndex(0, n, nil) hashes, err := r.ReadHashes(indexes) if err != nil { return Hash{}, err } if len(hashes) != len(indexes) { return Hash{}, fmt.Errorf("tlog: ReadHashes(%d indexes) = %d hashes", len(indexes), len(hashes)) } hash, hashes := subTreeHash(0, n, hashes) if len(hashes) != 0 { panic("tlog: bad index math in TreeHash") } return hash, nil } // subTreeIndex returns the storage indexes needed to compute // the hash for the subtree containing records [lo, hi), // appending them to need and returning the result. // See https://tools.ietf.org/html/rfc6962#section-2.1 func subTreeIndex(lo, hi int64, need []int64) []int64 { // See subTreeHash below for commentary. for lo < hi { k, level := maxpow2(hi - lo + 1) if lo&(k-1) != 0 { panic("tlog: bad math in subTreeIndex") } need = append(need, StoredHashIndex(level, lo>>uint(level))) lo += k } return need } // subTreeHash computes the hash for the subtree containing records [lo, hi), // assuming that hashes are the hashes corresponding to the indexes // returned by subTreeIndex(lo, hi). // It returns any leftover hashes. func subTreeHash(lo, hi int64, hashes []Hash) (Hash, []Hash) { // Repeatedly partition the tree into a left side with 2^level nodes, // for as large a level as possible, and a right side with the fringe. // The left hash is stored directly and can be read from storage. 
// The right side needs further computation. numTree := 0 for lo < hi { k, _ := maxpow2(hi - lo + 1) if lo&(k-1) != 0 || lo >= hi { panic("tlog: bad math in subTreeHash") } numTree++ lo += k } if len(hashes) < numTree { panic("tlog: bad index math in subTreeHash") } // Reconstruct hash. h := hashes[numTree-1] for i := numTree - 2; i >= 0; i-- { h = NodeHash(hashes[i], h) } return h, hashes[numTree:] } // A RecordProof is a verifiable proof that a particular log root contains a particular record. // RFC 6962 calls this a “Merkle audit path.†type RecordProof []Hash // ProveRecord returns the proof that the tree of size t contains the record with index n. func ProveRecord(t, n int64, r HashReader) (RecordProof, error) { if t < 0 || n < 0 || n >= t { return nil, fmt.Errorf("tlog: invalid inputs in ProveRecord") } indexes := leafProofIndex(0, t, n, nil) if len(indexes) == 0 { return RecordProof{}, nil } hashes, err := r.ReadHashes(indexes) if err != nil { return nil, err } if len(hashes) != len(indexes) { return nil, fmt.Errorf("tlog: ReadHashes(%d indexes) = %d hashes", len(indexes), len(hashes)) } p, hashes := leafProof(0, t, n, hashes) if len(hashes) != 0 { panic("tlog: bad index math in ProveRecord") } return p, nil } // leafProofIndex builds the list of indexes needed to construct the proof // that leaf n is contained in the subtree with leaves [lo, hi). // It appends those indexes to need and returns the result. // See https://tools.ietf.org/html/rfc6962#section-2.1.1 func leafProofIndex(lo, hi, n int64, need []int64) []int64 { // See leafProof below for commentary. 
if !(lo <= n && n < hi) { panic("tlog: bad math in leafProofIndex") } if lo+1 == hi { return need } if k, _ := maxpow2(hi - lo); n < lo+k { need = leafProofIndex(lo, lo+k, n, need) need = subTreeIndex(lo+k, hi, need) } else { need = subTreeIndex(lo, lo+k, need) need = leafProofIndex(lo+k, hi, n, need) } return need } // leafProof constructs the proof that leaf n is contained in the subtree with leaves [lo, hi). // It returns any leftover hashes as well. // See https://tools.ietf.org/html/rfc6962#section-2.1.1 func leafProof(lo, hi, n int64, hashes []Hash) (RecordProof, []Hash) { // We must have lo <= n < hi or else the code here has a bug. if !(lo <= n && n < hi) { panic("tlog: bad math in leafProof") } if lo+1 == hi { // n == lo // Reached the leaf node. // The verifier knows what the leaf hash is, so we don't need to send it. return RecordProof{}, hashes } // Walk down the tree toward n. // Record the hash of the path not taken (needed for verifying the proof). var p RecordProof var th Hash if k, _ := maxpow2(hi - lo); n < lo+k { // n is on left side p, hashes = leafProof(lo, lo+k, n, hashes) th, hashes = subTreeHash(lo+k, hi, hashes) } else { // n is on right side th, hashes = subTreeHash(lo, lo+k, hashes) p, hashes = leafProof(lo+k, hi, n, hashes) } return append(p, th), hashes } var errProofFailed = errors.New("invalid transparency proof") // CheckRecord verifies that p is a valid proof that the tree of size t // with hash th has an n'th record with hash h. func CheckRecord(p RecordProof, t int64, th Hash, n int64, h Hash) error { if t < 0 || n < 0 || n >= t { return fmt.Errorf("tlog: invalid inputs in CheckRecord") } th2, err := runRecordProof(p, 0, t, n, h) if err != nil { return err } if th2 == th { return nil } return errProofFailed } // runRecordProof runs the proof p that leaf n is contained in the subtree with leaves [lo, hi). // Running the proof means constructing and returning the implied hash of that // subtree. 
func runRecordProof(p RecordProof, lo, hi, n int64, leafHash Hash) (Hash, error) { // We must have lo <= n < hi or else the code here has a bug. if !(lo <= n && n < hi) { panic("tlog: bad math in runRecordProof") } if lo+1 == hi { // m == lo // Reached the leaf node. // The proof must not have any unnecessary hashes. if len(p) != 0 { return Hash{}, errProofFailed } return leafHash, nil } if len(p) == 0 { return Hash{}, errProofFailed } k, _ := maxpow2(hi - lo) if n < lo+k { th, err := runRecordProof(p[:len(p)-1], lo, lo+k, n, leafHash) if err != nil { return Hash{}, err } return NodeHash(th, p[len(p)-1]), nil } else { th, err := runRecordProof(p[:len(p)-1], lo+k, hi, n, leafHash) if err != nil { return Hash{}, err } return NodeHash(p[len(p)-1], th), nil } } // A TreeProof is a verifiable proof that a particular log tree contains // as a prefix all records present in an earlier tree. // RFC 6962 calls this a “Merkle consistency proof.†type TreeProof []Hash // ProveTree returns the proof that the tree of size t contains // as a prefix all the records from the tree of smaller size n. func ProveTree(t, n int64, h HashReader) (TreeProof, error) { if t < 1 || n < 1 || n > t { return nil, fmt.Errorf("tlog: invalid inputs in ProveTree") } indexes := treeProofIndex(0, t, n, nil) if len(indexes) == 0 { return TreeProof{}, nil } hashes, err := h.ReadHashes(indexes) if err != nil { return nil, err } if len(hashes) != len(indexes) { return nil, fmt.Errorf("tlog: ReadHashes(%d indexes) = %d hashes", len(indexes), len(hashes)) } p, hashes := treeProof(0, t, n, hashes) if len(hashes) != 0 { panic("tlog: bad index math in ProveTree") } return p, nil } // treeProofIndex builds the list of indexes needed to construct // the sub-proof related to the subtree containing records [lo, hi). // See https://tools.ietf.org/html/rfc6962#section-2.1.2. func treeProofIndex(lo, hi, n int64, need []int64) []int64 { // See treeProof below for commentary. 
if !(lo < n && n <= hi) { panic("tlog: bad math in treeProofIndex") } if n == hi { if lo == 0 { return need } return subTreeIndex(lo, hi, need) } if k, _ := maxpow2(hi - lo); n <= lo+k { need = treeProofIndex(lo, lo+k, n, need) need = subTreeIndex(lo+k, hi, need) } else { need = subTreeIndex(lo, lo+k, need) need = treeProofIndex(lo+k, hi, n, need) } return need } // treeProof constructs the sub-proof related to the subtree containing records [lo, hi). // It returns any leftover hashes as well. // See https://tools.ietf.org/html/rfc6962#section-2.1.2. func treeProof(lo, hi, n int64, hashes []Hash) (TreeProof, []Hash) { // We must have lo < n <= hi or else the code here has a bug. if !(lo < n && n <= hi) { panic("tlog: bad math in treeProof") } // Reached common ground. if n == hi { if lo == 0 { // This subtree corresponds exactly to the old tree. // The verifier knows that hash, so we don't need to send it. return TreeProof{}, hashes } th, hashes := subTreeHash(lo, hi, hashes) return TreeProof{th}, hashes } // Interior node for the proof. // Decide whether to walk down the left or right side. var p TreeProof var th Hash if k, _ := maxpow2(hi - lo); n <= lo+k { // m is on left side p, hashes = treeProof(lo, lo+k, n, hashes) th, hashes = subTreeHash(lo+k, hi, hashes) } else { // m is on right side th, hashes = subTreeHash(lo, lo+k, hashes) p, hashes = treeProof(lo+k, hi, n, hashes) } return append(p, th), hashes } // CheckTree verifies that p is a valid proof that the tree of size t with hash th // contains as a prefix the tree of size n with hash h. 
func CheckTree(p TreeProof, t int64, th Hash, n int64, h Hash) error { if t < 1 || n < 1 || n > t { return fmt.Errorf("tlog: invalid inputs in CheckTree") } h2, th2, err := runTreeProof(p, 0, t, n, h) if err != nil { return err } if th2 == th && h2 == h { return nil } return errProofFailed } // runTreeProof runs the sub-proof p related to the subtree containing records [lo, hi), // where old is the hash of the old tree with n records. // Running the proof means constructing and returning the implied hashes of that // subtree in both the old and new tree. func runTreeProof(p TreeProof, lo, hi, n int64, old Hash) (Hash, Hash, error) { // We must have lo < n <= hi or else the code here has a bug. if !(lo < n && n <= hi) { panic("tlog: bad math in runTreeProof") } // Reached common ground. if n == hi { if lo == 0 { if len(p) != 0 { return Hash{}, Hash{}, errProofFailed } return old, old, nil } if len(p) != 1 { return Hash{}, Hash{}, errProofFailed } return p[0], p[0], nil } if len(p) == 0 { return Hash{}, Hash{}, errProofFailed } // Interior node for the proof. k, _ := maxpow2(hi - lo) if n <= lo+k { oh, th, err := runTreeProof(p[:len(p)-1], lo, lo+k, n, old) if err != nil { return Hash{}, Hash{}, err } return oh, NodeHash(th, p[len(p)-1]), nil } else { oh, th, err := runTreeProof(p[:len(p)-1], lo+k, hi, n, old) if err != nil { return Hash{}, Hash{}, err } return NodeHash(p[len(p)-1], oh), NodeHash(p[len(p)-1], th), nil } } mod-0.19.0/sumdb/tlog/tlog_test.go000066400000000000000000000176141463702072700170130ustar00rootroot00000000000000// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package tlog import ( "bytes" "crypto/sha256" "fmt" "testing" ) type testHashStorage []Hash func (t testHashStorage) ReadHash(level int, n int64) (Hash, error) { return t[StoredHashIndex(level, n)], nil } func (t testHashStorage) ReadHashes(index []int64) ([]Hash, error) { // It's not required by HashReader that indexes be in increasing order, // but check that the functions we are testing only ever ask for // indexes in increasing order. for i := 1; i < len(index); i++ { if index[i-1] >= index[i] { panic("indexes out of order") } } out := make([]Hash, len(index)) for i, x := range index { out[i] = t[x] } return out, nil } type testTilesStorage struct { unsaved int m map[Tile][]byte } func (t testTilesStorage) Height() int { return 2 } func (t *testTilesStorage) SaveTiles(tiles []Tile, data [][]byte) { t.unsaved -= len(tiles) } func (t *testTilesStorage) ReadTiles(tiles []Tile) ([][]byte, error) { out := make([][]byte, len(tiles)) for i, tile := range tiles { out[i] = t.m[tile] } t.unsaved += len(tiles) return out, nil } func TestTree(t *testing.T) { var trees []Hash var leafhashes []Hash var storage testHashStorage tiles := make(map[Tile][]byte) const testH = 2 for i := int64(0); i < 100; i++ { data := []byte(fmt.Sprintf("leaf %d", i)) hashes, err := StoredHashes(i, data, storage) if err != nil { t.Fatal(err) } leafhashes = append(leafhashes, RecordHash(data)) oldStorage := len(storage) storage = append(storage, hashes...) 
if count := StoredHashCount(i + 1); count != int64(len(storage)) { t.Errorf("StoredHashCount(%d) = %d, have %d StoredHashes", i+1, count, len(storage)) } th, err := TreeHash(i+1, storage) if err != nil { t.Fatal(err) } for _, tile := range NewTiles(testH, i, i+1) { data, err := ReadTileData(tile, storage) if err != nil { t.Fatal(err) } old := Tile{H: tile.H, L: tile.L, N: tile.N, W: tile.W - 1} oldData := tiles[old] if len(oldData) != len(data)-HashSize || !bytes.Equal(oldData, data[:len(oldData)]) { t.Fatalf("tile %v not extending earlier tile %v", tile.Path(), old.Path()) } tiles[tile] = data } for _, tile := range NewTiles(testH, 0, i+1) { data, err := ReadTileData(tile, storage) if err != nil { t.Fatal(err) } if !bytes.Equal(tiles[tile], data) { t.Fatalf("mismatch at %+v", tile) } } for _, tile := range NewTiles(testH, i/2, i+1) { data, err := ReadTileData(tile, storage) if err != nil { t.Fatal(err) } if !bytes.Equal(tiles[tile], data) { t.Fatalf("mismatch at %+v", tile) } } // Check that all the new hashes are readable from their tiles. for j := oldStorage; j < len(storage); j++ { tile := TileForIndex(testH, int64(j)) data, ok := tiles[tile] if !ok { t.Log(NewTiles(testH, 0, i+1)) t.Fatalf("TileForIndex(%d, %d) = %v, not yet stored (i=%d, stored %d)", testH, j, tile.Path(), i, len(storage)) continue } h, err := HashFromTile(tile, data, int64(j)) if err != nil { t.Fatal(err) } if h != storage[j] { t.Errorf("HashFromTile(%v, %d) = %v, want %v", tile.Path(), int64(j), h, storage[j]) } } trees = append(trees, th) // Check that leaf proofs work, for all trees and leaves so far. 
for j := int64(0); j <= i; j++ { p, err := ProveRecord(i+1, j, storage) if err != nil { t.Fatalf("ProveRecord(%d, %d): %v", i+1, j, err) } if err := CheckRecord(p, i+1, th, j, leafhashes[j]); err != nil { t.Fatalf("CheckRecord(%d, %d): %v", i+1, j, err) } for k := range p { p[k][0] ^= 1 if err := CheckRecord(p, i+1, th, j, leafhashes[j]); err == nil { t.Fatalf("CheckRecord(%d, %d) succeeded with corrupt proof hash #%d!", i+1, j, k) } p[k][0] ^= 1 } } // Check that leaf proofs work using TileReader. // To prove a leaf that way, all you have to do is read and verify its hash. storage := &testTilesStorage{m: tiles} thr := TileHashReader(Tree{i + 1, th}, storage) for j := int64(0); j <= i; j++ { h, err := thr.ReadHashes([]int64{StoredHashIndex(0, j)}) if err != nil { t.Fatalf("TileHashReader(%d).ReadHashes(%d): %v", i+1, j, err) } if h[0] != leafhashes[j] { t.Fatalf("TileHashReader(%d).ReadHashes(%d) returned wrong hash", i+1, j) } // Even though reading the hash suffices, // check we can generate the proof too. p, err := ProveRecord(i+1, j, thr) if err != nil { t.Fatalf("ProveRecord(%d, %d, TileHashReader(%d)): %v", i+1, j, i+1, err) } if err := CheckRecord(p, i+1, th, j, leafhashes[j]); err != nil { t.Fatalf("CheckRecord(%d, %d, TileHashReader(%d)): %v", i+1, j, i+1, err) } } if storage.unsaved != 0 { t.Fatalf("TileHashReader(%d) did not save %d tiles", i+1, storage.unsaved) } // Check that ReadHashes will give an error if the index is not in the tree. if _, err := thr.ReadHashes([]int64{(i + 1) * 2}); err == nil { t.Fatalf("TileHashReader(%d).ReadHashes(%d) for index not in tree , want err", i, i+1) } if storage.unsaved != 0 { t.Fatalf("TileHashReader(%d) did not save %d tiles", i+1, storage.unsaved) } // Check that tree proofs work, for all trees so far, using TileReader. // To prove a tree that way, all you have to do is compute and verify its hash. 
for j := int64(0); j <= i; j++ { h, err := TreeHash(j+1, thr) if err != nil { t.Fatalf("TreeHash(%d, TileHashReader(%d)): %v", j, i+1, err) } if h != trees[j] { t.Fatalf("TreeHash(%d, TileHashReader(%d)) = %x, want %x (%v)", j, i+1, h[:], trees[j][:], trees[j]) } // Even though computing the subtree hash suffices, // check that we can generate the proof too. p, err := ProveTree(i+1, j+1, thr) if err != nil { t.Fatalf("ProveTree(%d, %d): %v", i+1, j+1, err) } if err := CheckTree(p, i+1, th, j+1, trees[j]); err != nil { t.Fatalf("CheckTree(%d, %d): %v [%v]", i+1, j+1, err, p) } for k := range p { p[k][0] ^= 1 if err := CheckTree(p, i+1, th, j+1, trees[j]); err == nil { t.Fatalf("CheckTree(%d, %d) succeeded with corrupt proof hash #%d!", i+1, j+1, k) } p[k][0] ^= 1 } } if storage.unsaved != 0 { t.Fatalf("TileHashReader(%d) did not save %d tiles", i+1, storage.unsaved) } } } func TestSplitStoredHashIndex(t *testing.T) { for l := 0; l < 10; l++ { for n := int64(0); n < 100; n++ { x := StoredHashIndex(l, n) l1, n1 := SplitStoredHashIndex(x) if l1 != l || n1 != n { t.Fatalf("StoredHashIndex(%d, %d) = %d, but SplitStoredHashIndex(%d) = %d, %d", l, n, x, x, l1, n1) } } } } // TODO(rsc): Test invalid paths too, like "tile/3/5/123/456/078". var tilePaths = []struct { path string tile Tile }{ {"tile/4/0/001", Tile{4, 0, 1, 16}}, {"tile/4/0/001.p/5", Tile{4, 0, 1, 5}}, {"tile/3/5/x123/x456/078", Tile{3, 5, 123456078, 8}}, {"tile/3/5/x123/x456/078.p/2", Tile{3, 5, 123456078, 2}}, {"tile/1/0/x003/x057/500", Tile{1, 0, 3057500, 2}}, {"tile/3/5/123/456/078", Tile{}}, {"tile/3/-1/123/456/078", Tile{}}, {"tile/1/data/x003/x057/500", Tile{1, -1, 3057500, 2}}, } func TestTilePath(t *testing.T) { for _, tt := range tilePaths { if tt.tile.H > 0 { p := tt.tile.Path() if p != tt.path { t.Errorf("%+v.Path() = %q, want %q", tt.tile, p, tt.path) } } tile, err := ParseTilePath(tt.path) if err != nil { if tt.tile.H == 0 { // Expected error. 
continue } t.Errorf("ParseTilePath(%q): %v", tt.path, err) } else if tile != tt.tile { if tt.tile.H == 0 { t.Errorf("ParseTilePath(%q): expected error, got %+v", tt.path, tt.tile) continue } t.Errorf("ParseTilePath(%q) = %+v, want %+v", tt.path, tile, tt.tile) } } } func TestEmptyTree(t *testing.T) { h, err := TreeHash(0, nil) if err != nil { t.Fatal(err) } if h != sha256.Sum256(nil) { t.Fatalf("TreeHash(0) = %x, want SHA-256('')", h) } } mod-0.19.0/zip/000077500000000000000000000000001463702072700131725ustar00rootroot00000000000000mod-0.19.0/zip/testdata/000077500000000000000000000000001463702072700150035ustar00rootroot00000000000000mod-0.19.0/zip/testdata/check_dir/000077500000000000000000000000001463702072700167165ustar00rootroot00000000000000mod-0.19.0/zip/testdata/check_dir/empty.txt000066400000000000000000000000461463702072700206150ustar00rootroot00000000000000-- want -- valid: omitted: invalid: mod-0.19.0/zip/testdata/check_dir/various.txt000066400000000000000000000010031463702072700211410ustar00rootroot00000000000000-- want -- valid: $work/valid.go omitted: $work/.hg_archival.txt: file is inserted by 'hg archive' and is always omitted $work/.git: directory is a version control repository $work/sub: directory is in another module $work/vendor/x/y: file is in vendor directory invalid: $work/GO.MOD: go.mod files must have lowercase names $work/invalid.go': malformed file path "invalid.go'": invalid char '\'' -- valid.go -- -- GO.MOD -- -- invalid.go' -- -- vendor/x/y -- -- sub/go.mod -- -- .hg_archival.txt -- -- .git/x -- mod-0.19.0/zip/testdata/check_files/000077500000000000000000000000001463702072700172425ustar00rootroot00000000000000mod-0.19.0/zip/testdata/check_files/empty.txt000066400000000000000000000000461463702072700211410ustar00rootroot00000000000000-- want -- valid: omitted: invalid: mod-0.19.0/zip/testdata/check_files/various.txt000066400000000000000000000010771463702072700215000ustar00rootroot00000000000000-- want -- valid: valid.go omitted: 
vendor/x/y: file is in vendor directory sub/go.mod: file is in another module .hg_archival.txt: file is inserted by 'hg archive' and is always omitted invalid: not/../clean: file path is not clean GO.MOD: go.mod files must have lowercase names invalid.go': malformed file path "invalid.go'": invalid char '\'' valid.go: multiple entries for file "valid.go" -- valid.go -- -- not/../clean -- -- GO.MOD -- -- invalid.go' -- -- vendor/x/y -- -- sub/go.mod -- -- .hg_archival.txt -- -- valid.go -- duplicate -- valid.go -- another duplicate mod-0.19.0/zip/testdata/check_zip/000077500000000000000000000000001463702072700167425ustar00rootroot00000000000000mod-0.19.0/zip/testdata/check_zip/empty.txt000066400000000000000000000001141463702072700206350ustar00rootroot00000000000000path=example.com/empty version=v1.0.0 -- want -- valid: omitted: invalid: mod-0.19.0/zip/testdata/check_zip/various.txt000066400000000000000000000013251463702072700211740ustar00rootroot00000000000000path=example.com/various version=v1.0.0 -- want -- valid: example.com/various@v1.0.0/valid.go omitted: invalid: noprefix: path does not have prefix "example.com/various@v1.0.0/" example.com/various@v1.0.0/not/../clean: file path is not clean example.com/various@v1.0.0/invalid.go': malformed file path "invalid.go'": invalid char '\'' example.com/various@v1.0.0/GO.MOD: go.mod files must have lowercase names example.com/various@v1.0.0/valid.go: multiple entries for file "valid.go" -- noprefix -- -- example.com/various@v1.0.0/valid.go -- -- example.com/various@v1.0.0/not/../clean -- -- example.com/various@v1.0.0/invalid.go' -- -- example.com/various@v1.0.0/GO.MOD -- -- example.com/various@v1.0.0/valid.go -- duplicate mod-0.19.0/zip/testdata/create/000077500000000000000000000000001463702072700162465ustar00rootroot00000000000000mod-0.19.0/zip/testdata/create/bad_file_path.txt000066400000000000000000000001651463702072700215520ustar00rootroot00000000000000path=example.com/m version=v1.0.0 wantErr=malformed file path 
"bad.go'": invalid char '\'' -- bad.go' -- package bad mod-0.19.0/zip/testdata/create/bad_gomod_case.txt000066400000000000000000000001731463702072700217160ustar00rootroot00000000000000path=example.com/m version=v1.0.0 wantErr=GO.MOD: go.mod files must have lowercase names -- GO.MOD -- module example.com/m mod-0.19.0/zip/testdata/create/bad_mod_path.txt000066400000000000000000000001041463702072700214030ustar00rootroot00000000000000path=cache version=v1.0.0 wantErr=missing dot in first path element mod-0.19.0/zip/testdata/create/bad_mod_path_version_suffix.txt000066400000000000000000000001261463702072700245400ustar00rootroot00000000000000path=example.com/m version=v2.0.0 wantErr=invalid version: should be v0 or v1, not v2 mod-0.19.0/zip/testdata/create/bad_version.txt000066400000000000000000000001511463702072700212770ustar00rootroot00000000000000path=example.com/m version=v1.0.0+bad wantErr=version "v1.0.0+bad" is not canonical (should be "v1.0.0") mod-0.19.0/zip/testdata/create/dup_file.txt000066400000000000000000000001751463702072700206010ustar00rootroot00000000000000path=example.com/m version=v1.0.0 wantErr=multiple entries for file "dup.go" -- dup.go -- package d1 -- dup.go -- package d2 mod-0.19.0/zip/testdata/create/dup_file_and_dir.txt000066400000000000000000000002051463702072700222530ustar00rootroot00000000000000path=example.com/m version=v1.0.0 wantErr=entry "a.go" is both a file and a directory -- a.go -- package a -- a.go/b.go -- package b mod-0.19.0/zip/testdata/create/empty.txt000066400000000000000000000001331463702072700201420ustar00rootroot00000000000000path=example.com/empty version=v1.0.0 hash=h1:47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU= mod-0.19.0/zip/testdata/create/exclude_cap_go_mod_submodule.txt000066400000000000000000000002511463702072700246640ustar00rootroot00000000000000path=example.com/m version=v1.0.0 hash=h1:xctQQey8/y7IcBjFZDP/onWLSXhlqcsC3i1fgSdpMHk= -- a.go -- package a -- b/GO.MOD -- MODULE EXAMPLE.COM/M/B -- b/b.go -- package b 
mod-0.19.0/zip/testdata/create/exclude_submodule.txt000066400000000000000000000003451463702072700225210ustar00rootroot00000000000000path=example.com/m version=v1.0.0 hash=h1:XduFAgX/GaspZa8Jv4pfzoGEzNaU/r88PiCunijw5ok= -- go.mod -- module example.com/m go 1.13 -- sub/go.mod -- module example.com/m/sub -- sub/x.go' -- invalid name, but this shouldn't be read mod-0.19.0/zip/testdata/create/exclude_vendor.txt000066400000000000000000000004541463702072700220200ustar00rootroot00000000000000path=example.com/m version=v1.0.0 hash=h1:5u93LDLN0Me+NGfZtRpA5mHxY8svfykHpq4CMSaBZyc= -- go.mod -- module example.com/m go 1.13 -- vendor/modules.txt -- included see comment in isVendoredPackage and golang.org/issue/31562. -- vendor/example.com/x/x.go -- excluded -- sub/vendor/sub.txt -- excluded mod-0.19.0/zip/testdata/create/file_case_conflict.txt000066400000000000000000000002141463702072700225770ustar00rootroot00000000000000path=example.com/m version=v1.0.0 wantErr=case-insensitive file name collision: "m.go" and "M.GO" -- m.go -- package m -- M.GO -- package m mod-0.19.0/zip/testdata/create/go_mod_dir.txt000066400000000000000000000001631463702072700211110ustar00rootroot00000000000000path=example.com/m version=v1.0.0 hash=h1:Mun5l9cBlDnnV6JasTpio2aZJSbFj++h+814mnKC/OM= -- go.mod/a.go -- package a mod-0.19.0/zip/testdata/create/invalid_utf8_mod_path.txt000066400000000000000000000001221463702072700232510ustar00rootroot00000000000000path=ÿ # 0xFF version=v1.0.0 wantErr=malformed module path "\xff": invalid UTF-8 mod-0.19.0/zip/testdata/create/simple.txt000066400000000000000000000004511463702072700203000ustar00rootroot00000000000000path=example.com/m version=v1.0.0 hash=h1:tpqYOOmuilagXzyqoJ3roUjp8gneQeTv5YVpL6BG7/k= -- go.mod -- module example.com/m go 1.13 -- m.go -- package m func Foo() int { return 42 } -- cmd/hello/hello.go -- package main import ( "fmt" "example.com/m" ) func main() { fmt.Println(m.Foo()) } 
mod-0.19.0/zip/testdata/create_from_dir/000077500000000000000000000000001463702072700201275ustar00rootroot00000000000000mod-0.19.0/zip/testdata/create_from_dir/bad_file_path.txt000066400000000000000000000001651463702072700234330ustar00rootroot00000000000000path=example.com/m version=v1.0.0 wantErr=malformed file path "bad.go'": invalid char '\'' -- bad.go' -- package bad mod-0.19.0/zip/testdata/create_from_dir/bad_gomod_case.txt000066400000000000000000000001731463702072700235770ustar00rootroot00000000000000path=example.com/m version=v1.0.0 wantErr=GO.MOD: go.mod files must have lowercase names -- GO.MOD -- module example.com/m mod-0.19.0/zip/testdata/create_from_dir/bad_mod_path.txt000066400000000000000000000001041463702072700232640ustar00rootroot00000000000000path=cache version=v1.0.0 wantErr=missing dot in first path element mod-0.19.0/zip/testdata/create_from_dir/bad_mod_path_version_suffix.txt000066400000000000000000000001261463702072700264210ustar00rootroot00000000000000path=example.com/m version=v2.0.0 wantErr=invalid version: should be v0 or v1, not v2 mod-0.19.0/zip/testdata/create_from_dir/bad_version.txt000066400000000000000000000001511463702072700231600ustar00rootroot00000000000000path=example.com/m version=v1.0.0+bad wantErr=version "v1.0.0+bad" is not canonical (should be "v1.0.0") mod-0.19.0/zip/testdata/create_from_dir/empty.txt000066400000000000000000000001331463702072700220230ustar00rootroot00000000000000path=example.com/empty version=v1.0.0 hash=h1:47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU= mod-0.19.0/zip/testdata/create_from_dir/exclude_submodule.txt000066400000000000000000000003451463702072700244020ustar00rootroot00000000000000path=example.com/m version=v1.0.0 hash=h1:XduFAgX/GaspZa8Jv4pfzoGEzNaU/r88PiCunijw5ok= -- go.mod -- module example.com/m go 1.13 -- sub/go.mod -- module example.com/m/sub -- sub/x.go' -- invalid name, but this shouldn't be read 
mod-0.19.0/zip/testdata/create_from_dir/exclude_vcs.txt000066400000000000000000000003021463702072700231670ustar00rootroot00000000000000path=example.com/m version=v1.0.0 hash=h1:47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU= -- .bzr/exclude -- exclude -- .git/exclude -- exclude -- .hg/exclude -- exclude -- .svn/exclude -- exclude mod-0.19.0/zip/testdata/create_from_dir/exclude_vendor.txt000066400000000000000000000004541463702072700237010ustar00rootroot00000000000000path=example.com/m version=v1.0.0 hash=h1:5u93LDLN0Me+NGfZtRpA5mHxY8svfykHpq4CMSaBZyc= -- go.mod -- module example.com/m go 1.13 -- vendor/modules.txt -- included see comment in isVendoredPackage and golang.org/issue/31562. -- vendor/example.com/x/x.go -- excluded -- sub/vendor/sub.txt -- excluded mod-0.19.0/zip/testdata/create_from_dir/go_mod_dir.txt000066400000000000000000000001631463702072700227720ustar00rootroot00000000000000path=example.com/m version=v1.0.0 hash=h1:Mun5l9cBlDnnV6JasTpio2aZJSbFj++h+814mnKC/OM= -- go.mod/a.go -- package a mod-0.19.0/zip/testdata/create_from_dir/invalid_utf8_mod_path.txt000066400000000000000000000001221463702072700251320ustar00rootroot00000000000000path=ÿ # 0xFF version=v1.0.0 wantErr=malformed module path "\xff": invalid UTF-8 mod-0.19.0/zip/testdata/create_from_dir/simple.txt000066400000000000000000000004511463702072700221610ustar00rootroot00000000000000path=example.com/m version=v1.0.0 hash=h1:tpqYOOmuilagXzyqoJ3roUjp8gneQeTv5YVpL6BG7/k= -- go.mod -- module example.com/m go 1.13 -- m.go -- package m func Foo() int { return 42 } -- cmd/hello/hello.go -- package main import ( "fmt" "example.com/m" ) func main() { fmt.Println(m.Foo()) } mod-0.19.0/zip/testdata/unzip/000077500000000000000000000000001463702072700161505ustar00rootroot00000000000000mod-0.19.0/zip/testdata/unzip/bad_file_path.txt000066400000000000000000000002121463702072700214450ustar00rootroot00000000000000path=example.com/m version=v1.0.0 wantErr=malformed file path "bad.go'": invalid char '\'' -- 
example.com/m@v1.0.0/bad.go' -- package bad mod-0.19.0/zip/testdata/unzip/bad_gomod_case.txt000066400000000000000000000002101463702072700216100ustar00rootroot00000000000000path=example.com/m version=v1.0.0 wantErr=go.mod files must have lowercase names -- example.com/m@v1.0.0/GO.MOD -- module example.com/m mod-0.19.0/zip/testdata/unzip/bad_mod_path.txt000066400000000000000000000001041463702072700213050ustar00rootroot00000000000000path=cache version=v1.0.0 wantErr=missing dot in first path element mod-0.19.0/zip/testdata/unzip/bad_mod_path_version_suffix.txt000066400000000000000000000001261463702072700244420ustar00rootroot00000000000000path=example.com/m version=v2.0.0 wantErr=invalid version: should be v0 or v1, not v2 mod-0.19.0/zip/testdata/unzip/bad_submodule.txt000066400000000000000000000003221463702072700215130ustar00rootroot00000000000000path=example.com/m version=v1.0.0 wantErr=go.mod file not in module root directory -- example.com/m@v1.0.0/go.mod -- module example.com/m go 1.13 -- example.com/m@v1.0.0/sub/go.mod -- module example.com/m/sub mod-0.19.0/zip/testdata/unzip/bad_version.txt000066400000000000000000000001511463702072700212010ustar00rootroot00000000000000path=example.com/m version=v1.0.0+bad wantErr=version "v1.0.0+bad" is not canonical (should be "v1.0.0") mod-0.19.0/zip/testdata/unzip/cap_go_mod_not_submodule.txt000066400000000000000000000003441463702072700237400ustar00rootroot00000000000000path=example.com/m version=v1.0.0 wantErr=go.mod file not in module root directory -- example.com/m@v1.0.0/a.go -- package a -- example.com/m@v1.0.0/b/GO.MOD -- MODULE EXAMPLE.COM/M/B -- example.com/m@v1.0.0/b/b.go -- package b mod-0.19.0/zip/testdata/unzip/dup_file.txt000066400000000000000000000002471463702072700205030ustar00rootroot00000000000000path=example.com/m version=v1.0.0 wantErr=multiple entries for file "dup.go" -- example.com/m@v1.0.0/dup.go -- package d1 -- example.com/m@v1.0.0/dup.go -- package d2 
mod-0.19.0/zip/testdata/unzip/dup_file_and_dir.txt000066400000000000000000000002571463702072700221640ustar00rootroot00000000000000path=example.com/m version=v1.0.0 wantErr=entry "a.go" is both a file and a directory -- example.com/m@v1.0.0/a.go -- package a -- example.com/m@v1.0.0/a.go/b.go -- package b mod-0.19.0/zip/testdata/unzip/empty.txt000066400000000000000000000001331463702072700200440ustar00rootroot00000000000000path=example.com/empty version=v1.0.0 hash=h1:47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU= mod-0.19.0/zip/testdata/unzip/file_case_conflict.txt000066400000000000000000000002661463702072700225100ustar00rootroot00000000000000path=example.com/m version=v1.0.0 wantErr=case-insensitive file name collision: "m.go" and "M.GO" -- example.com/m@v1.0.0/m.go -- package m -- example.com/m@v1.0.0/M.GO -- package m mod-0.19.0/zip/testdata/unzip/go_mod_dir.txt000066400000000000000000000002101463702072700210040ustar00rootroot00000000000000path=example.com/m version=v1.0.0 hash=h1:Mun5l9cBlDnnV6JasTpio2aZJSbFj++h+814mnKC/OM= -- example.com/m@v1.0.0/go.mod/a.go -- package a mod-0.19.0/zip/testdata/unzip/invalid_utf8_mod_path.txt000066400000000000000000000001221463702072700231530ustar00rootroot00000000000000path=ÿ # 0xFF version=v1.0.0 wantErr=malformed module path "\xff": invalid UTF-8 mod-0.19.0/zip/testdata/unzip/prefix_only.txt000066400000000000000000000003041463702072700212440ustar00rootroot00000000000000path=example.com/m version=v1.0.0 wantErr=example.com/m@v1.0.0: path does not have prefix "example.com/m@v1.0.0/" -- example.com/m@v1.0.0 -- -- example.com/m@v1.0.0/go.mod -- module example.com/m mod-0.19.0/zip/testdata/unzip/simple.txt000066400000000000000000000005501463702072700202020ustar00rootroot00000000000000path=example.com/m version=v1.0.0 hash=h1:tpqYOOmuilagXzyqoJ3roUjp8gneQeTv5YVpL6BG7/k= -- example.com/m@v1.0.0/go.mod -- module example.com/m go 1.13 -- example.com/m@v1.0.0/m.go -- package m func Foo() int { return 42 } -- 
example.com/m@v1.0.0/cmd/hello/hello.go -- package main import ( "fmt" "example.com/m" ) func main() { fmt.Println(m.Foo()) } mod-0.19.0/zip/vendor_test.go000066400000000000000000000023251463702072700160570ustar00rootroot00000000000000// Copyright 2020 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package zip import "testing" func TestIsVendoredPackage(t *testing.T) { for _, tc := range []struct { path string want bool falsePositive bool // is this case affected by https://golang.org/issue/37397? }{ {path: "vendor/foo/foo.go", want: true}, {path: "pkg/vendor/foo/foo.go", want: true}, {path: "longpackagename/vendor/foo/foo.go", want: true}, {path: "vendor/vendor.go", want: false}, // We ideally want these cases to be false, but they are affected by // https://golang.org/issue/37397, and if we fix them we will invalidate // existing module checksums. We must leave them as-is-for now. {path: "pkg/vendor/vendor.go", falsePositive: true}, {path: "longpackagename/vendor/vendor.go", falsePositive: true}, } { got := isVendoredPackage(tc.path) want := tc.want if tc.falsePositive { want = true } if got != want { t.Errorf("isVendoredPackage(%q) = %t; want %t", tc.path, got, tc.want) if tc.falsePositive { t.Logf("(Expected a false-positive due to https://golang.org/issue/37397.)") } } } } mod-0.19.0/zip/zip.go000066400000000000000000000757251463702072700143430ustar00rootroot00000000000000// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package zip provides functions for creating and extracting module zip files. // // Module zip files have several restrictions listed below. These are necessary // to ensure that module zip files can be extracted consistently on supported // platforms and file systems. 
// // • All file paths within a zip file must start with "@/", // where "" is the module path and "" is the version. // The module path must be valid (see [golang.org/x/mod/module.CheckPath]). // The version must be valid and canonical (see // [golang.org/x/mod/module.CanonicalVersion]). The path must have a major // version suffix consistent with the version (see // [golang.org/x/mod/module.Check]). The part of the file path after the // "@/" prefix must be valid (see // [golang.org/x/mod/module.CheckFilePath]). // // • No two file paths may be equal under Unicode case-folding (see // [strings.EqualFold]). // // • A go.mod file may or may not appear in the top-level directory. If present, // it must be named "go.mod", not any other case. Files named "go.mod" // are not allowed in any other directory. // // • The total size in bytes of a module zip file may be at most [MaxZipFile] // bytes (500 MiB). The total uncompressed size of the files within the // zip may also be at most [MaxZipFile] bytes. // // • Each file's uncompressed size must match its declared 64-bit uncompressed // size in the zip file header. // // • If the zip contains files named "@/go.mod" or // "@/LICENSE", their sizes in bytes may be at most // [MaxGoMod] or [MaxLICENSE], respectively (both are 16 MiB). // // • Empty directories are ignored. File permissions and timestamps are also // ignored. // // • Symbolic links and other irregular files are not allowed. // // Note that this package does not provide hashing functionality. See // [golang.org/x/mod/sumdb/dirhash]. package zip import ( "archive/zip" "bytes" "errors" "fmt" "io" "os" "os/exec" "path" "path/filepath" "strings" "time" "unicode" "unicode/utf8" "golang.org/x/mod/module" ) const ( // MaxZipFile is the maximum size in bytes of a module zip file. The // go command will report an error if either the zip file or its extracted // content is larger than this. 
MaxZipFile = 500 << 20 // MaxGoMod is the maximum size in bytes of a go.mod file within a // module zip file. MaxGoMod = 16 << 20 // MaxLICENSE is the maximum size in bytes of a LICENSE file within a // module zip file. MaxLICENSE = 16 << 20 ) // File provides an abstraction for a file in a directory, zip, or anything // else that looks like a file. type File interface { // Path returns a clean slash-separated relative path from the module root // directory to the file. Path() string // Lstat returns information about the file. If the file is a symbolic link, // Lstat returns information about the link itself, not the file it points to. Lstat() (os.FileInfo, error) // Open provides access to the data within a regular file. Open may return // an error if called on a directory or symbolic link. Open() (io.ReadCloser, error) } // CheckedFiles reports whether a set of files satisfy the name and size // constraints required by module zip files. The constraints are listed in the // package documentation. // // Functions that produce this report may include slightly different sets of // files. See documentation for CheckFiles, CheckDir, and CheckZip for details. type CheckedFiles struct { // Valid is a list of file paths that should be included in a zip file. Valid []string // Omitted is a list of files that are ignored when creating a module zip // file, along with the reason each file is ignored. Omitted []FileError // Invalid is a list of files that should not be included in a module zip // file, along with the reason each file is invalid. Invalid []FileError // SizeError is non-nil if the total uncompressed size of the valid files // exceeds the module zip size limit or if the zip file itself exceeds the // limit. SizeError error } // Err returns an error if [CheckedFiles] does not describe a valid module zip // file. [CheckedFiles.SizeError] is returned if that field is set. // A [FileErrorList] is returned // if there are one or more invalid files. 
Other errors may be returned in the // future. func (cf CheckedFiles) Err() error { if cf.SizeError != nil { return cf.SizeError } if len(cf.Invalid) > 0 { return FileErrorList(cf.Invalid) } return nil } type FileErrorList []FileError func (el FileErrorList) Error() string { buf := &strings.Builder{} sep := "" for _, e := range el { buf.WriteString(sep) buf.WriteString(e.Error()) sep = "\n" } return buf.String() } type FileError struct { Path string Err error } func (e FileError) Error() string { return fmt.Sprintf("%s: %s", e.Path, e.Err) } func (e FileError) Unwrap() error { return e.Err } var ( // Predefined error messages for invalid files. Not exhaustive. errPathNotClean = errors.New("file path is not clean") errPathNotRelative = errors.New("file path is not relative") errGoModCase = errors.New("go.mod files must have lowercase names") errGoModSize = fmt.Errorf("go.mod file too large (max size is %d bytes)", MaxGoMod) errLICENSESize = fmt.Errorf("LICENSE file too large (max size is %d bytes)", MaxLICENSE) // Predefined error messages for omitted files. Not exhaustive. errVCS = errors.New("directory is a version control repository") errVendored = errors.New("file is in vendor directory") errSubmoduleFile = errors.New("file is in another module") errSubmoduleDir = errors.New("directory is in another module") errHgArchivalTxt = errors.New("file is inserted by 'hg archive' and is always omitted") errSymlink = errors.New("file is a symbolic link") errNotRegular = errors.New("not a regular file") ) // CheckFiles reports whether a list of files satisfy the name and size // constraints listed in the package documentation. The returned CheckedFiles // record contains lists of valid, invalid, and omitted files. Every file in // the given list will be included in exactly one of those lists. // // CheckFiles returns an error if the returned CheckedFiles does not describe // a valid module zip file (according to CheckedFiles.Err). 
The returned // CheckedFiles is still populated when an error is returned. // // Note that CheckFiles will not open any files, so Create may still fail when // CheckFiles is successful due to I/O errors and reported size differences. func CheckFiles(files []File) (CheckedFiles, error) { cf, _, _ := checkFiles(files) return cf, cf.Err() } // checkFiles implements CheckFiles and also returns lists of valid files and // their sizes, corresponding to cf.Valid. It omits files in submodules, files // in vendored packages, symlinked files, and various other unwanted files. // // The lists returned are used in Create to avoid repeated calls to File.Lstat. func checkFiles(files []File) (cf CheckedFiles, validFiles []File, validSizes []int64) { errPaths := make(map[string]struct{}) addError := func(path string, omitted bool, err error) { if _, ok := errPaths[path]; ok { return } errPaths[path] = struct{}{} fe := FileError{Path: path, Err: err} if omitted { cf.Omitted = append(cf.Omitted, fe) } else { cf.Invalid = append(cf.Invalid, fe) } } // Find directories containing go.mod files (other than the root). // Files in these directories will be omitted. // These directories will not be included in the output zip. haveGoMod := make(map[string]bool) for _, f := range files { p := f.Path() dir, base := path.Split(p) if strings.EqualFold(base, "go.mod") { info, err := f.Lstat() if err != nil { addError(p, false, err) continue } if info.Mode().IsRegular() { haveGoMod[dir] = true } } } inSubmodule := func(p string) bool { for { dir, _ := path.Split(p) if dir == "" { return false } if haveGoMod[dir] { return true } p = dir[:len(dir)-1] } } collisions := make(collisionChecker) maxSize := int64(MaxZipFile) for _, f := range files { p := f.Path() if p != path.Clean(p) { addError(p, false, errPathNotClean) continue } if path.IsAbs(p) { addError(p, false, errPathNotRelative) continue } if isVendoredPackage(p) { // Skip files in vendored packages. 
addError(p, true, errVendored) continue } if inSubmodule(p) { // Skip submodule files. addError(p, true, errSubmoduleFile) continue } if p == ".hg_archival.txt" { // Inserted by hg archive. // The go command drops this regardless of the VCS being used. addError(p, true, errHgArchivalTxt) continue } if err := module.CheckFilePath(p); err != nil { addError(p, false, err) continue } if strings.ToLower(p) == "go.mod" && p != "go.mod" { addError(p, false, errGoModCase) continue } info, err := f.Lstat() if err != nil { addError(p, false, err) continue } if err := collisions.check(p, info.IsDir()); err != nil { addError(p, false, err) continue } if info.Mode()&os.ModeType == os.ModeSymlink { // Skip symbolic links (golang.org/issue/27093). addError(p, true, errSymlink) continue } if !info.Mode().IsRegular() { addError(p, true, errNotRegular) continue } size := info.Size() if size >= 0 && size <= maxSize { maxSize -= size } else if cf.SizeError == nil { cf.SizeError = fmt.Errorf("module source tree too large (max size is %d bytes)", MaxZipFile) } if p == "go.mod" && size > MaxGoMod { addError(p, false, errGoModSize) continue } if p == "LICENSE" && size > MaxLICENSE { addError(p, false, errLICENSESize) continue } cf.Valid = append(cf.Valid, p) validFiles = append(validFiles, f) validSizes = append(validSizes, info.Size()) } return cf, validFiles, validSizes } // CheckDir reports whether the files in dir satisfy the name and size // constraints listed in the package documentation. The returned [CheckedFiles] // record contains lists of valid, invalid, and omitted files. If a directory is // omitted (for example, a nested module or vendor directory), it will appear in // the omitted list, but its files won't be listed. // // CheckDir returns an error if it encounters an I/O error or if the returned // [CheckedFiles] does not describe a valid module zip file (according to // [CheckedFiles.Err]). The returned [CheckedFiles] is still populated when such // an error is returned. 
// // Note that CheckDir will not open any files, so [CreateFromDir] may still fail // when CheckDir is successful due to I/O errors. func CheckDir(dir string) (CheckedFiles, error) { // List files (as CreateFromDir would) and check which ones are omitted // or invalid. files, omitted, err := listFilesInDir(dir) if err != nil { return CheckedFiles{}, err } cf, cfErr := CheckFiles(files) _ = cfErr // ignore this error; we'll generate our own after rewriting paths. // Replace all paths with file system paths. // Paths returned by CheckFiles will be slash-separated paths relative to dir. // That's probably not appropriate for error messages. for i := range cf.Valid { cf.Valid[i] = filepath.Join(dir, cf.Valid[i]) } cf.Omitted = append(cf.Omitted, omitted...) for i := range cf.Omitted { cf.Omitted[i].Path = filepath.Join(dir, cf.Omitted[i].Path) } for i := range cf.Invalid { cf.Invalid[i].Path = filepath.Join(dir, cf.Invalid[i].Path) } return cf, cf.Err() } // CheckZip reports whether the files contained in a zip file satisfy the name // and size constraints listed in the package documentation. // // CheckZip returns an error if the returned [CheckedFiles] does not describe // a valid module zip file (according to [CheckedFiles.Err]). The returned // CheckedFiles is still populated when an error is returned. CheckZip will // also return an error if the module path or version is malformed or if it // encounters an error reading the zip file. // // Note that CheckZip does not read individual files, so [Unzip] may still fail // when CheckZip is successful due to I/O errors. func CheckZip(m module.Version, zipFile string) (CheckedFiles, error) { f, err := os.Open(zipFile) if err != nil { return CheckedFiles{}, err } defer f.Close() _, cf, err := checkZip(m, f) return cf, err } // checkZip implements checkZip and also returns the *zip.Reader. This is // used in Unzip to avoid redundant I/O. 
func checkZip(m module.Version, f *os.File) (*zip.Reader, CheckedFiles, error) { // Make sure the module path and version are valid. if vers := module.CanonicalVersion(m.Version); vers != m.Version { return nil, CheckedFiles{}, fmt.Errorf("version %q is not canonical (should be %q)", m.Version, vers) } if err := module.Check(m.Path, m.Version); err != nil { return nil, CheckedFiles{}, err } // Check the total file size. info, err := f.Stat() if err != nil { return nil, CheckedFiles{}, err } zipSize := info.Size() if zipSize > MaxZipFile { cf := CheckedFiles{SizeError: fmt.Errorf("module zip file is too large (%d bytes; limit is %d bytes)", zipSize, MaxZipFile)} return nil, cf, cf.Err() } // Check for valid file names, collisions. var cf CheckedFiles addError := func(zf *zip.File, err error) { cf.Invalid = append(cf.Invalid, FileError{Path: zf.Name, Err: err}) } z, err := zip.NewReader(f, zipSize) if err != nil { return nil, CheckedFiles{}, err } prefix := fmt.Sprintf("%s@%s/", m.Path, m.Version) collisions := make(collisionChecker) var size int64 for _, zf := range z.File { if !strings.HasPrefix(zf.Name, prefix) { addError(zf, fmt.Errorf("path does not have prefix %q", prefix)) continue } name := zf.Name[len(prefix):] if name == "" { continue } isDir := strings.HasSuffix(name, "/") if isDir { name = name[:len(name)-1] } if path.Clean(name) != name { addError(zf, errPathNotClean) continue } if err := module.CheckFilePath(name); err != nil { addError(zf, err) continue } if err := collisions.check(name, isDir); err != nil { addError(zf, err) continue } if isDir { continue } if base := path.Base(name); strings.EqualFold(base, "go.mod") { if base != name { addError(zf, fmt.Errorf("go.mod file not in module root directory")) continue } if name != "go.mod" { addError(zf, errGoModCase) continue } } sz := int64(zf.UncompressedSize64) if sz >= 0 && MaxZipFile-size >= sz { size += sz } else if cf.SizeError == nil { cf.SizeError = fmt.Errorf("total uncompressed size of module 
contents too large (max size is %d bytes)", MaxZipFile) } if name == "go.mod" && sz > MaxGoMod { addError(zf, fmt.Errorf("go.mod file too large (max size is %d bytes)", MaxGoMod)) continue } if name == "LICENSE" && sz > MaxLICENSE { addError(zf, fmt.Errorf("LICENSE file too large (max size is %d bytes)", MaxLICENSE)) continue } cf.Valid = append(cf.Valid, zf.Name) } return z, cf, cf.Err() } // Create builds a zip archive for module m from an abstract list of files // and writes it to w. // // Create verifies the restrictions described in the package documentation // and should not produce an archive that [Unzip] cannot extract. Create does not // include files in the output archive if they don't belong in the module zip. // In particular, Create will not include files in modules found in // subdirectories, most files in vendor directories, or irregular files (such // as symbolic links) in the output archive. func Create(w io.Writer, m module.Version, files []File) (err error) { defer func() { if err != nil { err = &zipError{verb: "create zip", err: err} } }() // Check that the version is canonical, the module path is well-formed, and // the major version suffix matches the major version. if vers := module.CanonicalVersion(m.Version); vers != m.Version { return fmt.Errorf("version %q is not canonical (should be %q)", m.Version, vers) } if err := module.Check(m.Path, m.Version); err != nil { return err } // Check whether files are valid, not valid, or should be omitted. // Also check that the valid files don't exceed the maximum size. cf, validFiles, validSizes := checkFiles(files) if err := cf.Err(); err != nil { return err } // Create the module zip file. 
zw := zip.NewWriter(w) prefix := fmt.Sprintf("%s@%s/", m.Path, m.Version) addFile := func(f File, path string, size int64) error { rc, err := f.Open() if err != nil { return err } defer rc.Close() w, err := zw.Create(prefix + path) if err != nil { return err } lr := &io.LimitedReader{R: rc, N: size + 1} if _, err := io.Copy(w, lr); err != nil { return err } if lr.N <= 0 { return fmt.Errorf("file %q is larger than declared size", path) } return nil } for i, f := range validFiles { p := f.Path() size := validSizes[i] if err := addFile(f, p, size); err != nil { return err } } return zw.Close() } // CreateFromDir creates a module zip file for module m from the contents of // a directory, dir. The zip content is written to w. // // CreateFromDir verifies the restrictions described in the package // documentation and should not produce an archive that [Unzip] cannot extract. // CreateFromDir does not include files in the output archive if they don't // belong in the module zip. In particular, CreateFromDir will not include // files in modules found in subdirectories, most files in vendor directories, // or irregular files (such as symbolic links) in the output archive. // Additionally, unlike [Create], CreateFromDir will not include directories // named ".bzr", ".git", ".hg", or ".svn". func CreateFromDir(w io.Writer, m module.Version, dir string) (err error) { defer func() { if zerr, ok := err.(*zipError); ok { zerr.path = dir } else if err != nil { err = &zipError{verb: "create zip from directory", path: dir, err: err} } }() files, _, err := listFilesInDir(dir) if err != nil { return err } return Create(w, m, files) } // CreateFromVCS creates a module zip file for module m from the contents of a // VCS repository stored locally. The zip content is written to w. // // repoRoot must be an absolute path to the base of the repository, such as // "/Users/some-user/some-repo". // // revision is the revision of the repository to create the zip from. 
Examples // include HEAD or SHA sums for git repositories. // // subdir must be the relative path from the base of the repository, such as // "sub/dir". To create a zip from the base of the repository, pass an empty // string. // // If CreateFromVCS returns [UnrecognizedVCSError], consider falling back to // [CreateFromDir]. func CreateFromVCS(w io.Writer, m module.Version, repoRoot, revision, subdir string) (err error) { defer func() { if zerr, ok := err.(*zipError); ok { zerr.path = repoRoot } else if err != nil { err = &zipError{verb: "create zip from version control system", path: repoRoot, err: err} } }() var filesToCreate []File switch { case isGitRepo(repoRoot): files, err := filesInGitRepo(repoRoot, revision, subdir) if err != nil { return err } filesToCreate = files default: return &UnrecognizedVCSError{RepoRoot: repoRoot} } return Create(w, m, filesToCreate) } // UnrecognizedVCSError indicates that no recognized version control system was // found in the given directory. type UnrecognizedVCSError struct { RepoRoot string } func (e *UnrecognizedVCSError) Error() string { return fmt.Sprintf("could not find a recognized version control system at %q", e.RepoRoot) } // filesInGitRepo filters out any files that are git ignored in the directory. func filesInGitRepo(dir, rev, subdir string) ([]File, error) { stderr := bytes.Buffer{} stdout := bytes.Buffer{} // Incredibly, git produces different archives depending on whether // it is running on a Windows system or not, in an attempt to normalize // text file line endings. Setting -c core.autocrlf=input means only // translate files on the way into the repo, not on the way out (archive). // The -c core.eol=lf should be unnecessary but set it anyway. // // Note: We use git archive to understand which files are actually included, // ignoring things like .gitignore'd files. We could also use other // techniques like git ls-files, but this approach most closely matches what // the Go command does, which is beneficial. 
// // Note: some of this code copied from https://go.googlesource.com/go/+/refs/tags/go1.16.5/src/cmd/go/internal/modfetch/codehost/git.go#826. cmd := exec.Command("git", "-c", "core.autocrlf=input", "-c", "core.eol=lf", "archive", "--format=zip", rev) if subdir != "" { cmd.Args = append(cmd.Args, subdir) } cmd.Dir = dir cmd.Env = append(os.Environ(), "PWD="+dir) cmd.Stdout = &stdout cmd.Stderr = &stderr if err := cmd.Run(); err != nil { return nil, fmt.Errorf("error running `git archive`: %w, %s", err, stderr.String()) } rawReader := bytes.NewReader(stdout.Bytes()) zipReader, err := zip.NewReader(rawReader, int64(stdout.Len())) if err != nil { return nil, err } haveLICENSE := false var fs []File for _, zf := range zipReader.File { if !strings.HasPrefix(zf.Name, subdir) || strings.HasSuffix(zf.Name, "/") { continue } n := strings.TrimPrefix(zf.Name, subdir) if n == "" { continue } n = strings.TrimPrefix(n, "/") fs = append(fs, zipFile{ name: n, f: zf, }) if n == "LICENSE" { haveLICENSE = true } } if !haveLICENSE && subdir != "" { // Note: this method of extracting the license from the root copied from // https://go.googlesource.com/go/+/refs/tags/go1.20.4/src/cmd/go/internal/modfetch/coderepo.go#1118 // https://go.googlesource.com/go/+/refs/tags/go1.20.4/src/cmd/go/internal/modfetch/codehost/git.go#657 cmd := exec.Command("git", "cat-file", "blob", rev+":LICENSE") cmd.Dir = dir cmd.Env = append(os.Environ(), "PWD="+dir) stdout := bytes.Buffer{} cmd.Stdout = &stdout if err := cmd.Run(); err == nil { fs = append(fs, dataFile{name: "LICENSE", data: stdout.Bytes()}) } } return fs, nil } // isGitRepo reports whether the given directory is a git repo. 
func isGitRepo(dir string) bool { stdout := &bytes.Buffer{} cmd := exec.Command("git", "rev-parse", "--git-dir") cmd.Dir = dir cmd.Env = append(os.Environ(), "PWD="+dir) cmd.Stdout = stdout if err := cmd.Run(); err != nil { return false } gitDir := strings.TrimSpace(stdout.String()) if !filepath.IsAbs(gitDir) { gitDir = filepath.Join(dir, gitDir) } wantDir := filepath.Join(dir, ".git") return wantDir == gitDir } type dirFile struct { filePath, slashPath string info os.FileInfo } func (f dirFile) Path() string { return f.slashPath } func (f dirFile) Lstat() (os.FileInfo, error) { return f.info, nil } func (f dirFile) Open() (io.ReadCloser, error) { return os.Open(f.filePath) } type zipFile struct { name string f *zip.File } func (f zipFile) Path() string { return f.name } func (f zipFile) Lstat() (os.FileInfo, error) { return f.f.FileInfo(), nil } func (f zipFile) Open() (io.ReadCloser, error) { return f.f.Open() } type dataFile struct { name string data []byte } func (f dataFile) Path() string { return f.name } func (f dataFile) Lstat() (os.FileInfo, error) { return dataFileInfo{f}, nil } func (f dataFile) Open() (io.ReadCloser, error) { return io.NopCloser(bytes.NewReader(f.data)), nil } type dataFileInfo struct { f dataFile } func (fi dataFileInfo) Name() string { return path.Base(fi.f.name) } func (fi dataFileInfo) Size() int64 { return int64(len(fi.f.data)) } func (fi dataFileInfo) Mode() os.FileMode { return 0644 } func (fi dataFileInfo) ModTime() time.Time { return time.Time{} } func (fi dataFileInfo) IsDir() bool { return false } func (fi dataFileInfo) Sys() interface{} { return nil } // isVendoredPackage attempts to report whether the given filename is contained // in a package whose import path contains (but does not end with) the component // "vendor". // // Unfortunately, isVendoredPackage reports false positives for files in any // non-top-level package whose import path ends in "vendor". 
func isVendoredPackage(name string) bool { var i int if strings.HasPrefix(name, "vendor/") { i += len("vendor/") } else if j := strings.Index(name, "/vendor/"); j >= 0 { // This offset looks incorrect; this should probably be // // i = j + len("/vendor/") // // (See https://golang.org/issue/31562 and https://golang.org/issue/37397.) // Unfortunately, we can't fix it without invalidating module checksums. i += len("/vendor/") } else { return false } return strings.Contains(name[i:], "/") } // Unzip extracts the contents of a module zip file to a directory. // // Unzip checks all restrictions listed in the package documentation and returns // an error if the zip archive is not valid. In some cases, files may be written // to dir before an error is returned (for example, if a file's uncompressed // size does not match its declared size). // // dir may or may not exist: Unzip will create it and any missing parent // directories if it doesn't exist. If dir exists, it must be empty. func Unzip(dir string, m module.Version, zipFile string) (err error) { defer func() { if err != nil { err = &zipError{verb: "unzip", path: zipFile, err: err} } }() // Check that the directory is empty. Don't create it yet in case there's // an error reading the zip. if files, _ := os.ReadDir(dir); len(files) > 0 { return fmt.Errorf("target directory %v exists and is not empty", dir) } // Open the zip and check that it satisfies all restrictions. f, err := os.Open(zipFile) if err != nil { return err } defer f.Close() z, cf, err := checkZip(m, f) if err != nil { return err } if err := cf.Err(); err != nil { return err } // Unzip, enforcing sizes declared in the zip file. 
prefix := fmt.Sprintf("%s@%s/", m.Path, m.Version) if err := os.MkdirAll(dir, 0777); err != nil { return err } for _, zf := range z.File { name := zf.Name[len(prefix):] if name == "" || strings.HasSuffix(name, "/") { continue } dst := filepath.Join(dir, name) if err := os.MkdirAll(filepath.Dir(dst), 0777); err != nil { return err } w, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0444) if err != nil { return err } r, err := zf.Open() if err != nil { w.Close() return err } lr := &io.LimitedReader{R: r, N: int64(zf.UncompressedSize64) + 1} _, err = io.Copy(w, lr) r.Close() if err != nil { w.Close() return err } if err := w.Close(); err != nil { return err } if lr.N <= 0 { return fmt.Errorf("uncompressed size of file %s is larger than declared size (%d bytes)", zf.Name, zf.UncompressedSize64) } } return nil } // collisionChecker finds case-insensitive name collisions and paths that // are listed as both files and directories. // // The keys of this map are processed with strToFold. pathInfo has the original // path for each folded path. type collisionChecker map[string]pathInfo type pathInfo struct { path string isDir bool } func (cc collisionChecker) check(p string, isDir bool) error { fold := strToFold(p) if other, ok := cc[fold]; ok { if p != other.path { return fmt.Errorf("case-insensitive file name collision: %q and %q", other.path, p) } if isDir != other.isDir { return fmt.Errorf("entry %q is both a file and a directory", p) } if !isDir { return fmt.Errorf("multiple entries for file %q", p) } // It's not an error if check is called with the same directory multiple // times. check is called recursively on parent directories, so check // may be called on the same directory many times. } else { cc[fold] = pathInfo{path: p, isDir: isDir} } if parent := path.Dir(p); parent != "." 
{ return cc.check(parent, true) } return nil } // listFilesInDir walks the directory tree rooted at dir and returns a list of // files, as well as a list of directories and files that were skipped (for // example, nested modules and symbolic links). func listFilesInDir(dir string) (files []File, omitted []FileError, err error) { err = filepath.Walk(dir, func(filePath string, info os.FileInfo, err error) error { if err != nil { return err } relPath, err := filepath.Rel(dir, filePath) if err != nil { return err } slashPath := filepath.ToSlash(relPath) // Skip some subdirectories inside vendor, but maintain bug // golang.org/issue/31562, described in isVendoredPackage. // We would like Create and CreateFromDir to produce the same result // for a set of files, whether expressed as a directory tree or zip. if isVendoredPackage(slashPath) { omitted = append(omitted, FileError{Path: slashPath, Err: errVendored}) return nil } if info.IsDir() { if filePath == dir { // Don't skip the top-level directory. return nil } // Skip VCS directories. // fossil repos are regular files with arbitrary names, so we don't try // to exclude them. switch filepath.Base(filePath) { case ".bzr", ".git", ".hg", ".svn": omitted = append(omitted, FileError{Path: slashPath, Err: errVCS}) return filepath.SkipDir } // Skip submodules (directories containing go.mod files). if goModInfo, err := os.Lstat(filepath.Join(filePath, "go.mod")); err == nil && !goModInfo.IsDir() { omitted = append(omitted, FileError{Path: slashPath, Err: errSubmoduleDir}) return filepath.SkipDir } return nil } // Skip irregular files and files in vendor directories. // Irregular files are ignored. They're typically symbolic links. 
if !info.Mode().IsRegular() { omitted = append(omitted, FileError{Path: slashPath, Err: errNotRegular}) return nil } files = append(files, dirFile{ filePath: filePath, slashPath: slashPath, info: info, }) return nil }) if err != nil { return nil, nil, err } return files, omitted, nil } type zipError struct { verb, path string err error } func (e *zipError) Error() string { if e.path == "" { return fmt.Sprintf("%s: %v", e.verb, e.err) } else { return fmt.Sprintf("%s %s: %v", e.verb, e.path, e.err) } } func (e *zipError) Unwrap() error { return e.err } // strToFold returns a string with the property that // // strings.EqualFold(s, t) iff strToFold(s) == strToFold(t) // // This lets us test a large set of strings for fold-equivalent // duplicates without making a quadratic number of calls // to EqualFold. Note that strings.ToUpper and strings.ToLower // do not have the desired property in some corner cases. func strToFold(s string) string { // Fast path: all ASCII, no upper case. // Most paths look like this already. for i := 0; i < len(s); i++ { c := s[i] if c >= utf8.RuneSelf || 'A' <= c && c <= 'Z' { goto Slow } } return s Slow: var buf bytes.Buffer for _, r := range s { // SimpleFold(x) cycles to the next equivalent rune > x // or wraps around to smaller values. Iterate until it wraps, // and we've found the minimum value. for { r0 := r r = unicode.SimpleFold(r0) if r <= r0 { break } } // Exception to allow fast path above: A-Z => a-z if 'A' <= r && r <= 'Z' { r += 'a' - 'A' } buf.WriteRune(r) } return buf.String() } mod-0.19.0/zip/zip_test.go000066400000000000000000001511741463702072700153730ustar00rootroot00000000000000// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package zip_test import ( "archive/zip" "bytes" "crypto/sha256" "encoding/hex" "errors" "fmt" "io" "os" "os/exec" "path" "path/filepath" "runtime" "strings" "sync" "sync/atomic" "testing" "time" "golang.org/x/mod/module" "golang.org/x/mod/sumdb/dirhash" modzip "golang.org/x/mod/zip" "golang.org/x/tools/txtar" ) const emptyHash = "h1:47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=" var gitOnce struct { path string err error sync.Once } func init() { if os.Getenv("GO_BUILDER_NAME") != "" || os.Getenv("GIT_TRACE_CURL") == "1" { // Enable extra Git logging to diagnose networking issues. // (These environment variables will be inherited by subprocesses.) os.Setenv("GIT_TRACE_CURL", "1") os.Setenv("GIT_TRACE_CURL_NO_DATA", "1") os.Setenv("GIT_REDACT_COOKIES", "o,SSO,GSSO_Uberproxy") } } // gitPath returns the path to a usable "git" command, // or a non-nil error. func gitPath() (string, error) { gitOnce.Do(func() { path, err := exec.LookPath("git") if err != nil { gitOnce.err = err return } if runtime.GOOS == "plan9" { gitOnce.err = errors.New("plan9 git does not support the full git command line") } gitOnce.path = path }) return gitOnce.path, gitOnce.err } func mustHaveGit(t testing.TB) { if _, err := gitPath(); err != nil { t.Helper() t.Skipf("skipping: %v", err) } } type testParams struct { path, version, wantErr, hash string archive *txtar.Archive } // readTest loads a test from a txtar file. The comment section of the file // should contain lines with key=value pairs. Valid keys are the field names // from testParams. 
func readTest(file string) (testParams, error) { var test testParams var err error test.archive, err = txtar.ParseFile(file) if err != nil { return testParams{}, err } lines := strings.Split(string(test.archive.Comment), "\n") for n, line := range lines { n++ // report line numbers starting with 1 if i := strings.IndexByte(line, '#'); i >= 0 { line = line[:i] } line = strings.TrimSpace(line) if line == "" { continue } eq := strings.IndexByte(line, '=') if eq < 0 { return testParams{}, fmt.Errorf("%s:%d: missing = separator", file, n) } key, value := strings.TrimSpace(line[:eq]), strings.TrimSpace(line[eq+1:]) switch key { case "path": test.path = value case "version": test.version = value case "wantErr": test.wantErr = value case "hash": test.hash = value default: return testParams{}, fmt.Errorf("%s:%d: unknown key %q", file, n, key) } } return test, nil } func extractTxtarToTempDir(t testing.TB, arc *txtar.Archive) (dir string, err error) { dir = t.TempDir() for _, f := range arc.Files { filePath := filepath.Join(dir, f.Name) if err := os.MkdirAll(filepath.Dir(filePath), 0777); err != nil { return "", err } if err := os.WriteFile(filePath, f.Data, 0666); err != nil { return "", err } } return dir, nil } func extractTxtarToTempZip(t *testing.T, arc *txtar.Archive) (zipPath string, err error) { zipPath = filepath.Join(t.TempDir(), "txtar.zip") zipFile, err := os.Create(zipPath) if err != nil { return "", err } defer func() { if cerr := zipFile.Close(); err == nil && cerr != nil { err = cerr } }() zw := zip.NewWriter(zipFile) for _, f := range arc.Files { zf, err := zw.Create(f.Name) if err != nil { return "", err } if _, err := zf.Write(f.Data); err != nil { return "", err } } if err := zw.Close(); err != nil { return "", err } return zipFile.Name(), nil } type fakeFile struct { name string size uint64 data []byte // if nil, Open will access a sequence of 0-bytes } func (f fakeFile) Path() string { return f.name } func (f fakeFile) Lstat() (os.FileInfo, error) { 
return fakeFileInfo{f}, nil } func (f fakeFile) Open() (io.ReadCloser, error) { if f.data != nil { return io.NopCloser(bytes.NewReader(f.data)), nil } if f.size >= uint64(modzip.MaxZipFile<<1) { return nil, fmt.Errorf("cannot open fakeFile of size %d", f.size) } return io.NopCloser(io.LimitReader(zeroReader{}, int64(f.size))), nil } type fakeFileInfo struct { f fakeFile } func (fi fakeFileInfo) Name() string { return path.Base(fi.f.name) } func (fi fakeFileInfo) Size() int64 { return int64(fi.f.size) } func (fi fakeFileInfo) Mode() os.FileMode { return 0644 } func (fi fakeFileInfo) ModTime() time.Time { return time.Time{} } func (fi fakeFileInfo) IsDir() bool { return false } func (fi fakeFileInfo) Sys() interface{} { return nil } type zeroReader struct{} func (r zeroReader) Read(b []byte) (int, error) { for i := range b { b[i] = 0 } return len(b), nil } func formatCheckedFiles(cf modzip.CheckedFiles) string { buf := &bytes.Buffer{} fmt.Fprintf(buf, "valid:\n") for _, f := range cf.Valid { fmt.Fprintln(buf, f) } fmt.Fprintf(buf, "\nomitted:\n") for _, f := range cf.Omitted { fmt.Fprintf(buf, "%s: %v\n", f.Path, f.Err) } fmt.Fprintf(buf, "\ninvalid:\n") for _, f := range cf.Invalid { fmt.Fprintf(buf, "%s: %v\n", f.Path, f.Err) } return buf.String() } // TestCheckFiles verifies behavior of CheckFiles. Note that CheckFiles is also // covered by TestCreate, TestCreateDir, and TestCreateSizeLimits, so this test // focuses on how multiple errors and omissions are reported, rather than trying // to cover every case. func TestCheckFiles(t *testing.T) { testPaths, err := filepath.Glob(filepath.FromSlash("testdata/check_files/*.txt")) if err != nil { t.Fatal(err) } for _, testPath := range testPaths { testPath := testPath name := strings.TrimSuffix(filepath.Base(testPath), ".txt") t.Run(name, func(t *testing.T) { t.Parallel() // Load the test. 
test, err := readTest(testPath) if err != nil { t.Fatal(err) } files := make([]modzip.File, 0, len(test.archive.Files)) var want string for _, tf := range test.archive.Files { if tf.Name == "want" { want = string(tf.Data) continue } files = append(files, fakeFile{ name: tf.Name, size: uint64(len(tf.Data)), data: tf.Data, }) } // Check the files. cf, _ := modzip.CheckFiles(files) got := formatCheckedFiles(cf) if got != want { t.Errorf("got:\n%s\n\nwant:\n%s", got, want) } // Check that the error (if any) is just a list of invalid files. // SizeError is not covered in this test. var gotErr, wantErr string if len(cf.Invalid) > 0 { wantErr = modzip.FileErrorList(cf.Invalid).Error() } if err := cf.Err(); err != nil { gotErr = err.Error() } if gotErr != wantErr { t.Errorf("got error:\n%s\n\nwant error:\n%s", gotErr, wantErr) } }) } } // TestCheckDir verifies behavior of the CheckDir function. Note that CheckDir // relies on CheckFiles and listFilesInDir (called by CreateFromDir), so this // test focuses on how multiple errors and omissions are reported, rather than // trying to cover every case. func TestCheckDir(t *testing.T) { testPaths, err := filepath.Glob(filepath.FromSlash("testdata/check_dir/*.txt")) if err != nil { t.Fatal(err) } for _, testPath := range testPaths { testPath := testPath name := strings.TrimSuffix(filepath.Base(testPath), ".txt") t.Run(name, func(t *testing.T) { t.Parallel() // Load the test and extract the files to a temporary directory. test, err := readTest(testPath) if err != nil { t.Fatal(err) } var want string for i, f := range test.archive.Files { if f.Name == "want" { want = string(f.Data) test.archive.Files = append(test.archive.Files[:i], test.archive.Files[i+1:]...) break } } tmpDir, err := extractTxtarToTempDir(t, test.archive) if err != nil { t.Fatal(err) } // Check the directory. 
cf, err := modzip.CheckDir(tmpDir) if err != nil && err.Error() != cf.Err().Error() { // I/O error t.Fatal(err) } rep := strings.NewReplacer(tmpDir, "$work", `'\''`, `'\''`, string(os.PathSeparator), "/") got := rep.Replace(formatCheckedFiles(cf)) if got != want { t.Errorf("got:\n%s\n\nwant:\n%s", got, want) } // Check that the error (if any) is just a list of invalid files. // SizeError is not covered in this test. var gotErr, wantErr string if len(cf.Invalid) > 0 { wantErr = modzip.FileErrorList(cf.Invalid).Error() } if err := cf.Err(); err != nil { gotErr = err.Error() } if gotErr != wantErr { t.Errorf("got error:\n%s\n\nwant error:\n%s", gotErr, wantErr) } }) } } // TestCheckZip verifies behavior of CheckZip. Note that CheckZip is also // covered by TestUnzip, so this test focuses on how multiple errors are // reported, rather than trying to cover every case. func TestCheckZip(t *testing.T) { testPaths, err := filepath.Glob(filepath.FromSlash("testdata/check_zip/*.txt")) if err != nil { t.Fatal(err) } for _, testPath := range testPaths { testPath := testPath name := strings.TrimSuffix(filepath.Base(testPath), ".txt") t.Run(name, func(t *testing.T) { t.Parallel() // Load the test and extract the files to a temporary zip file. test, err := readTest(testPath) if err != nil { t.Fatal(err) } var want string for i, f := range test.archive.Files { if f.Name == "want" { want = string(f.Data) test.archive.Files = append(test.archive.Files[:i], test.archive.Files[i+1:]...) break } } tmpZipPath, err := extractTxtarToTempZip(t, test.archive) if err != nil { t.Fatal(err) } // Check the zip. m := module.Version{Path: test.path, Version: test.version} cf, err := modzip.CheckZip(m, tmpZipPath) if err != nil && err.Error() != cf.Err().Error() { // I/O error t.Fatal(err) } got := formatCheckedFiles(cf) if got != want { t.Errorf("got:\n%s\n\nwant:\n%s", got, want) } // Check that the error (if any) is just a list of invalid files. // SizeError is not covered in this test. 
var gotErr, wantErr string if len(cf.Invalid) > 0 { wantErr = modzip.FileErrorList(cf.Invalid).Error() } if err := cf.Err(); err != nil { gotErr = err.Error() } if gotErr != wantErr { t.Errorf("got error:\n%s\n\nwant error:\n%s", gotErr, wantErr) } }) } } func TestCreate(t *testing.T) { testDir := filepath.FromSlash("testdata/create") testEntries, err := os.ReadDir(testDir) if err != nil { t.Fatal(err) } for _, testEntry := range testEntries { testEntry := testEntry base := filepath.Base(testEntry.Name()) if filepath.Ext(base) != ".txt" { continue } t.Run(base[:len(base)-len(".txt")], func(t *testing.T) { t.Parallel() // Load the test. testPath := filepath.Join(testDir, testEntry.Name()) test, err := readTest(testPath) if err != nil { t.Fatal(err) } // Write zip to temporary file. tmpZip, err := os.CreateTemp(t.TempDir(), "TestCreate-*.zip") if err != nil { t.Fatal(err) } tmpZipPath := tmpZip.Name() defer tmpZip.Close() m := module.Version{Path: test.path, Version: test.version} files := make([]modzip.File, len(test.archive.Files)) for i, tf := range test.archive.Files { files[i] = fakeFile{ name: tf.Name, size: uint64(len(tf.Data)), data: tf.Data, } } if err := modzip.Create(tmpZip, m, files); err != nil { if test.wantErr == "" { t.Fatalf("unexpected error: %v", err) } else if !strings.Contains(err.Error(), test.wantErr) { t.Fatalf("got error %q; want error containing %q", err.Error(), test.wantErr) } else { return } } else if test.wantErr != "" { t.Fatalf("unexpected success; wanted error containing %q", test.wantErr) } if err := tmpZip.Close(); err != nil { t.Fatal(err) } // Hash zip file, compare with known value. 
if hash, err := dirhash.HashZip(tmpZipPath, dirhash.Hash1); err != nil { t.Fatal(err) } else if hash != test.hash { t.Fatalf("got hash: %q\nwant: %q", hash, test.hash) } }) } } func TestCreateFromDir(t *testing.T) { testDir := filepath.FromSlash("testdata/create_from_dir") testEntries, err := os.ReadDir(testDir) if err != nil { t.Fatal(err) } for _, testEntry := range testEntries { testEntry := testEntry base := filepath.Base(testEntry.Name()) if filepath.Ext(base) != ".txt" { continue } t.Run(base[:len(base)-len(".txt")], func(t *testing.T) { t.Parallel() // Load the test. testPath := filepath.Join(testDir, testEntry.Name()) test, err := readTest(testPath) if err != nil { t.Fatal(err) } // Write files to a temporary directory. tmpDir, err := extractTxtarToTempDir(t, test.archive) if err != nil { t.Fatal(err) } // Create zip from the directory. tmpZip, err := os.CreateTemp(t.TempDir(), "TestCreateFromDir-*.zip") if err != nil { t.Fatal(err) } tmpZipPath := tmpZip.Name() defer tmpZip.Close() m := module.Version{Path: test.path, Version: test.version} if err := modzip.CreateFromDir(tmpZip, m, tmpDir); err != nil { if test.wantErr == "" { t.Fatalf("unexpected error: %v", err) } else if !strings.Contains(err.Error(), test.wantErr) { t.Fatalf("got error %q; want error containing %q", err, test.wantErr) } else { return } } else if test.wantErr != "" { t.Fatalf("unexpected success; want error containing %q", test.wantErr) } // Hash zip file, compare with known value. 
if hash, err := dirhash.HashZip(tmpZipPath, dirhash.Hash1); err != nil { t.Fatal(err) } else if hash != test.hash { t.Fatalf("got hash: %q\nwant: %q", hash, test.hash) } }) } } func TestCreateFromDirSpecial(t *testing.T) { for _, test := range []struct { desc string setup func(t *testing.T, tmpDir string) string wantHash string }{ { desc: "ignore_empty_dir", setup: func(t *testing.T, tmpDir string) string { if err := os.Mkdir(filepath.Join(tmpDir, "empty"), 0777); err != nil { t.Fatal(err) } return tmpDir }, wantHash: emptyHash, }, { desc: "ignore_symlink", setup: func(t *testing.T, tmpDir string) string { if err := os.Symlink(tmpDir, filepath.Join(tmpDir, "link")); err != nil { switch runtime.GOOS { case "aix", "android", "darwin", "dragonfly", "freebsd", "illumos", "ios", "js", "linux", "netbsd", "openbsd", "solaris": // Symlinks in tmpDir are always expected to work on these platforms. t.Fatal(err) default: t.Skipf("could not create symlink: %v", err) } } return tmpDir }, wantHash: emptyHash, }, { desc: "dir_is_vendor", setup: func(t *testing.T, tmpDir string) string { vendorDir := filepath.Join(tmpDir, "vendor") if err := os.Mkdir(vendorDir, 0777); err != nil { t.Fatal(err) } goModData := []byte("module example.com/m\n\ngo 1.13\n") if err := os.WriteFile(filepath.Join(vendorDir, "go.mod"), goModData, 0666); err != nil { t.Fatal(err) } return vendorDir }, wantHash: "h1:XduFAgX/GaspZa8Jv4pfzoGEzNaU/r88PiCunijw5ok=", }, } { t.Run(test.desc, func(t *testing.T) { dir := test.setup(t, t.TempDir()) tmpZipFile, err := os.CreateTemp(t.TempDir(), "TestCreateFromDir-*.zip") if err != nil { t.Fatal(err) } tmpZipPath := tmpZipFile.Name() defer tmpZipFile.Close() m := module.Version{Path: "example.com/m", Version: "v1.0.0"} if err := modzip.CreateFromDir(tmpZipFile, m, dir); err != nil { t.Fatal(err) } if err := tmpZipFile.Close(); err != nil { t.Fatal(err) } if hash, err := dirhash.HashZip(tmpZipPath, dirhash.Hash1); err != nil { t.Fatal(err) } else if hash != test.wantHash 
{ t.Fatalf("got hash %q; want %q", hash, emptyHash) } }) } } func TestUnzip(t *testing.T) { testDir := filepath.FromSlash("testdata/unzip") testEntries, err := os.ReadDir(testDir) if err != nil { t.Fatal(err) } for _, testEntry := range testEntries { base := filepath.Base(testEntry.Name()) if filepath.Ext(base) != ".txt" { continue } t.Run(base[:len(base)-len(".txt")], func(t *testing.T) { // Load the test. testPath := filepath.Join(testDir, testEntry.Name()) test, err := readTest(testPath) if err != nil { t.Fatal(err) } // Convert txtar to temporary zip file. tmpZipPath, err := extractTxtarToTempZip(t, test.archive) if err != nil { t.Fatal(err) } // Extract to a temporary directory. tmpDir := t.TempDir() m := module.Version{Path: test.path, Version: test.version} if err := modzip.Unzip(tmpDir, m, tmpZipPath); err != nil { if test.wantErr == "" { t.Fatalf("unexpected error: %v", err) } else if !strings.Contains(err.Error(), test.wantErr) { t.Fatalf("got error %q; want error containing %q", err.Error(), test.wantErr) } else { return } } else if test.wantErr != "" { t.Fatalf("unexpected success; wanted error containing %q", test.wantErr) } // Hash the directory, compare to known value. prefix := fmt.Sprintf("%s@%s/", test.path, test.version) if hash, err := dirhash.HashDir(tmpDir, prefix, dirhash.Hash1); err != nil { t.Fatal(err) } else if hash != test.hash { t.Fatalf("got hash %q\nwant: %q", hash, test.hash) } }) } } type sizeLimitTest struct { desc string files []modzip.File wantErr string wantCheckFilesErr string wantCreateErr string wantCheckZipErr string wantUnzipErr string } // sizeLimitTests is shared by TestCreateSizeLimits and TestUnzipSizeLimits. 
var sizeLimitTests = [...]sizeLimitTest{
	{
		desc: "one_large",
		files: []modzip.File{fakeFile{
			name: "large.go",
			size: modzip.MaxZipFile,
		}},
	}, {
		desc: "one_too_large",
		files: []modzip.File{fakeFile{
			name: "large.go",
			size: modzip.MaxZipFile + 1,
		}},
		wantCheckFilesErr: "module source tree too large",
		wantCreateErr:     "module source tree too large",
		wantCheckZipErr:   "total uncompressed size of module contents too large",
		wantUnzipErr:      "total uncompressed size of module contents too large",
	}, {
		desc: "total_large",
		files: []modzip.File{
			fakeFile{
				name: "small.go",
				size: 10,
			},
			fakeFile{
				name: "large.go",
				size: modzip.MaxZipFile - 10,
			},
		},
	}, {
		desc: "total_too_large",
		files: []modzip.File{
			fakeFile{
				name: "small.go",
				size: 10,
			},
			fakeFile{
				name: "large.go",
				size: modzip.MaxZipFile - 9,
			},
		},
		wantCheckFilesErr: "module source tree too large",
		wantCreateErr:     "module source tree too large",
		wantCheckZipErr:   "total uncompressed size of module contents too large",
		wantUnzipErr:      "total uncompressed size of module contents too large",
	}, {
		desc: "large_gomod",
		files: []modzip.File{fakeFile{
			name: "go.mod",
			size: modzip.MaxGoMod,
		}},
	}, {
		desc: "too_large_gomod",
		files: []modzip.File{fakeFile{
			name: "go.mod",
			size: modzip.MaxGoMod + 1,
		}},
		wantErr: "go.mod file too large",
	}, {
		desc: "large_license",
		files: []modzip.File{fakeFile{
			name: "LICENSE",
			size: modzip.MaxLICENSE,
		}},
	}, {
		desc: "too_large_license",
		files: []modzip.File{fakeFile{
			name: "LICENSE",
			size: modzip.MaxLICENSE + 1,
		}},
		wantErr: "LICENSE file too large",
	},
}

// sizeLimitVersion is the module version used by all size-limit tests.
var sizeLimitVersion = module.Version{Path: "example.com/large", Version: "v1.0.0"}

// TestCreateSizeLimits checks that CheckFiles and Create reject inputs that
// exceed the documented size limits (and accept inputs exactly at them).
func TestCreateSizeLimits(t *testing.T) {
	if testing.Short() {
		t.Skip("creating large files takes time")
	}
	// Two extra cases only make sense on the Create side and are appended
	// to a copy of the shared table here.
	tests := append(sizeLimitTests[:], sizeLimitTest{
		// negative file size may happen when size is represented as uint64
		// but is cast to int64, as is the case in zip files.
		desc: "negative",
		files: []modzip.File{fakeFile{
			name: "neg.go",
			size: 0x8000000000000000,
		}},
		wantErr: "module source tree too large",
	}, sizeLimitTest{
		desc: "size_is_a_lie",
		files: []modzip.File{fakeFile{
			name: "lie.go",
			size: 1,
			data: []byte(`package large`),
		}},
		wantCreateErr: "larger than declared size",
	})

	for _, test := range tests {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()
			// CheckFiles: the specific expectation falls back to wantErr.
			wantCheckFilesErr := test.wantCheckFilesErr
			if wantCheckFilesErr == "" {
				wantCheckFilesErr = test.wantErr
			}
			if _, err := modzip.CheckFiles(test.files); err == nil && wantCheckFilesErr != "" {
				t.Fatalf("CheckFiles: unexpected success; want error containing %q", wantCheckFilesErr)
			} else if err != nil && wantCheckFilesErr == "" {
				t.Fatalf("CheckFiles: got error %q; want success", err)
			} else if err != nil && !strings.Contains(err.Error(), wantCheckFilesErr) {
				t.Fatalf("CheckFiles: got error %q; want error containing %q", err, wantCheckFilesErr)
			}

			// Create: same fallback scheme as CheckFiles above.
			wantCreateErr := test.wantCreateErr
			if wantCreateErr == "" {
				wantCreateErr = test.wantErr
			}
			if err := modzip.Create(io.Discard, sizeLimitVersion, test.files); err == nil && wantCreateErr != "" {
				t.Fatalf("Create: unexpected success; want error containing %q", wantCreateErr)
			} else if err != nil && wantCreateErr == "" {
				t.Fatalf("Create: got error %q; want success", err)
			} else if err != nil && !strings.Contains(err.Error(), wantCreateErr) {
				t.Fatalf("Create: got error %q; want error containing %q", err, wantCreateErr)
			}
		})
	}
}

// TestUnzipSizeLimits writes each sizeLimitTests case into a real zip file
// (bypassing Create's own checks) and verifies that CheckZip and Unzip
// enforce the size limits on the reading side.
func TestUnzipSizeLimits(t *testing.T) {
	if testing.Short() {
		t.Skip("creating large files takes time")
	}
	for _, test := range sizeLimitTests {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()
			tmpZipFile, err := os.CreateTemp(t.TempDir(), "TestUnzipSizeLimits-*.zip")
			if err != nil {
				t.Fatal(err)
			}
			tmpZipPath := tmpZipFile.Name()
			defer tmpZipFile.Close()

			// Build the zip directly with archive/zip so oversized content
			// actually lands in the file.
			zw := zip.NewWriter(tmpZipFile)
			prefix := fmt.Sprintf("%s@%s/", sizeLimitVersion.Path, sizeLimitVersion.Version)
			for _, tf := range test.files {
				zf, err := zw.Create(prefix + tf.Path())
				if err != nil {
					t.Fatal(err)
				}
				rc, err := tf.Open()
				if err != nil {
					t.Fatal(err)
				}
				_, err = io.Copy(zf, rc)
				rc.Close()
				if err != nil {
					t.Fatal(err)
				}
			}
			if err := zw.Close(); err != nil {
				t.Fatal(err)
			}
			if err := tmpZipFile.Close(); err != nil {
				t.Fatal(err)
			}

			// CheckZip: specific expectation falls back to wantErr.
			wantCheckZipErr := test.wantCheckZipErr
			if wantCheckZipErr == "" {
				wantCheckZipErr = test.wantErr
			}
			cf, err := modzip.CheckZip(sizeLimitVersion, tmpZipPath)
			if err == nil {
				err = cf.Err()
			}
			if err == nil && wantCheckZipErr != "" {
				t.Fatalf("CheckZip: unexpected success; want error containing %q", wantCheckZipErr)
			} else if err != nil && wantCheckZipErr == "" {
				t.Fatalf("CheckZip: got error %q; want success", err)
			} else if err != nil && !strings.Contains(err.Error(), wantCheckZipErr) {
				t.Fatalf("CheckZip: got error %q; want error containing %q", err, wantCheckZipErr)
			}

			// Unzip: specific expectation falls back to wantErr.
			wantUnzipErr := test.wantUnzipErr
			if wantUnzipErr == "" {
				wantUnzipErr = test.wantErr
			}
			if err := modzip.Unzip(t.TempDir(), sizeLimitVersion, tmpZipPath); err == nil && wantUnzipErr != "" {
				t.Fatalf("Unzip: unexpected success; want error containing %q", wantUnzipErr)
			} else if err != nil && wantUnzipErr == "" {
				t.Fatalf("Unzip: got error %q; want success", err)
			} else if err != nil && !strings.Contains(err.Error(), wantUnzipErr) {
				t.Fatalf("Unzip: got error %q; want error containing %q", err, wantUnzipErr)
			}
		})
	}
}

// TestUnzipSizeLimitsSpecial covers limit cases that can't be expressed
// through the shared table: a zip exactly at / above the maximum file size,
// and a zip whose recorded uncompressed size is smaller than reality.
func TestUnzipSizeLimitsSpecial(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test; creating large files takes time")
	}

	for _, test := range []struct {
		desc               string
		wantErr1, wantErr2 string
		m                  module.Version
		writeZip           func(t *testing.T, zipFile *os.File)
	}{
		{
			desc: "large_zip",
			m:    module.Version{Path: "example.com/m", Version: "v1.0.0"},
			writeZip: func(t *testing.T, zipFile *os.File) {
				if err := zipFile.Truncate(modzip.MaxZipFile); err != nil {
					t.Fatal(err)
				}
			},
			// this is not an error we care about; we're just testing whether
			// Unzip checks the size of the file before opening.
			// It's harder to create a valid zip file of exactly the right size.
			wantErr1: "not a valid zip file",
		}, {
			desc: "too_large_zip",
			m:    module.Version{Path: "example.com/m", Version: "v1.0.0"},
			writeZip: func(t *testing.T, zipFile *os.File) {
				if err := zipFile.Truncate(modzip.MaxZipFile + 1); err != nil {
					t.Fatal(err)
				}
			},
			wantErr1: "module zip file is too large",
		}, {
			desc: "size_is_a_lie",
			m:    module.Version{Path: "example.com/m", Version: "v1.0.0"},
			writeZip: func(t *testing.T, zipFile *os.File) {
				// Create a normal zip file in memory containing one file full of zero
				// bytes. Use a distinctive size so we can find it later.
				zipBuf := &bytes.Buffer{}
				zw := zip.NewWriter(zipBuf)
				f, err := zw.Create("example.com/m@v1.0.0/go.mod")
				if err != nil {
					t.Fatal(err)
				}
				realSize := 0x0BAD
				buf := make([]byte, realSize)
				if _, err := f.Write(buf); err != nil {
					t.Fatal(err)
				}
				if err := zw.Close(); err != nil {
					t.Fatal(err)
				}

				// Replace the uncompressed size of the file. As a shortcut, we just
				// search-and-replace the byte sequence. It should occur twice because
				// the 32- and 64-byte sizes are stored separately. All multi-byte
				// values are little-endian.
				zipData := zipBuf.Bytes()
				realSizeData := []byte{0xAD, 0x0B}
				fakeSizeData := []byte{0xAC, 0x00}
				s := zipData
				n := 0
				for {
					if i := bytes.Index(s, realSizeData); i < 0 {
						break
					} else {
						s = s[i:]
					}
					copy(s[:len(fakeSizeData)], fakeSizeData)
					n++
				}
				if n != 2 {
					t.Fatalf("replaced size %d times; expected 2", n)
				}

				// Write the modified zip to the actual file.
				if _, err := zipFile.Write(zipData); err != nil {
					t.Fatal(err)
				}
			},
			// wantErr1 is for 1.18 and earlier,
			// wantErr2 is for 1.19 and later.
			wantErr1: "uncompressed size of file example.com/m@v1.0.0/go.mod is larger than declared size",
			wantErr2: "not a valid zip file",
		},
	} {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()
			tmpZipFile, err := os.CreateTemp(t.TempDir(), "TestUnzipSizeLimitsSpecial-*.zip")
			if err != nil {
				t.Fatal(err)
			}
			tmpZipPath := tmpZipFile.Name()
			defer tmpZipFile.Close()

			test.writeZip(t, tmpZipFile)
			if err := tmpZipFile.Close(); err != nil {
				t.Fatal(err)
			}

			// want formats the acceptable error(s) for failure messages.
			want := func() string {
				s := fmt.Sprintf("%q", test.wantErr1)
				if test.wantErr2 != "" {
					s = fmt.Sprintf("%q or %q", test.wantErr1, test.wantErr2)
				}
				return s
			}
			// An error is accepted if it matches either wantErr1 or wantErr2.
			if err := modzip.Unzip(t.TempDir(), test.m, tmpZipPath); err == nil && test.wantErr1 != "" {
				t.Fatalf("unexpected success; want error containing %s", want())
			} else if err != nil && test.wantErr1 == "" {
				t.Fatalf("got error %q; want success", err)
			} else if err != nil && !strings.Contains(err.Error(), test.wantErr1) &&
				(test.wantErr2 == "" || !strings.Contains(err.Error(), test.wantErr2)) {
				t.Fatalf("got error %q; want error containing %s", err, want())
			}
		})
	}
}

// TestVCS clones a repository, creates a zip for a known version,
// and verifies the zip file itself has the same SHA-256 hash as the one
// 'go mod download' produces.
//
// This test is intended to build confidence that this implementation produces
// the same output as the go command, given the same VCS zip input. This is
// not intended to be a complete conformance test. The code that produces zip
// archives from VCS repos is based on the go command, but it's for testing
// only, and we don't export it.
//
// Note that we test the hash of the zip file itself. This is stricter than
// testing the hash of the content, which is what we've promised users.
// It's okay if the zip hash changes without changing the content hash, but
// we should not let that happen accidentally.
func TestVCS(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping VCS cloning in -short mode")
	}

	// After downloadErrorLimit network failures, fail instead of skipping so
	// a permanently broken upstream doesn't silently erase test coverage.
	var downloadErrorCount int32
	const downloadErrorLimit = 3

	_, gitErr := gitPath()
	_, hgErr := exec.LookPath("hg")
	haveVCS := map[string]bool{
		"git": gitErr == nil,
		"hg":  hgErr == nil,
	}

	for _, test := range []struct {
		m                            module.Version
		vcs, url, subdir, rev        string
		wantContentHash, wantZipHash string
	}{
		// Simple tests: all versions of rsc.io/quote + newer major versions
		{
			m:               module.Version{Path: "rsc.io/quote", Version: "v1.0.0"},
			vcs:             "git",
			url:             "https://github.com/rsc/quote",
			rev:             "v1.0.0",
			wantContentHash: "h1:haUSojyo3j2M9g7CEUFG8Na09dtn7QKxvPGaPVQdGwM=",
			wantZipHash:     "5c08ba2c09a364f93704aaa780e7504346102c6ef4fe1333a11f09904a732078",
		}, {
			m:               module.Version{Path: "rsc.io/quote", Version: "v1.1.0"},
			vcs:             "git",
			url:             "https://github.com/rsc/quote",
			rev:             "v1.1.0",
			wantContentHash: "h1:n/ElL9GOlVEwL0mVjzaYj0UxTI/TX9aQ7lR5LHqP/Rw=",
			wantZipHash:     "730a5ae6e5c4e216e4f84bb93aa9785a85630ad73f96954ebb5f9daa123dcaa9",
		}, {
			m:               module.Version{Path: "rsc.io/quote", Version: "v1.2.0"},
			vcs:             "git",
			url:             "https://github.com/rsc/quote",
			rev:             "v1.2.0",
			wantContentHash: "h1:fFMCNi0A97hfNrtUZVQKETbuc3h7bmfFQHnjutpPYCg=",
			wantZipHash:     "fe1bd62652e9737a30d6b7fd396ea13e54ad13fb05f295669eb63d6d33290b06",
		}, {
			m:               module.Version{Path: "rsc.io/quote", Version: "v1.2.1"},
			vcs:             "git",
			url:             "https://github.com/rsc/quote",
			rev:             "v1.2.1",
			wantContentHash: "h1:l+HtgC05eds8qgXNApuv6g1oK1q3B144BM5li1akqXY=",
			wantZipHash:     "9f0e74de55a6bd20c1567a81e707814dc221f07df176af2a0270392c6faf32fd",
		}, {
			m:               module.Version{Path: "rsc.io/quote", Version: "v1.3.0"},
			vcs:             "git",
			url:             "https://github.com/rsc/quote",
			rev:             "v1.3.0",
			wantContentHash: "h1:aPUoHx/0Cd7BTZs4SAaknT4TaKryH766GcFTvJjVbHU=",
			wantZipHash:     "03872ee7d6747bc2ee0abadbd4eb09e60f6df17d0a6142264abe8a8a00af50e7",
		}, {
			m:               module.Version{Path: "rsc.io/quote", Version: "v1.4.0"},
			vcs:             "git",
			url:             "https://github.com/rsc/quote",
			rev:             "v1.4.0",
			wantContentHash: "h1:tYuJspOzwTRMUOX6qmSDRTEKFVV80GM0/l89OLZuVNg=",
			wantZipHash:     "f60be8193c607bf197da01da4bedb3d683fe84c30de61040eb5d7afaf7869f2e",
		}, {
			m:               module.Version{Path: "rsc.io/quote", Version: "v1.5.0"},
			vcs:             "git",
			url:             "https://github.com/rsc/quote",
			rev:             "v1.5.0",
			wantContentHash: "h1:mVjf/WMWxfIw299sOl/O3EXn5qEaaJPMDHMsv7DBDlw=",
			wantZipHash:     "a2d281834ce159703540da94425fa02c7aec73b88b560081ed0d3681bfe9cd1f",
		}, {
			m:               module.Version{Path: "rsc.io/quote", Version: "v1.5.1"},
			vcs:             "git",
			url:             "https://github.com/rsc/quote",
			rev:             "v1.5.1",
			wantContentHash: "h1:ptSemFtffEBvMed43o25vSUpcTVcqxfXU8Jv0sfFVJs=",
			wantZipHash:     "4ecd78a6d9f571e84ed2baac1688fd150400db2c5b017b496c971af30aaece02",
		}, {
			m:               module.Version{Path: "rsc.io/quote", Version: "v1.5.2"},
			vcs:             "git",
			url:             "https://github.com/rsc/quote",
			rev:             "v1.5.2",
			wantContentHash: "h1:w5fcysjrx7yqtD/aO+QwRjYZOKnaM9Uh2b40tElTs3Y=",
			wantZipHash:     "643fcf8ef4e4cbb8f910622c42df3f9a81f3efe8b158a05825a81622c121ca0a",
		}, {
			m:               module.Version{Path: "rsc.io/quote", Version: "v1.5.3-pre1"},
			vcs:             "git",
			url:             "https://github.com/rsc/quote",
			rev:             "v1.5.3-pre1",
			wantContentHash: "h1:c3EJ21kn75/hyrOL/Dvj45+ifxGFSY8Wf4WBcoWTxF0=",
			wantZipHash:     "24106f0f15384949df51fae5d34191bf120c3b80c1c904721ca2872cf83126b2",
		}, {
			m:               module.Version{Path: "rsc.io/quote/v2", Version: "v2.0.1"},
			vcs:             "git",
			url:             "https://github.com/rsc/quote",
			rev:             "v2.0.1",
			wantContentHash: "h1:DF8hmGbDhgiIa2tpqLjHLIKkJx6WjCtLEqZBAU+hACI=",
			wantZipHash:     "009ed42474a59526fe56a14a9dd02bd7f977d1bd3844398bd209d0da0484aade",
		}, {
			m:               module.Version{Path: "rsc.io/quote/v3", Version: "v3.0.0"},
			vcs:             "git",
			url:             "https://github.com/rsc/quote",
			rev:             "v3.0.0",
			subdir:          "v3",
			wantContentHash: "h1:OEIXClZHFMyx5FdatYfxxpNEvxTqHlu5PNdla+vSYGg=",
			wantZipHash:     "cf3ff89056b785d7b3ef3a10e984efd83b47d9e65eabe8098b927b3370d5c3eb",
		},

		// Test cases from vcs-test.golang.org
		{
			m:               module.Version{Path: "vcs-test.golang.org/git/v3pkg.git/v3", Version: "v3.0.0"},
			vcs:             "git",
			url:             "https://vcs-test.golang.org/git/v3pkg",
			rev:             "v3.0.0",
			wantContentHash: "h1:mZhljS1BaiW8lODR6wqY5pDxbhXja04rWPFXPwRAtvA=",
			wantZipHash:     "9c65f0d235e531008dc04e977f6fa5d678febc68679bb63d4148dadb91d3fe57",
		}, {
			m:               module.Version{Path: "vcs-test.golang.org/go/custom-hg-hello", Version: "v0.0.0-20171010233936-a8c8e7a40da9"},
			vcs:             "hg",
			url:             "https://vcs-test.golang.org/hg/custom-hg-hello",
			rev:             "a8c8e7a40da9",
			wantContentHash: "h1:LU6jFCbwn5VVgTcj+y4LspOpJHLZvl5TGPE+LwwpMw4=",
			wantZipHash:     "a1b12047da979d618c639ee98f370767a13d0507bd77785dc2f8dad66b40e2e6",
		},

		// Latest versions of selected golang.org/x repos
		{
			m:               module.Version{Path: "golang.org/x/arch", Version: "v0.0.0-20190927153633-4e8777c89be4"},
			vcs:             "git",
			url:             "https://go.googlesource.com/arch",
			rev:             "4e8777c89be4d9e61691fbe5d4e6c8838a7806f3",
			wantContentHash: "h1:QlVATYS7JBoZMVaf+cNjb90WD/beKVHnIxFKT4QaHVI=",
			wantZipHash:     "d17551a0c4957180ec1507065d13dcdd0f5cd8bfd7dd735fb81f64f3e2b31b68",
		}, {
			m:               module.Version{Path: "golang.org/x/blog", Version: "v0.0.0-20191017104857-0cd0cdff05c2"},
			vcs:             "git",
			url:             "https://go.googlesource.com/blog",
			rev:             "0cd0cdff05c251ad0c796cc94d7059e013311fc6",
			wantContentHash: "h1:IKGICrORhR1aH2xG/WqrnpggSNolSj5urQxggCfmj28=",
			wantZipHash:     "0fed6b400de54da34b52b464ef2cdff45167236aaaf9a99ba8eba8855036faff",
		}, {
			m:               module.Version{Path: "golang.org/x/crypto", Version: "v0.0.0-20191011191535-87dc89f01550"},
			vcs:             "git",
			url:             "https://go.googlesource.com/crypto",
			rev:             "87dc89f01550277dc22b74ffcf4cd89fa2f40f4c",
			wantContentHash: "h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=",
			wantZipHash:     "88e47aa05eb25c6abdad7387ccccfc39e74541896d87b7b1269e9dd2fa00100d",
		}, {
			m:               module.Version{Path: "golang.org/x/net", Version: "v0.0.0-20191014212845-da9a3fd4c582"},
			vcs:             "git",
			url:             "https://go.googlesource.com/net",
			rev:             "da9a3fd4c5820e74b24a6cb7fb438dc9b0dd377c",
			wantContentHash: "h1:p9xBe/w/OzkeYVKm234g55gMdD1nSIooTir5kV11kfA=",
			wantZipHash:     "34901a85e6c15475a40457c2393ce66fb0999accaf2d6aa5b64b4863751ddbde",
		}, {
			m:               module.Version{Path: "golang.org/x/sync", Version: "v0.0.0-20190911185100-cd5d95a43a6e"},
			vcs:             "git",
			url:             "https://go.googlesource.com/sync",
			rev:             "cd5d95a43a6e21273425c7ae415d3df9ea832eeb",
			wantContentHash: "h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=",
			wantZipHash:     "9c63fe51b0c533b258d3acc30d9319fe78679ce1a051109c9dea3105b93e2eef",
		}, {
			m:               module.Version{Path: "golang.org/x/sys", Version: "v0.0.0-20191010194322-b09406accb47"},
			vcs:             "git",
			url:             "https://go.googlesource.com/sys",
			rev:             "b09406accb4736d857a32bf9444cd7edae2ffa79",
			wantContentHash: "h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY=",
			wantZipHash:     "f26f2993757670b4d1fee3156d331513259757f17133a36966c158642c3f61df",
		}, {
			m:               module.Version{Path: "golang.org/x/talks", Version: "v0.0.0-20191010201600-067e0d331fee"},
			vcs:             "git",
			url:             "https://go.googlesource.com/talks",
			rev:             "067e0d331feee4f8d0fa17d47444db533bd904e7",
			wantContentHash: "h1:8fnBMBUwliuiHuzfFw6kSSx79AzQpqkjZi3FSNIoqYs=",
			wantZipHash:     "fab2129f3005f970dbf2247378edb3220f6bd36726acdc7300ae3bb0f129e2f2",
		}, {
			m:               module.Version{Path: "golang.org/x/tools", Version: "v0.0.0-20191017205301-920acffc3e65"},
			vcs:             "git",
			url:             "https://go.googlesource.com/tools",
			rev:             "920acffc3e65862cb002dae6b227b8d9695e3d29",
			wantContentHash: "h1:GwXwgmbrvlcHLDsENMqrQTTIC2C0kIPszsq929NruKI=",
			wantZipHash:     "7f0ab7466448190f8ad1b8cfb05787c3fb08f4a8f9953cd4b40a51c76ddebb28",
		}, {
			m:               module.Version{Path: "golang.org/x/tour", Version: "v0.0.0-20191002171047-6bb846ce41cd"},
			vcs:             "git",
			url:             "https://go.googlesource.com/tour",
			rev:             "6bb846ce41cdca087b14c8e3560a679691c424b6",
			wantContentHash: "h1:EUlK3Rq8iTkQERnCnveD654NvRJ/ZCM9XCDne+S5cJ8=",
			wantZipHash:     "d6a7e03e02e5f7714bd12653d319a3b0f6e1099c01b1f9a17bc3613fb31c9170",
		},
	} {
		test := test
		testName := strings.ReplaceAll(test.m.String(), "/", "_")
		t.Run(testName, func(t *testing.T) {
			if have, ok := haveVCS[test.vcs]; !ok {
				t.Fatalf("unknown vcs: %s", test.vcs)
			} else if !have {
				t.Skipf("no %s executable in path", test.vcs)
			}
			t.Parallel()

			repo, dl, err := downloadVCSZip(t, test.vcs, test.url, test.rev, test.subdir)
			if err != nil {
				// This may fail if there's a problem with the network or upstream
				// repository. The package being tested doesn't directly interact with
				// VCS tools; the test just does this to simulate what the go command
				// does. So an error should cause a skip instead of a failure. But we
				// should fail after too many errors so we don't lose test coverage
				// when something changes permanently.
				n := atomic.AddInt32(&downloadErrorCount, 1)
				if n < downloadErrorLimit {
					t.Skipf("failed to download zip from repository: %v", err)
				} else {
					t.Fatalf("failed to download zip from repository (repeated failure): %v", err)
				}
			}

			// Create a module zip from that archive.
			// (adapted from cmd/go/internal/modfetch.codeRepo.Zip)
			info, err := dl.Stat()
			if err != nil {
				t.Fatal(err)
			}
			zr, err := zip.NewReader(dl, info.Size())
			if err != nil {
				t.Fatal(err)
			}

			var files []modzip.File
			topPrefix := ""
			subdir := test.subdir
			if subdir != "" && !strings.HasSuffix(subdir, "/") {
				subdir += "/"
			}
			haveLICENSE := false
			for _, f := range zr.File {
				if !f.FileInfo().Mode().IsRegular() {
					continue
				}
				if topPrefix == "" {
					i := strings.Index(f.Name, "/")
					if i < 0 {
						t.Fatal("missing top-level directory prefix")
					}
					topPrefix = f.Name[:i+1]
				}
				if strings.HasSuffix(f.Name, "/") { // drop directory dummy entries
					continue
				}
				if !strings.HasPrefix(f.Name, topPrefix) {
					t.Fatal("zip file contains more than one top-level directory")
				}
				name := strings.TrimPrefix(f.Name, topPrefix)
				if !strings.HasPrefix(name, subdir) {
					continue
				}
				name = strings.TrimPrefix(name, subdir)
				if name == ".hg_archival.txt" {
					// Inserted by hg archive.
					// Not correct to drop from other version control systems, but too bad.
					continue
				}
				if name == "LICENSE" {
					haveLICENSE = true
				}
				files = append(files, zipFile{name: name, f: f})
			}
			// For a subdirectory module without its own LICENSE, hoist the
			// repo-root LICENSE, matching the go command's behavior.
			if !haveLICENSE && subdir != "" {
				license, err := downloadVCSFile(t, test.vcs, repo, test.rev, "LICENSE")
				if err != nil {
					t.Fatal(err)
				}
				files = append(files, fakeFile{
					name: "LICENSE",
					size: uint64(len(license)),
					data: license,
				})
			}

			tmpModZipFile, err := os.CreateTemp(t.TempDir(), "TestVCS-*.zip")
			if err != nil {
				t.Fatal(err)
			}
			tmpModZipPath := tmpModZipFile.Name()
			defer tmpModZipFile.Close()
			// Hash the zip bytes while writing them out.
			h := sha256.New()
			w := io.MultiWriter(tmpModZipFile, h)

			if err := modzip.Create(w, test.m, files); err != nil {
				t.Fatal(err)
			}
			if err := tmpModZipFile.Close(); err != nil {
				t.Fatal(err)
			}

			gotZipHash := hex.EncodeToString(h.Sum(nil))
			if test.wantZipHash != gotZipHash {
				// If the test fails because the hash of the zip file itself differs,
				// that may be okay as long as the hash of the data within the zip file
				// does not change. For example, we might change the compression,
				// order, or alignment of files without affecting the extracted output.
				// We shouldn't make such a change unintentionally though, so this
				// test will fail either way.
				if gotSum, err := dirhash.HashZip(tmpModZipPath, dirhash.Hash1); err == nil && test.wantContentHash != gotSum {
					t.Fatalf("zip content hash: got %s, want %s", gotSum, test.wantContentHash)
				} else {
					t.Fatalf("zip file hash: got %s, want %s", gotZipHash, test.wantZipHash)
				}
			}
		})
	}
}

// downloadVCSZip fetches rev (optionally restricted to subdir) from the VCS
// repository at url into a fresh clone under a temp dir, and returns the
// clone directory plus an open zip archive of that revision, positioned at
// offset 0. Supported vcs values are "git" and "hg".
func downloadVCSZip(t testing.TB, vcs, url, rev, subdir string) (repoDir string, dl *os.File, err error) {
	repoDir = t.TempDir()
	switch vcs {
	case "git":
		// Create a repository and download the revision we want.
		if _, err := run(t, repoDir, "git", "init", "--bare"); err != nil {
			return "", nil, err
		}
		if err := os.MkdirAll(filepath.Join(repoDir, "info"), 0777); err != nil {
			return "", nil, err
		}
		// Disable export-subst/export-ignore so "git archive" output is raw.
		attrFile, err := os.OpenFile(filepath.Join(repoDir, "info", "attributes"), os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666)
		if err != nil {
			return "", nil, err
		}
		if _, err := attrFile.Write([]byte("\n* -export-subst -export-ignore\n")); err != nil {
			attrFile.Close()
			return "", nil, err
		}
		if err := attrFile.Close(); err != nil {
			return "", nil, err
		}
		if _, err := run(t, repoDir, "git", "remote", "add", "origin", "--", url); err != nil {
			return "", nil, err
		}
		var refSpec string
		if strings.HasPrefix(rev, "v") {
			refSpec = fmt.Sprintf("refs/tags/%[1]s:refs/tags/%[1]s", rev)
		} else {
			refSpec = fmt.Sprintf("%s:refs/dummy", rev)
		}
		if _, err := run(t, repoDir, "git", "fetch", "-f", "--depth=1", "origin", refSpec); err != nil {
			return "", nil, err
		}

		// Create an archive.
		tmpZipFile, err := os.CreateTemp(t.TempDir(), "downloadVCSZip-*.zip")
		if err != nil {
			return "", nil, err
		}
		t.Cleanup(func() { tmpZipFile.Close() })
		subdirArg := subdir
		if subdir == "" {
			subdirArg = "."
		}
		cmd := exec.Command("git", "-c", "core.autocrlf=input", "-c", "core.eol=lf", "archive", "--format=zip", "--prefix=prefix/", rev, "--", subdirArg)
		cmd.Dir = repoDir
		cmd.Stdout = tmpZipFile
		stderr := new(strings.Builder)
		cmd.Stderr = stderr
		err = cmd.Run()
		if stderr.Len() > 0 && (err != nil || testing.Verbose()) {
			t.Logf("%v: %v\n%s", err, cmd, stderr)
		} else if err != nil {
			t.Logf("%v: %v", err, cmd)
		} else {
			t.Logf("%v", cmd)
		}
		if err != nil {
			return "", nil, err
		}
		// Rewind so callers can read the archive from the start.
		if _, err := tmpZipFile.Seek(0, 0); err != nil {
			return "", nil, err
		}
		return repoDir, tmpZipFile, nil

	case "hg":
		// Clone the whole repository.
		if _, err := run(t, repoDir, "hg", "clone", "-U", "--", url, "."); err != nil {
			return "", nil, err
		}

		// Create an archive.
		tmpZipFile, err := os.CreateTemp(t.TempDir(), "downloadVCSZip-*.zip")
		if err != nil {
			return "", nil, err
		}
		tmpZipPath := tmpZipFile.Name()
		tmpZipFile.Close()
		args := []string{"archive", "-t", "zip", "--no-decode", "-r", rev, "--prefix=prefix/"}
		if subdir != "" {
			args = append(args, "-I", subdir+"/**")
		}
		args = append(args, "--", tmpZipPath)
		if _, err := run(t, repoDir, "hg", args...); err != nil {
			return "", nil, err
		}
		// hg wrote the archive itself; reopen it for reading.
		if tmpZipFile, err = os.Open(tmpZipPath); err != nil {
			return "", nil, err
		}
		t.Cleanup(func() { tmpZipFile.Close() })
		return repoDir, tmpZipFile, err

	default:
		return "", nil, fmt.Errorf("vcs %q not supported", vcs)
	}
}

// downloadVCSFile returns the contents of a single file at rev from a local
// repository clone. Only git is supported.
func downloadVCSFile(t testing.TB, vcs, repo, rev, file string) ([]byte, error) {
	t.Helper()
	switch vcs {
	case "git":
		return run(t, repo, "git", "cat-file", "blob", rev+":"+file)
	default:
		return nil, fmt.Errorf("vcs %q not supported", vcs)
	}
}

// run executes name with args in dir, logging the command (and stderr when
// verbose or on failure), and returns the command's stdout.
func run(t testing.TB, dir string, name string, args ...string) ([]byte, error) {
	t.Helper()
	cmd := exec.Command(name, args...)
	cmd.Dir = dir
	stderr := new(strings.Builder)
	cmd.Stderr = stderr
	out, err := cmd.Output()
	if stderr.Len() > 0 && (err != nil || testing.Verbose()) {
		t.Logf("%v: %v\n%s", err, cmd, stderr)
	} else if err != nil {
		t.Logf("%v: %v", err, cmd)
	} else {
		t.Logf("%v", cmd)
	}
	return out, err
}

// zipFile adapts an entry of an archive/zip reader to the modzip.File
// interface, exposing it under an explicit name.
type zipFile struct {
	name string
	f    *zip.File
}

func (f zipFile) Path() string                 { return f.name }
func (f zipFile) Lstat() (os.FileInfo, error)  { return f.f.FileInfo(), nil }
func (f zipFile) Open() (io.ReadCloser, error) { return f.f.Open() }

// TestCreateFromVCS_basic checks which files CreateFromVCS includes when
// zipping from the repo root versus from subdirectories, including LICENSE
// hoisting and .gitignore handling.
func TestCreateFromVCS_basic(t *testing.T) {
	mustHaveGit(t)

	// Write files to a temporary directory.
	tmpDir, err := extractTxtarToTempDir(t, txtar.Parse([]byte(`-- go.mod --
module example.com/foo/bar

go 1.12
-- LICENSE --
root license
-- a.go --
package a

var A = 5
-- b.go --
package a

var B = 5
-- c/c.go --
package c

var C = 5
-- d/d.go --
package c

var D = 5
-- e/LICENSE --
e license
-- e/e.go --
package e

var E = 5
-- f/go.mod --
module example.com/foo/bar/f

go 1.12
-- f/f.go --
package f

var F = 5
-- .gitignore --
b.go
c/`)))
	if err != nil {
		t.Fatal(err)
	}
	gitInit(t, tmpDir)
	gitCommit(t, tmpDir)

	for _, tc := range []struct {
		desc      string
		version   module.Version
		subdir    string
		wantFiles []string
		wantData  map[string]string
	}{
		{
			desc:      "from root",
			version:   module.Version{Path: "example.com/foo/bar", Version: "v0.0.1"},
			subdir:    "",
			wantFiles: []string{"go.mod", "LICENSE", "a.go", "d/d.go", "e/LICENSE", "e/e.go", ".gitignore"},
			wantData:  map[string]string{"LICENSE": "root license\n"},
		},
		{
			desc:    "from subdir",
			version: module.Version{Path: "example.com/foo/bar", Version: "v0.0.1"},
			subdir:  "d/",
			// Note: File paths are zipped as if the subdir were the root. ie d.go instead of d/d.go.
			// subdirs without a license hoist the license from the root
			wantFiles: []string{"d.go", "LICENSE"},
			wantData:  map[string]string{"LICENSE": "root license\n"},
		},
		{
			desc:    "from subdir with license",
			version: module.Version{Path: "example.com/foo/bar", Version: "v0.0.1"},
			subdir:  "e/",
			// Note: File paths are zipped as if the subdir were the root. ie e.go instead of e/e.go.
			// subdirs with a license use their own
			wantFiles: []string{"LICENSE", "e.go"},
			wantData:  map[string]string{"LICENSE": "e license\n"},
		},
		{
			desc:    "from submodule subdir",
			version: module.Version{Path: "example.com/foo/bar/f", Version: "v0.0.1"},
			subdir:  "f/",
			// Note: File paths are zipped as if the subdir were the root. ie f.go instead of f/f.go.
			// subdirs without a license hoist the license from the root
			wantFiles: []string{"go.mod", "f.go", "LICENSE"},
			wantData:  map[string]string{"LICENSE": "root license\n"},
		},
	} {
		t.Run(tc.desc, func(t *testing.T) {
			// Create zip from the directory.
			tmpZip := &bytes.Buffer{}

			if err := modzip.CreateFromVCS(tmpZip, tc.version, tmpDir, "HEAD", tc.subdir); err != nil {
				t.Fatal(err)
			}

			// Rewrite expected contents onto their in-zip (prefixed) paths.
			wantData := map[string]string{}
			for f, data := range tc.wantData {
				p := path.Join(tc.version.String(), f)
				wantData[p] = data
			}

			readerAt := bytes.NewReader(tmpZip.Bytes())
			r, err := zip.NewReader(readerAt, int64(tmpZip.Len()))
			if err != nil {
				t.Fatal(err)
			}
			var gotFiles []string
			gotMap := map[string]bool{}
			for _, f := range r.File {
				gotMap[f.Name] = true
				gotFiles = append(gotFiles, f.Name)

				if want, ok := wantData[f.Name]; ok {
					r, err := f.Open()
					if err != nil {
						t.Errorf("CreatedFromVCS: error opening %s: %v", f.Name, err)
						continue
					}
					defer r.Close()
					got, err := io.ReadAll(r)
					if err != nil {
						t.Errorf("CreatedFromVCS: error reading %s: %v", f.Name, err)
						continue
					}
					if want != string(got) {
						t.Errorf("CreatedFromVCS: zipped file %s contains %s, expected %s", f.Name, string(got), want)
						continue
					}
				}
			}
			wantMap := map[string]bool{}
			for _, f := range tc.wantFiles {
				p := path.Join(tc.version.String(), f)
				wantMap[p] = true
			}

			// The things that should be there.
			for f := range gotMap {
				if !wantMap[f] {
					t.Errorf("CreatedFromVCS: zipped file contains %s, but expected it not to", f)
				}
			}

			// The things that are missing.
			for f := range wantMap {
				if !gotMap[f] {
					t.Errorf("CreatedFromVCS: zipped file doesn't contain %s, but expected it to. all files: %v", f, gotFiles)
				}
			}
			for f := range wantData {
				if !gotMap[f] {
					t.Errorf("CreatedFromVCS: zipped file doesn't contain %s, but expected it to. all files: %v", f, gotFiles)
				}
			}
		})
	}
}

// Test what the experience of creating a zip from a v2 module is like.
func TestCreateFromVCS_v2(t *testing.T) {
	mustHaveGit(t)

	// Write files to a temporary directory.
	tmpDir, err := extractTxtarToTempDir(t, txtar.Parse([]byte(`-- go.mod --
module example.com/foo/bar

go 1.12
-- a.go --
package a

var A = 5
-- b.go --
package a

var B = 5
-- go.mod --
module example.com/foo/bar

go 1.12
-- gaz/v2/a_2.go --
package a

var C = 9
-- gaz/v2/b_2.go --
package a

var B = 11
-- gaz/v2/go.mod --
module example.com/foo/bar/v2

go 1.12
-- .gitignore --
`)))
	if err != nil {
		t.Fatal(err)
	}
	gitInit(t, tmpDir)
	gitCommit(t, tmpDir)

	// Create zip from the directory.
	tmpZip := &bytes.Buffer{}

	m := module.Version{Path: "example.com/foo/bar/v2", Version: "v2.0.0"}

	if err := modzip.CreateFromVCS(tmpZip, m, tmpDir, "HEAD", "gaz/v2"); err != nil {
		t.Fatal(err)
	}

	readerAt := bytes.NewReader(tmpZip.Bytes())
	r, err := zip.NewReader(readerAt, int64(tmpZip.Len()))
	if err != nil {
		t.Fatal(err)
	}
	var gotFiles []string
	gotMap := map[string]bool{}
	for _, f := range r.File {
		gotMap[f.Name] = true
		gotFiles = append(gotFiles, f.Name)
	}
	wantMap := map[string]bool{
		"example.com/foo/bar/v2@v2.0.0/a_2.go": true,
		"example.com/foo/bar/v2@v2.0.0/b_2.go": true,
		"example.com/foo/bar/v2@v2.0.0/go.mod": true,
	}

	// The things that should be there.
	for f := range gotMap {
		if !wantMap[f] {
			t.Errorf("CreatedFromVCS: zipped file contains %s, but expected it not to", f)
		}
	}

	// The things that are missing.
	for f := range wantMap {
		if !gotMap[f] {
			t.Errorf("CreatedFromVCS: zipped file doesn't contain %s, but expected it to. all files: %v", f, gotFiles)
		}
	}
}

// TestCreateFromVCS_nonGitDir checks that CreateFromVCS on a directory that
// is not a repository fails with an UnrecognizedVCSError naming that
// directory as the repo root.
func TestCreateFromVCS_nonGitDir(t *testing.T) {
	mustHaveGit(t)

	// Write files to a temporary directory.
	tmpDir, err := extractTxtarToTempDir(t, txtar.Parse([]byte(`-- go.mod --
module example.com/foo/bar

go 1.12
-- a.go --
package a

var A = 5
`)))
	if err != nil {
		t.Fatal(err)
	}

	// Create zip from the directory.
	tmpZip, err := os.CreateTemp(t.TempDir(), "TestCreateFromDir-*.zip")
	if err != nil {
		t.Fatal(err)
	}
	defer tmpZip.Close()

	m := module.Version{Path: "example.com/foo/bar", Version: "v0.0.1"}

	err = modzip.CreateFromVCS(tmpZip, m, tmpDir, "HEAD", "")
	if err == nil {
		t.Fatal("CreateFromVCS: expected error, got nil")
	}
	var gotErr *modzip.UnrecognizedVCSError
	if !errors.As(err, &gotErr) {
		t.Errorf("CreateFromVCS: returned error does not unwrap to modzip.UnrecognizedVCSError, but expected it to. returned error: %v", err)
	} else if gotErr.RepoRoot != tmpDir {
		t.Errorf("CreateFromVCS: returned error has RepoRoot %q, but want %q. returned error: %v", gotErr.RepoRoot, tmpDir, err)
	}
}

// TestCreateFromVCS_zeroCommitsGitDir checks that CreateFromVCS fails on an
// initialized repository that has no commits (HEAD does not resolve).
func TestCreateFromVCS_zeroCommitsGitDir(t *testing.T) {
	mustHaveGit(t)

	// Write files to a temporary directory.
	tmpDir, err := extractTxtarToTempDir(t, txtar.Parse([]byte(`-- go.mod --
module example.com/foo/bar

go 1.12
-- a.go --
package a

var A = 5
`)))
	if err != nil {
		t.Fatal(err)
	}
	gitInit(t, tmpDir)

	// Create zip from the directory.
	tmpZip, err := os.CreateTemp(t.TempDir(), "TestCreateFromDir-*.zip")
	if err != nil {
		t.Fatal(err)
	}
	defer tmpZip.Close()

	m := module.Version{Path: "example.com/foo/bar", Version: "v0.0.1"}

	if err := modzip.CreateFromVCS(tmpZip, m, tmpDir, "HEAD", ""); err == nil {
		t.Error("CreateFromVCS: expected error, got nil")
	}
}

// gitInit runs "git init" at the specified dir.
//
// Note: some environments - and trybots - don't have git installed. This
// function will cause the calling test to be skipped if that's the case.
func gitInit(t testing.TB, dir string) { t.Helper() mustHaveGit(t) if _, err := run(t, dir, "git", "init"); err != nil { t.Fatal(err) } if _, err := run(t, dir, "git", "config", "user.name", "Go Gopher"); err != nil { t.Fatal(err) } if _, err := run(t, dir, "git", "config", "user.email", "gopher@golang.org"); err != nil { t.Fatal(err) } } func gitCommit(t testing.TB, dir string) { t.Helper() mustHaveGit(t) if _, err := run(t, dir, "git", "add", "-A"); err != nil { t.Fatal(err) } if _, err := run(t, dir, "git", "commit", "-m", "some commit"); err != nil { t.Fatal(err) } }