go-archive-1.0/.gitignore

*swp

go-archive-1.0/LICENSE

Copyright (c) Paul R. Tagliamonte, 2016

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

go-archive-1.0/README.md

go-archive
==========

`go-archive` is a set of non-production test bindings to work with a Debian
archive.
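
A minimal write-side sketch (hypothetical `loadSigningKey` helper; error
handling elided):

```go
signer := loadSigningKey() // hypothetical: returns an *openpgp.Entity
arch, _ := archive.New("/srv/apt", signer)
suite, _ := arch.Suite("unstable")
comp, _ := suite.Component("main")
_ = comp.AddPackage(pkg) // pkg is an archive.Package
state, _ := arch.Engross(*suite)
_ = arch.Link(state)
```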

go-archive-1.0/archive.go

package archive

import (
	"fmt"
	"io"
	"path"
	"path/filepath"
	"time"

	"crypto"
	"crypto/sha512"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/clearsign"
	"golang.org/x/crypto/openpgp/packet"

	"pault.ag/go/blobstore"
	"pault.ag/go/debian/control"
	"pault.ag/go/debian/dependency"
	"pault.ag/go/debian/hashio"
)

// Archive {{{

// Core Archive abstraction. This contains helpers to write out package files,
// as well as handles creating underlying abstractions (such as Suites).
type Archive struct {
	Store      blobstore.Store
	signingKey *openpgp.Entity
	path       string
	Pool       Pool
}

// Create a new Archive at the given `root` on the filesystem, with the
// openpgp.Entity `signer` (an Entity which contains an OpenPGP Private
// Key).
//
// This interface is intended to *write* Archives, not *read* them. Extra
// steps must be taken to load an Archive over the network, and attention
// must be paid when handling the cryptographic chain of trust.
func New(path string, signer *openpgp.Entity) (*Archive, error) {
	var err error
	path, err = filepath.Abs(path)
	if err != nil {
		return nil, err
	}

	store, err := blobstore.Load(path)
	if err != nil {
		return nil, err
	}

	return &Archive{
		Store:      *store,
		signingKey: signer,
		path:       path,
		Pool:       Pool{Store: *store},
	}, nil
}

func (a Archive) Path() string {
	return a.path
}

// Use the default backend to remove any unlinked files from the blob store.
//
// If files you care about are not linked onto the stage, they will be removed
// by the garbage collector. GC only when you're sure the stage has been
// set.
func (a Archive) GC() error {
	return a.Store.GC(blobstore.DumbGarbageCollector{})
}

// Given a list of objects, link them to the keyed paths.
func (a Archive) Link(blobs ArchiveState) error {
	for path, obj := range blobs {
		if err := a.Store.Link(obj, path); err != nil {
			return err
		}
	}
	return nil
}

// Create a new Release object from a Suite, passing off the Name, Description
// and constructing the rest of the goodies.
//
// This will be an entirely empty object, without anything read off disk.
func newRelease(suite Suite) (*Release, error) {
	when := time.Now()

	var validUntil string
	if suite.features.Duration != "" {
		duration, err := time.ParseDuration(suite.features.Duration)
		if err != nil {
			return nil, err
		}
		validUntil = when.Add(duration).In(time.UTC).Format(time.RFC1123Z)
	}

	release := Release{
		Suite:       suite.Name,
		Description: suite.Description,
		ValidUntil:  validUntil,
		Origin:      suite.Origin,
		Label:       suite.Label,
		Version:     suite.Version,
	}

	release.Date = when.In(time.UTC).Format(time.RFC1123Z)
	release.Architectures = []dependency.Arch{}
	release.Components = []string{}
	release.SHA256 = []control.SHA256FileHash{}
	release.SHA1 = []control.SHA1FileHash{}
	release.SHA512 = []control.SHA512FileHash{}
	release.MD5Sum = []control.MD5FileHash{}

	return &release, nil
}

// This is a set of file changes ready to be passed to `Link` to link in.
// Basically, this maps file paths to blobstore objects, which will be
// swapped in all at once. This lets a failed run avoid mutating state in
// the archive.
type ArchiveState map[string]blobstore.Object

// Engross a Suite for signing and final commit into the blobstore. This
// will return handle(s) to the signed and ready Objects, fit for passage
// to Link.
//
// This will contain all the related Packages and Release files.
func (a Archive) Engross(suite Suite) (ArchiveState, error) {
	release, err := newRelease(suite)
	if err != nil {
		return nil, err
	}

	files := ArchiveState{}
	arches := map[dependency.Arch]bool{}

	for name, component := range suite.components {
		release.Components = append(release.Components, name)
		for arch, writer := range component.packageWriters {
			arches[arch] = true

			// For each Binary entry, do the same as above. (todo: someone
			// should DRY this out a bit. I'm too lazy.)
			suitePath := path.Join(name, fmt.Sprintf("binary-%s", arch), "Packages")

			obj, err := a.Store.Commit(*writer.handle)
			if err != nil {
				return nil, err
			}

			for _, hasher := range writer.hashers {
				fileHash := control.FileHashFromHasher(suitePath, *hasher)
				release.AddHash(fileHash)
			}

			filePath := path.Join("dists", suite.Name, suitePath)
			files[filePath] = *obj
		}
	}

	for arch := range arches {
		release.Architectures = append(release.Architectures, arch)
	}

	/* Now, let's do some magic */

	// Now, let's write out the Release file (and sign it normally)
	obj, sig, err := suite.archive.encodeSigned(release)
	if err != nil {
		return nil, err
	}

	filePath := path.Join("dists", suite.Name, "Release")
	files[filePath] = *obj
	files[fmt.Sprintf("%s.gpg", filePath)] = *sig

	// Ditto with the clearsigned version (Should we merge the two above?)
	obj, err = suite.archive.encodeClearsigned(release)
	if err != nil {
		return nil, err
	}

	files[path.Join("dists", suite.Name, "InRelease")] = *obj

	return files, nil
}
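
// The safe ordering is Engross, then Link, then GC: blobs that are not
// linked by the time the collector runs are removed. A sketch (error
// handling elided):
//
//	state, err := arch.Engross(*suite)
//	if err != nil {
//		return err
//	}
//	if err := arch.Link(state); err != nil {
//		return err
//	}
//	return arch.GC() // only once the stage is fully linked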

// Given a control.Marshal'able object, encode it to the blobstore, while
// also clearsigning the data.
func (a Archive) encodeClearsigned(data interface{}) (*blobstore.Object, error) {
	if a.signingKey == nil {
		return nil, fmt.Errorf("No signing key loaded")
	}

	fd, err := a.Store.Create()
	if err != nil {
		return nil, err
	}
	defer fd.Close()

	wc, err := clearsign.Encode(fd, a.signingKey.PrivateKey, &packet.Config{
		DefaultHash: crypto.SHA512,
	})
	if err != nil {
		return nil, err
	}

	encoder, err := control.NewEncoder(wc)
	if err != nil {
		return nil, err
	}

	if err := encoder.Encode(data); err != nil {
		return nil, err
	}

	if err := wc.Close(); err != nil {
		return nil, err
	}

	return a.Store.Commit(*fd)
}

// Given a control.Marshal'able object, encode it to the blobstore, while
// also doing a detached OpenPGP signature. The objects returned (in order)
// are the data, committed to the blobstore; the signature for that object,
// committed to the blobstore; and finally any error(s).
func (a Archive) encodeSigned(data interface{}) (*blobstore.Object, *blobstore.Object, error) {
	/* Right, so, the trick here is that we secretly call out to encode,
	 * but tap it with a pipe into the signing code */

	if a.signingKey == nil {
		return nil, nil, fmt.Errorf("No signing key loaded")
	}

	signature, err := a.Store.Create()
	if err != nil {
		return nil, nil, err
	}
	defer signature.Close()

	hash := sha512.New()

	obj, err := a.encode(data, hash)
	if err != nil {
		return nil, nil, err
	}

	sig := new(packet.Signature)
	sig.SigType = packet.SigTypeBinary
	sig.PubKeyAlgo = a.signingKey.PrivateKey.PubKeyAlgo
	sig.Hash = crypto.SHA512
	sig.CreationTime = new(packet.Config).Now()
	sig.IssuerKeyId = &(a.signingKey.PrivateKey.KeyId)

	err = sig.Sign(hash, a.signingKey.PrivateKey, &packet.Config{
		DefaultHash: crypto.SHA512,
	})
	if err != nil {
		return nil, nil, err
	}

	if err := sig.Serialize(signature); err != nil {
		return nil, nil, err
	}

	sigObj, err := a.Store.Commit(*signature)
	if err != nil {
		return nil, nil, err
	}

	return obj, sigObj, nil
}

// Encode a given control.Marshal'able object into the blobstore, and return
// a handle to its object.
//
// The optional argument `tap` will be written to as the object gets sent into
// the blobstore. This may be useful if you wish to have a copy of the data
// going into the store.
func (a Archive) encode(data interface{}, tap io.Writer) (*blobstore.Object, error) {
	fd, err := a.Store.Create()
	if err != nil {
		return nil, err
	}

	var writer io.Writer = fd
	if tap != nil {
		writer = io.MultiWriter(fd, tap)
	}

	encoder, err := control.NewEncoder(writer)
	if err != nil {
		return nil, err
	}

	if err := encoder.Encode(data); err != nil {
		return nil, err
	}

	return a.Store.Commit(*fd)
}

// }}}

// Suite {{{

// Abstraction to handle writing data into a Suite. This is a write-only
// target, and is not intended to read a Release file.
//
// This contains no state read off disk, and is purely for writing to.
type Suite struct {
	control.Paragraph

	archive *Archive

	Name        string `control:"Suite"`
	Description string
	Origin      string
	Label       string
	Version     string

	components map[string]*Component `control:"-"`

	features struct {
		Hashes   []string
		Duration string
	} `control:"-"`
}

// Get a handle to write a given Suite from an Archive.
// The suite will be entirely blank, and attributes will not be
// read from the existing files, if any.
func (a Archive) Suite(name string) (*Suite, error) {
	suite := Suite{
		Name:       name,
		archive:    &a,
		components: map[string]*Component{},
	}
	suite.features.Hashes = []string{"sha256", "sha1", "sha512"}
	suite.features.Duration = "168h"
	return &suite, nil
}

// Get or create a Component for a given Suite. If no such Component
// has been created so far, this will create a new object; otherwise
// it will return the existing entry.
//
// This contains no state read off disk, and is purely for writing to.
func (s Suite) Component(name string) (*Component, error) {
	if _, ok := s.components[name]; !ok {
		comp, err := newComponent(&s)
		if err != nil {
			return nil, err
		}
		s.components[name] = comp
		return comp, nil
	}
	el := s.components[name]
	return el, nil
}

// }}}

// Component {{{

// Small wrapper to represent a Component of a Suite, which, at its core,
// is simply a set of Indexes to be written to.
//
// This contains no state read off disk, and is purely for writing to.
type Component struct {
	suite          *Suite
	packageWriters map[dependency.Arch]*IndexWriter
}

// Create a new Component, configured for use.
func newComponent(suite *Suite) (*Component, error) {
	return &Component{
		suite:          suite,
		packageWriters: map[dependency.Arch]*IndexWriter{},
	}, nil
}

// Get a given IndexWriter for an arch, or create one if none exists.
func (c *Component) getWriter(arch dependency.Arch) (*IndexWriter, error) {
	if _, ok := c.packageWriters[arch]; !ok {
		writer, err := newIndexWriter(c.suite)
		if err != nil {
			return nil, err
		}
		c.packageWriters[arch] = writer
	}
	return c.packageWriters[arch], nil
}

// Add a given Package to a Package List. Under the hood, this will
// get or create an IndexWriter, and invoke the .Add method on the
// Package Writer.
func (c *Component) AddPackage(pkg Package) error {
	writer, err := c.getWriter(pkg.Architecture)
	if err != nil {
		return err
	}
	return writer.Add(pkg)
}

// }}}

// IndexWriter {{{

// This writer represents a Package list, which is to say, a list of
// binary .deb files, for a particular Architecture, in a particular Component,
// in a particular Suite, in a particular Archive.
//
// This is not an encapsulation to store the entire Index in memory; rather,
// it's a wrapper to help write Package entries into the Index.
type IndexWriter struct {
	archive *Archive

	handle  *blobstore.Writer
	closer  func() error
	encoder *control.Encoder

	hashers []*hashio.Hasher
}

func getHashers(suite *Suite) (io.Writer, []*hashio.Hasher, error) {
	ret := []*hashio.Hasher{}
	writers := []io.Writer{}

	for _, algo := range suite.features.Hashes {
		hasher, err := hashio.NewHasher(algo)
		if err != nil {
			return nil, nil, err
		}
		writers = append(writers, hasher)
		ret = append(ret, hasher)
	}

	return io.MultiWriter(writers...), ret, nil
}

// Given a Suite, create a new Package Writer, configured with
// the appropriate hashing, and targeting a new file blob in the
// underlying blobstore.
func newIndexWriter(suite *Suite) (*IndexWriter, error) {
	handle, err := suite.archive.Store.Create()
	if err != nil {
		return nil, err
	}

	writer, hashers, err := getHashers(suite)
	if err != nil {
		return nil, err
	}

	encoder, err := control.NewEncoder(io.MultiWriter(writer, handle))
	if err != nil {
		handle.Close()
		return nil, err
	}

	return &IndexWriter{
		archive: suite.archive,
		closer:  handle.Close,
		encoder: encoder,
		handle:  handle,
		hashers: hashers,
	}, nil
}

// Write a Package entry into the Packages index.
func (p IndexWriter) Add(data interface{}) error {
	return p.encoder.Encode(data)
}

// }}}

// vim: foldmethod=marker

go-archive-1.0/doc.go

/*
This module is experimental and incomplete. Please be careful.

The archive module provides a few `pault.ag/go/debian` compatible bindings
to read and write Debian apt archives.
*/
package archive

go-archive-1.0/downloader.go

package archive

import (
	"bufio"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"golang.org/x/crypto/openpgp"

	"pault.ag/go/debian/control"
	"pault.ag/go/debian/deb"
)

type pool struct {
	ch chan bool
}

// newPool constructs a pool which can be used by up to n workers at
// the same time.
func newPool(n int) *pool {
	return &pool{
		ch: make(chan bool, n),
	}
}

func (p *pool) lock()   { p.ch <- true }
func (p *pool) unlock() { <-p.ch }

// Downloader makes files from the Debian archive available.
type Downloader struct {
	// Parallel limits the maximum number of concurrent archive accesses.
	Parallel int

	// MaxTransientRetries caps retries of transient errors.
	// The default value of 0 means no retries.
	MaxTransientRetries int

	// Mirror is the HTTP URL of a Debian mirror, e.g. "https://deb.debian.org/debian".
	// Mirror supports TLS and HTTP/2.
	Mirror string

	// LocalMirror overrides Mirror with a local file system path.
	// E.g. /srv/mirrors/debian on DSA-maintained machines.
	LocalMirror string

	// TempDir is passed as dir argument to ioutil.TempFile.
	// The default value of empty string uses the default directory, see os.TempDir.
	TempDir string

	once sync.Once
	pool *pool

	// Keyring is used for validating archive GPG signatures. If nil, the
	// keyring is loaded from DebianArchiveKeyring.
	Keyring openpgp.EntityList
}
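
// A construction sketch for local-filesystem use; compare DefaultDownloader
// further down for the HTTP configuration (mirror path illustrative):
//
//	dl := &archive.Downloader{
//		Parallel:    1,
//		LocalMirror: "/srv/mirrors/debian",
//	}
//	rel, rd, err := dl.Release("unstable")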

type transientError struct {
	error
}

// open returns an io.ReadCloser for reading fn from the archive, and fn's
// last modification time.
func (g *Downloader) open(fn string) (io.ReadCloser, time.Time, error) {
	if g.LocalMirror != "" {
		f, err := os.Open(filepath.Join(g.LocalMirror, fn))
		if err != nil {
			return nil, time.Time{}, err
		}
		fi, err := f.Stat()
		if err != nil {
			return nil, time.Time{}, err
		}
		return f, fi.ModTime(), nil
	}

	u := strings.TrimSuffix(g.Mirror, "/") + "/" + fn
	resp, err := http.Get(u)
	if err != nil {
		return nil, time.Time{}, transientError{err}
	}
	if got, want := resp.StatusCode, http.StatusOK; got != want {
		err := fmt.Errorf("download(%s): unexpected HTTP status code: got %d, want %d", u, got, want)
		// Not entirely accurate or exhaustive, but HTTP 5xx is generally
		// transient.
		if resp.StatusCode >= 500 && resp.StatusCode < 600 {
			return nil, time.Time{}, transientError{err}
		}
		return nil, time.Time{}, err
	}
	modTime, err := http.ParseTime(resp.Header.Get("Last-Modified"))
	if err != nil {
		return nil, time.Time{}, err
	}
	return resp.Body, modTime, nil
}

func (g *Downloader) tempFileWithFilename(verifier io.WriteCloser, decompressor deb.DecompressorFunc, fn string) (*os.File, error) {
	g.pool.lock()
	defer g.pool.unlock()

	f, err := ioutil.TempFile(g.TempDir, "archive-")
	if err != nil {
		return nil, err
	}
	// Delete the temporary file on any subsequent error, so that failed
	// downloads and failed verification do not leave partial files behind.
	fail := func(err error) (*os.File, error) {
		f.Close()
		os.Remove(f.Name())
		return nil, err
	}

	var (
		r       io.ReadCloser
		modTime time.Time
	)
	for retry := 0; ; retry++ {
		var err error
		r, modTime, err = g.open(fn)
		if err == nil {
			break
		}
		if te, ok := err.(transientError); ok && retry < g.MaxTransientRetries {
			log.Printf("transient error %v, retrying (attempt %d of %d)", te, retry, g.MaxTransientRetries)
			continue
		}
		return fail(err)
	}
	defer r.Close()

	if _, err := f.Seek(0, os.SEEK_SET); err != nil {
		return fail(err)
	}
	rd, err := decompressor(io.TeeReader(r, verifier))
	if err != nil {
		return fail(err)
	}
	w := bufio.NewWriter(f)
	if _, err := io.Copy(w, rd); err != nil {
		return fail(err)
	}
	if err := verifier.Close(); err != nil {
		return fail(err)
	}
	if err := w.Flush(); err != nil {
		return fail(err)
	}
	if _, err := f.Seek(0, os.SEEK_SET); err != nil {
		return fail(err)
	}
	if err := os.Chtimes(f.Name(), modTime, modTime); err != nil {
		return fail(err)
	}
	return f, nil
}

// TempFile calls ioutil.TempFile, then downloads fh from the archive and
// returns it.
//
// If hash checksum verification fails, the temporary file will be deleted and
// an error will be returned.
//
// If err is nil, the caller must remove the file when no longer needed:
//
//	f, err := r.TempFile(fh)
//	if err != nil {
//		return nil, err
//	}
//	defer f.Close()           // avoid leaking resources
//	defer os.Remove(f.Name()) // remove from file system
//	return parseSources(f)
//
// Remember that files must be closed before they can be read by external
// processes:
//
//	f, err := r.TempFile(fh)
//	if err != nil {
//		return err
//	}
//	if err := f.Close(); err != nil {
//		return err
//	}
//	defer os.Remove(f.Name()) // remove from file system
//	return exec.Command("tar", "xf", f.Name()).Run()
func (g *Downloader) TempFile(fh control.FileHash) (*os.File, error) {
	if err := g.init(); err != nil {
		return nil, err
	}
	verifier, err := fh.Verifier()
	if err != nil {
		return nil, err
	}
	decompressor := deb.DecompressorFor(filepath.Ext(fh.Filename))
	return g.tempFileWithFilename(verifier, decompressor, fh.Filename)
}

func (g *Downloader) init() error {
	var err error
	g.once.Do(func() {
		g.pool = newPool(g.Parallel)
		if g.Keyring == nil {
			err = g.loadArchiveKeyrings()
		}
	})
	return err
}

// DebianArchiveKeyring is the full path to the GPG keyring containing the
// public keys used for signing the Debian archive.
const DebianArchiveKeyring = "/usr/share/keyrings/debian-archive-keyring.gpg"

// loadArchiveKeyrings loads the debian-archive-keyring.gpg keyring
// shipped in the debian-archive-keyring Debian package (NOT all
// trusted keys stored in /etc/apt/trusted.gpg.d).
func (g *Downloader) loadArchiveKeyrings() error {
	f, err := os.Open(DebianArchiveKeyring)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("%s not found. On Debian, install the debian-archive-keyring package.", DebianArchiveKeyring)
		}
		return err
	}
	defer f.Close()
	g.Keyring, err = openpgp.ReadKeyRing(f)
	return err
}

// ReleaseDownloader is like Downloader, but for a specific release
// (e.g. unstable).
type ReleaseDownloader struct {
	// LastModified contains the last modification timestamp of the release
	// metadata file.
	LastModified time.Time

	acquireByHash bool
	g             *Downloader
	suite         string
}

// TempFile is like Downloader.TempFile, but for file hashes of the release.
func (r *ReleaseDownloader) TempFile(fh control.FileHash) (*os.File, error) {
	fn := "dists/" + r.suite + "/" + fh.Filename
	if r.acquireByHash {
		fn = fh.ByHashPath(fn)
	}
	verifier, err := fh.Verifier()
	if err != nil {
		return nil, err
	}
	decompressor := deb.DecompressorFor(filepath.Ext(fh.Filename))
	return r.g.tempFileWithFilename(verifier, decompressor, fn)
}

type noopVerifier struct{}

func (*noopVerifier) Write(p []byte) (int, error) { return len(p), nil }

func (*noopVerifier) Close() error { return nil }

// Release returns a release and a corresponding ReleaseDownloader from the
// archive.
//
// If cryptographic verification using DebianArchiveKeyring fails, an error
// will be returned.
func (g *Downloader) Release(suite string) (*Release, *ReleaseDownloader, error) {
	if err := g.init(); err != nil {
		return nil, nil, err
	}
	u := "dists/" + suite + "/InRelease"
	if strings.HasSuffix(suite, "stable") {
		// Only testing (buster) has InRelease at this point, so fall back to
		// Release for *stable:
		u = "dists/" + suite + "/Release"
	}
	verifier := &noopVerifier{}             // verification happens in LoadInRelease
	decompressor := deb.DecompressorFor("") // InRelease is not compressed
	f, err := g.tempFileWithFilename(verifier, decompressor, u)
	if err != nil {
		return nil, nil, err
	}
	defer os.Remove(f.Name())
	defer f.Close()
	r, err := LoadInRelease(f, &g.Keyring)
	if err != nil {
		return nil, nil, fmt.Errorf("LoadInRelease(%s): %v", u, err)
	}
	fi, err := f.Stat()
	if err != nil {
		return nil, nil, err
	}
	return r, &ReleaseDownloader{fi.ModTime(), r.AcquireByHash, g, suite}, nil
}

// DefaultDownloader is a ready-to-use Downloader, used by convenience wrappers
// such as CachedRelease and, by extension, TempFile.
var DefaultDownloader = &Downloader{
	Parallel:            10,
	MaxTransientRetries: 3,
	Mirror:              "https://deb.debian.org/debian",
}

type cachedRelease struct {
	r   *Release
	rd  *ReleaseDownloader
	err error
}

var (
	releaseCacheMu sync.Mutex
	releaseCache   = make(map[string]cachedRelease)
)

// CachedRelease returns DefaultDownloader.Release(suite), caching releases for
// the duration of the process.
func CachedRelease(suite string) (*Release, *ReleaseDownloader, error) {
	releaseCacheMu.Lock()
	cached, ok := releaseCache[suite]
	releaseCacheMu.Unlock()
	if ok {
		return cached.r, cached.rd, cached.err
	}

	r, rd, err := DefaultDownloader.Release(suite)

	releaseCacheMu.Lock()
	defer releaseCacheMu.Unlock()
	if cached, ok := releaseCache[suite]; ok {
		// Another goroutine raced us, return cached values for consistency:
		return cached.r, cached.rd, cached.err
	}
	releaseCache[suite] = cachedRelease{r, rd, err}
	return r, rd, err
}
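
// Lookup sketch: resolve an index path through the release, then fetch it
// with checksum verification (index path illustrative; error handling
// elided). The package-level TempFile below wraps exactly this flow:
//
//	r, rd, _ := archive.CachedRelease("unstable")
//	if fhs, ok := r.Indices()["main/binary-amd64/Packages.gz"]; ok {
//		f, _ := rd.TempFile(fhs[0])
//		defer f.Close()
//		defer os.Remove(f.Name())
//	}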

// TempFile expects a path starting with dists/, calls CachedRelease(suite),
// looks up the remaining path within the release and calls TempFile on the
// corresponding ReleaseDownloader.
func TempFile(path string) (*os.File, error) {
	if !strings.HasPrefix(path, "dists/") {
		return nil, fmt.Errorf("path %q does not start with dists/", path)
	}
	path = strings.TrimPrefix(path, "dists/")
	suite := strings.Split(path, "/")[0]
	r, rd, err := CachedRelease(suite)
	if err != nil {
		return nil, err
	}
	remainder := strings.TrimPrefix(path, suite+"/")
	fhs, ok := r.Indices()[remainder]
	if !ok {
		return nil, fmt.Errorf("%s not found", remainder)
	}
	return rd.TempFile(fhs[0])
}

go-archive-1.0/packages.go

/* {{{ Copyright (c) Paul R. Tagliamonte, 2015
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE. }}} */

package archive

import (
	"fmt"
	"io"
	"os"
	"strconv"

	"crypto/md5"
	"crypto/sha1"
	"crypto/sha256"
	"hash"

	"pault.ag/go/debian/control"
	"pault.ag/go/debian/deb"
	"pault.ag/go/debian/dependency"
	"pault.ag/go/debian/version"
)

// Package {{{

// Binary .deb Package entry, as it exists in the Packages file, which
// contains the .deb Control information, as well as information on
// where the file lives, the file size, and some hashes.
type Package struct {
	control.Paragraph

	Package       string `required:"true"`
	Source        SourceName
	Version       version.Version `required:"true"`
	Section       string
	Priority      string
	Architecture  dependency.Arch `required:"true"`
	Essential     string
	InstalledSize int    `control:"Installed-Size"`
	Maintainer    string `required:"true"`
	Description   string `required:"true"`
	Homepage      string

	Filename string `required:"true"`
	Size     int    `required:"true"`

	MD5sum string
	SHA1   string
	SHA256 string
	SHA512 string

	DescriptionMD5 string `control:"Description-md5"`

	Depends    dependency.Dependency
	Suggests   dependency.Dependency
	BuiltUsing dependency.Dependency `control:"Built-Using"`
	Breaks     dependency.Dependency
	PreDepends dependency.Dependency `control:"Pre-Depends"`
}
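
// A Packages-file paragraph as this struct expects it (abridged; values are
// illustrative):
//
//	Package: hello
//	Version: 2.10-2
//	Architecture: amd64
//	Maintainer: Jane Doe <jane@example.com>
//	Installed-Size: 280
//	Depends: libc6 (>= 2.14)
//	Filename: pool/main/h/hello/hello_2.10-2_amd64.deb
//	Size: 56132
//	SHA256: 31f7...
//	Description: example package based on GNU hello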

// PackageFromDeb {{{

// Create a Package entry from a deb.Deb file. This will copy the binary
// .deb Control file into the Package entry, and set information as to
// the location of the file, the size of the file, and hash the file.
func PackageFromDeb(debFile deb.Deb) (*Package, error) {
	pkg := Package{}

	paragraph := debFile.Control.Paragraph
	paragraph.Set("Filename", debFile.Path)

	/* Now, let's do some magic */
	fd, err := os.Open(debFile.Path)
	if err != nil {
		return nil, err
	}

	stat, err := fd.Stat()
	if err != nil {
		return nil, err
	}
	paragraph.Set("Size", strconv.Itoa(int(stat.Size())))

	/* Right, now, in addition, we ought to hash the crap out of the file */
	md5sum := md5.New()
	sha1 := sha1.New()
	sha256 := sha256.New()

	writer := io.MultiWriter(md5sum, sha256, sha1)
	if _, err := io.Copy(writer, fd); err != nil {
		return nil, err
	}

	for key, hasher := range map[string]hash.Hash{
		"MD5sum": md5sum,
		"SHA1":   sha1,
		"SHA256": sha256,
	} {
		paragraph.Set(key, fmt.Sprintf("%x", hasher.Sum(nil)))
	}

	return &pkg, control.UnpackFromParagraph(debFile.Control.Paragraph, &pkg)
}

// }}}
// }}}

// Packages {{{

// Iterator to access the entries contained in the Packages entry in an
// apt repo. This contains information about the binary Debian packages.
type Packages struct {
	decoder *control.Decoder
}

// Map {{{

// Get any packages that match the criteria.
func (p *Packages) Map(q func(*Package) bool) ([]Package, error) {
	ret := []Package{}
	for {
		pkg, err := p.Next()
		if err == io.EOF {
			return ret, nil
		}
		if err != nil {
			return nil, err
		}
		if q(pkg) {
			ret = append(ret, *pkg)
		}
	}
}

// }}}

// Next {{{

// Get the next Package entry in the Packages list. This will return an
// io.EOF at the last entry.
func (p *Packages) Next() (*Package, error) {
	next := Package{}
	return &next, p.decoder.Decode(&next)
}

// }}}

// LoadPackagesFile {{{

// Given a path, create a Packages iterator. Note that the Packages
// file is not OpenPGP signed, so one will need to verify the integrity
// of this file from the InRelease file before trusting any output.
func LoadPackagesFile(path string) (*Packages, error) {
	fd, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	return LoadPackages(fd)
}

// }}}
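
// Iteration sketch (path illustrative; verify the file against InRelease
// before trusting its contents):
//
//	pkgs, _ := archive.LoadPackagesFile("Packages")
//	for {
//		pkg, err := pkgs.Next()
//		if err == io.EOF {
//			break
//		} else if err != nil {
//			return err
//		}
//		fmt.Println(pkg.Package, pkg.Version)
//	}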

// LoadPackages {{{

// Given an io.Reader, create a Packages iterator. Note that the Packages
// file is not OpenPGP signed, so one will need to verify the integrity
// of this file from the InRelease file before trusting any output.
func LoadPackages(in io.Reader) (*Packages, error) {
	decoder, err := control.NewDecoder(in, nil)
	if err != nil {
		return nil, err
	}
	return &Packages{decoder: decoder}, nil
}

// }}}

// }}}

// vim: foldmethod=marker

go-archive-1.0/pool.go

package archive

import (
	"fmt"
	"io"
	"os"
	"path"

	"pault.ag/go/blobstore"
	"pault.ag/go/debian/control"
	"pault.ag/go/debian/deb"
)

type Pool struct {
	Store blobstore.Store
}

func poolPrefix(source string) string {
	return path.Join(source[0:1], source)
}

func (p Pool) Copy(path string) (*blobstore.Object, error) {
	fd, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer fd.Close()

	writer, err := p.Store.Create()
	if err != nil {
		return nil, err
	}
	defer writer.Close()

	if _, err := io.Copy(writer, fd); err != nil {
		return nil, err
	}

	obj, err := p.Store.Commit(*writer)
	if err != nil {
		return nil, err
	}
	return obj, nil
}

func (p Pool) IncludeSources(dsc *control.DSC) (string, map[string]blobstore.Object, error) {
	files := map[string]blobstore.Object{}
	targetDir := path.Join("pool", poolPrefix(dsc.Source))

	for _, file := range dsc.Files {
		obj, err := p.Copy(file.Filename)
		if err != nil {
			return "", nil, err
		}
		localName := path.Base(file.Filename)
		files[path.Join(targetDir, localName)] = *obj
	}

	obj, err := p.Copy(dsc.Filename)
	if err != nil {
		return "", nil, err
	}
	localName := path.Base(dsc.Filename)
	files[path.Join(targetDir, localName)] = *obj

	for path, object := range files {
		if err := p.Store.Link(object, path); err != nil {
			return "", nil, err
		}
	}

	return targetDir, files, nil
}

func (p Pool) IncludeDeb(debFile *deb.Deb) (string, *blobstore.Object, error) {
	obj, err := p.Copy(debFile.Path)
	if err != nil {
		return "", nil, err
	}

	debPath := path.Join(
		"pool",
		poolPrefix(debFile.Control.SourceName()),
		fmt.Sprintf(
			"%s_%s_%s.deb",
			debFile.Control.Package,
			debFile.Control.Version,
			debFile.Control.Architecture,
		),
	)

	return debPath, obj, p.Store.Link(*obj, debPath)
}
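
// Inclusion sketch: copy a .deb into the pool and link it at its canonical
// path (blobstore root illustrative; d is a *deb.Deb obtained via
// pault.ag/go/debian/deb; error handling elided):
//
//	store, _ := blobstore.Load("/srv/apt")
//	p := archive.Pool{Store: *store}
//	debPath, obj, err := p.IncludeDeb(d)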

go-archive-1.0/release.go

/* {{{ Copyright (c) Paul R. Tagliamonte, 2015
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE. }}} */

package archive

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/crypto/openpgp"

	"pault.ag/go/debian/control"
	"pault.ag/go/debian/dependency"
)

// Release {{{

// The file "dists/$DIST/InRelease" shall contain meta-information about the
// distribution and checksums for the indices, possibly signed with a GPG
// clearsign signature (for example created by "gpg -a -s --clearsign"). For
// older clients there can also be a "dists/$DIST/Release" file without any
// signature and the file "dists/$DIST/Release.gpg" with a detached GPG
// signature of the "Release" file, compatible with the format used by the GPG
// options "-a -b -s".
type Release struct {
	control.Paragraph

	Description string

	// Optional field indicating the origin of the repository, a single line
	// of free form text.
	Origin string

	// Optional field including some kind of label, a single line of free form
	// text.
	//
	// Typically used extensively in repositories split over multiple media
	// such as repositories stored on CDs.
	Label string

	// The Version field, if specified, shall be the version of the release.
	// This is usually a sequence of integers separated by the character
	// "." (full stop).
	//
	// Example:
	//
	//	Version: 6.0
	Version string

	// The Suite field may describe the suite. A suite is a single word. In
	// Debian, this shall be one of oldstable, stable, testing, unstable,
	// or experimental; with optional suffixes such as -updates.
	//
	// Example:
	//
	//	Suite: stable
	Suite string

	// The Codename field shall describe the codename of the release. A
	// codename is a single word. Debian releases are codenamed after Toy
	// Story characters; the unstable suite has the codename sid, the
	// experimental suite has the codename experimental.
	//
	// Example:
	//
	//	Codename: squeeze
	Codename string

	// A whitespace separated list of areas.
	//
	// Example:
	//
	//	Components: main contrib non-free
	//
	// May also be prefixed by parts of the path following the directory
	// beneath dists, if the Release file is not in a directory directly
	// beneath dists/. As an example, security updates are specified in
	// APT as:
	//
	//	deb http://security.debian.org/ stable/updates main
	//
	// The Release file would be located at
	// http://security.debian.org/dists/stable/updates/Release and look like:
	//
	//	Suite: stable
	//	Components: updates/main updates/contrib updates/non-free
	Components []string `delim:" "`

	// Whitespace separated unique single words identifying Debian machine
	// architectures as described in Architecture specification strings,
	// Section 11.1. Clients should ignore Architectures they do not know
	// about.
	Architectures []dependency.Arch

	// The Date field shall specify the time at which the Release file was
	// created. Clients updating a local on-disk cache should ignore a Release
	// file with an earlier date than the date in the already stored Release
	// file.
	//
	// The Valid-Until field may specify at which time the Release file should
	// be considered expired by the client. Client behaviour on expired Release
	// files is unspecified.
	//
	// The format of the dates is the same as for the Date field in .changes
	// files, as used in debian/changelog files, and documented in Policy
	// 4.4 (Debian changelog: debian/changelog).
	Date       string
	ValidUntil string `control:"Valid-Until"`

	// Note the upper-case S in MD5Sum (unlike in Packages and Sources files).
	//
	// These fields are used for two purposes:
	//
	//   - describe what package index files are present
	//   - when a release signature is available, it certifies that listed
	//     index files and files referenced by those index files are genuine
	//
	// Those fields shall be multi-line fields containing multiple lines of
	// whitespace separated data. Each line shall contain:
	//
	//   - the checksum of the file in the format corresponding to the field
	//   - the size of the file (integer >= 0)
	//   - the filename relative to the directory of the Release file
	//
	// Each datum must be separated by one or more whitespace characters.
	//
	// Server requirements:
	//
	// The checksum and sizes shall match the actual existing files. If indexes
	// are compressed, checksum data must be provided for uncompressed files as
	// well, even if not present on the server.
	//
	// Client behaviour:
	//
	// Any file should be checked at least once, either in compressed or
	// uncompressed form, depending on which data is available. If a file has
	// no associated data, the client shall inform the user about this under
	// possibly dangerous situations (such as installing a package from that
	// repository). If a file does not match the data specified in the release
	// file, the client shall not use any information from that file, inform
	// the user, and might use old information (such as the previous locally
	// kept information) instead.
	MD5Sum []control.MD5FileHash    `delim:"\n" strip:" \t\n\r" multiline:"true"`
	SHA1   []control.SHA1FileHash   `delim:"\n" strip:" \t\n\r" multiline:"true"`
	SHA256 []control.SHA256FileHash `delim:"\n" strip:" \t\n\r" multiline:"true"`
	SHA512 []control.SHA512FileHash `delim:"\n" strip:" \t\n\r" multiline:"true"`

	// The NotAutomatic and ButAutomaticUpgrades fields are optional boolean
	// fields instructing the package manager. They may contain the values
	// "yes" and "no". If one of the fields is not specified, this has the same
	// meaning as a value of "no".
	//
	// If a value of "yes" is specified for the NotAutomatic field, a package
	// manager should not install packages (or upgrade to newer versions) from
	// this repository without explicit user consent (APT assigns priority 1 to
	// this). If the field ButAutomaticUpgrades is specified as well and has the
	// value "yes", the package manager should automatically install package
	// upgrades from this repository, if the installed version of the package
	// is higher than the version of the package in other sources (APT assigns
	// priority 100).
	//
	// Specifying "yes" for ButAutomaticUpgrades without specifying "yes" for
	// NotAutomatic is invalid.
	NotAutomatic         string
	ButAutomaticUpgrades string

	AcquireByHash bool `control:"Acquire-By-Hash"`
}
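
// A minimal Release paragraph as it might appear on a mirror (illustrative
// values, abridged checksums):
//
//	Origin: Debian
//	Label: Debian
//	Suite: unstable
//	Codename: sid
//	Date: Sat, 14 Jul 2018 08:00:00 +0000
//	Valid-Until: Sat, 21 Jul 2018 08:00:00 +0000
//	Architectures: amd64 arm64
//	Components: main contrib non-free
//	SHA256:
//	 9a3c3d... 1578 main/binary-amd64/Packages
//	 4f1d2e... 1022 main/binary-amd64/Packages.gz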

// Given a file declared in the Release file, get the FileHash entries
// for that file (SHA256, SHA512). These can be used to ensure the
// integrity of files in the archive.
func (r *Release) Indices() map[string]control.FileHashes {
	ret := map[string]control.FileHashes{}
	// https://wiki.debian.org/DebianRepository/Format#Size.2C_MD5sum.2C_SHA1.2C_SHA256.2C_SHA512:
	// Clients may not use the MD5Sum and SHA1 fields for security purposes,
	// and must require a SHA256 or a SHA512 field.
	for _, el := range r.SHA256 {
		ret[el.Filename] = append(ret[el.Filename], el.FileHash)
	}
	for _, el := range r.SHA512 {
		ret[el.Filename] = append(ret[el.Filename], el.FileHash)
	}
	return ret
}

func (r *Release) AddHash(h control.FileHash) error {
	switch h.Algorithm {
	case "sha256":
		r.SHA256 = append(r.SHA256, control.SHA256FileHash{h})
	case "sha1":
		r.SHA1 = append(r.SHA1, control.SHA1FileHash{h})
	case "sha512":
		r.SHA512 = append(r.SHA512, control.SHA512FileHash{h})
	case "md5":
		r.MD5Sum = append(r.MD5Sum, control.MD5FileHash{h})
	default:
		return fmt.Errorf("No known hash: '%s'", h.Algorithm)
	}
	return nil
}

// }}}

// LoadInRelease {{{

// Given an InRelease io.Reader, and the OpenPGP keyring
// to validate against, return the parsed InRelease file.
func LoadInRelease(in io.Reader, keyring *openpgp.EntityList) (*Release, error) {
	ret := Release{}
	decoder, err := control.NewDecoder(in, keyring)
	if err != nil {
		return nil, err
	}
	return &ret, decoder.Decode(&ret)
}

// }}}

// LoadInReleaseFile {{{

// Given a path to the InRelease file on the filesystem, and the OpenPGP
// keyring to validate against, return the parsed InRelease file.
func LoadInReleaseFile(path string, keyring *openpgp.EntityList) (*Release, error) {
	fd, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	return LoadInRelease(fd, keyring)
}

// }}}
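
// Verification sketch: load the archive keyring, then parse and verify an
// on-disk InRelease file (paths illustrative; error handling elided):
//
//	f, _ := os.Open("/usr/share/keyrings/debian-archive-keyring.gpg")
//	keyring, _ := openpgp.ReadKeyRing(f)
//	rel, err := archive.LoadInReleaseFile("dists/unstable/InRelease", &keyring)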

// vim: foldmethod=marker

go-archive-1.0/sources.go

/* {{{ Copyright (c) Paul R. Tagliamonte, 2015
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE. }}} */

package archive

import (
	"fmt"
	"io"
	"os"
	"strings"

	"pault.ag/go/debian/control"
	// "pault.ag/go/debian/deb"
	"pault.ag/go/debian/dependency"
	"pault.ag/go/debian/version"
)

type SourceName struct {
	Name    string
	Version version.Version
}

func (sn *SourceName) UnmarshalControl(data string) error {
	hunks := strings.Split(data, " ")
	var err error
	switch len(hunks) {
	case 1:
		sn.Name = hunks[0]
		return nil
	case 2:
		sn.Name = hunks[0]
		sn.Version, err = version.Parse(hunks[1][1 : len(hunks[1])-1])
		if err != nil {
			return err
		}
		return nil
	default:
		return fmt.Errorf("Source entry is malformed: %s\n", data)
	}
}

func (sn *SourceName) MarshalControl() (string, error) {
	if sn.Version.Empty() {
		return sn.Name, nil
	}
	return fmt.Sprintf("%s (%s)", sn.Name, sn.Version.String()), nil
}

// Source {{{

// The files dists/$DIST/$COMP/source/Sources are called Sources indices. They
// consist of multiple paragraphs, where each paragraph has the format defined
// in Policy 5.5 (5.4 Debian source control files -- .dsc), with the following
// changes and additional fields. The changes are:
//
//   - The "Source" field is renamed to "Package"
//   - A new mandatory field "Directory"
//   - A new optional field "Priority"
//   - A new optional field "Section"
//   - (Note that any fields present in .dsc files can end up here as well,
//     even if they are not documented by Debian policy, or not documented
//     yet.)
//
// Each paragraph shall begin with a "Package" field. Clients may also accept
// files where this is not the case.
type Source struct {
	control.Paragraph

	Package   string
	Directory string `required:"true"`
	Priority  string
	Section   string

	Format           string
	Binaries         []string          `control:"Binary" delim:","`
	Architectures    []dependency.Arch `control:"Architecture"`
	Version          version.Version
	Origin           string
	Maintainer       string
	Uploaders        []string
	Homepage         string
	StandardsVersion string `control:"Standards-Version"`

	PackageList []string `control:"Package-List" delim:"\n" strip:" \t\n\r" multiline:"true"`

	ChecksumsSha1   []control.SHA1FileHash   `control:"Checksums-Sha1" delim:"\n" strip:" \t\n\r" multiline:"true"`
	ChecksumsSha256 []control.SHA256FileHash `control:"Checksums-Sha256" delim:"\n" strip:" \t\n\r" multiline:"true"`
	Files           []control.MD5FileHash    `delim:"\n" strip:" \t\n\r" multiline:"true"`
}

// Source Helpers {{{

func (s Source) BuildDepends() (*dependency.Dependency, error) {
	return dependency.Parse(s.Paragraph.Values["Build-Depends"])
}

// }}}

// SourceFromDsc {{{

func SourceFromDsc(dsc *control.DSC, directory string) (*Source, error) {
	pkg := Source{}
	paragraph := dsc.Paragraph
	paragraph.Set("Directory", directory)
	// paragraph.Set("Filename", debFile.Path)
	return &pkg, control.UnpackFromParagraph(paragraph, &pkg)
}

// }}}
// }}}

// Sources {{{

type Sources struct {
	decoder *control.Decoder
}

// Next {{{

// Get the next Source entry in the Sources list. This will return an
// io.EOF at the last entry.
func (p *Sources) Next() (*Source, error) {
	next := Source{}
	return &next, p.decoder.Decode(&next)
}

// }}}

// LoadSourcesFile {{{

// Given a path, create a Sources iterator. Note that the Sources
// file is not OpenPGP signed, so one will need to verify the integrity
// of this file from the InRelease file before trusting any output.
func LoadSourcesFile(path string) (*Sources, error) {
	fd, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	return LoadSources(fd)
}

// }}}

// LoadSources {{{

// Given an io.Reader, create a Sources iterator. Note that the Sources
// file is not OpenPGP signed, so one will need to verify the integrity
// of this file from the InRelease file before trusting any output.
func LoadSources(in io.Reader) (*Sources, error) {
	decoder, err := control.NewDecoder(in, nil)
	if err != nil {
		return nil, err
	}
	return &Sources{decoder: decoder}, nil
}

// }}}

// }}}

// vim: foldmethod=marker

go-archive-1.0/untangle.go

package archive

import (
	"fmt"
	"io"
	"sort"

	"pault.ag/go/debian/dependency"
	"pault.ag/go/debian/version"
)

func SortPackages(packages []Package) []Package {
	sort.Slice(packages, func(i, j int) bool {
		return version.Compare(packages[i].Version, packages[j].Version) > 0
	})
	return packages
}

func SortSources(sources []Source) []Source {
	sort.Slice(sources, func(i, j int) bool {
		return version.Compare(sources[i].Version, sources[j].Version) > 0
	})
	return sources
}

type PackageMap map[string][]Package

func LoadPackageMap(binaries Packages) (*PackageMap, error) {
	ret := PackageMap{}
	for {
		binary, err := binaries.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			return nil, err
		}
		ret[binary.Package] = SortPackages(append(ret[binary.Package], *binary))
	}
	return &ret, nil
}

type SourceMap map[string][]Source

func LoadSourceMap(sources Sources) (*SourceMap, error) {
	ret := SourceMap{}
	for {
		source, err := sources.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			return nil, err
		}
		ret[source.Package] = SortSources(append(ret[source.Package], *source))
	}
	return &ret, nil
}

func (s SourceMap) Matches(possi dependency.Possibility) (int, error) {
	if possi.Arch != nil {
		return -1, fmt.Errorf("Arch is specified, but we're source! bad possi.")
	}
	candidates := s[possi.Name]
	if len(candidates) == 0 {
		return -1, fmt.Errorf("I have no idea what that source is!")
	}
	for i, candidate := range candidates {
		if possi.Version.SatisfiedBy(candidate.Version) {
			return i, nil
		}
	}
	return -1, fmt.Errorf("No satisfactory dependency found")
}
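
// Resolution sketch: index a Sources file, then resolve one dependency
// possibility against it (path illustrative; the Relations/Possibilities
// field names are an assumption about pault.ag/go/debian/dependency; error
// handling elided):
//
//	srcs, _ := archive.LoadSourcesFile("Sources")
//	m, _ := archive.LoadSourceMap(*srcs)
//	dep, _ := src.BuildDepends() // src is an archive.Source
//	idx, err := m.Matches(dep.Relations[0].Possibilities[0])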