--- blazer-0.4.4/.gitignore ---

bonfire

# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof

--- blazer-0.4.4/.travis.yml ---

language: go

go:
  - tip

branches:
  only:
    - master

before_script: go run internal/bin/cleanup/cleanup.go

script:
  - go test -v ./base ./b2 ./x/...
  - go vet -v ./base ./b2 ./x/...

--- blazer-0.4.4/CONTRIBUTING.md ---

Want to contribute? Great! First, read this page (including the small print at the end).

### Before you contribute

Before we can use your code, you must sign the
[Google Individual Contributor License Agreement](https://cla.developers.google.com/about/google-individual)
(CLA), which you can do online. The CLA is necessary mainly because you own the
copyright to your changes, even after your contribution becomes part of our
codebase, so we need your permission to use and distribute your code. We also
need to be sure of various other things—for instance that you'll tell us if
you know that your code infringes on other people's patents. You don't have to
sign the CLA until after you've submitted your code for review and a member has
approved it, but you must do it before we can put your code into our codebase.

Before you start working on a larger contribution, you should get in touch with
us first through the issue tracker with your idea so that we can help out and
possibly guide you. Coordinating up front makes it much easier to avoid
frustration later on.

### Code reviews

All submissions, including submissions by project members, require review. We
use Github pull requests for this purpose.

### The small print

Contributions made by corporations are covered by a different agreement than
the one above, the
[Software Grant and Corporate Contributor License Agreement](https://cla.developers.google.com/about/google-corporate).

--- blazer-0.4.4/LICENSE ---

Copyright 2016, Google

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

--- blazer-0.4.4/README.md ---

Blazer
====

[![GoDoc](https://godoc.org/github.com/kurin/blazer/b2?status.svg)](https://godoc.org/github.com/kurin/blazer/b2)
[![Build Status](https://travis-ci.org/kurin/blazer.svg)](https://travis-ci.org/kurin/blazer)

Blazer is a Golang client library for Backblaze's B2 object storage service.
It is designed for simple integration with existing applications that may
already be using S3 and Google Cloud Storage, by exporting only a few standard
Go types. It implements and satisfies the [B2 integration
checklist](https://www.backblaze.com/b2/docs/integration_checklist.html),
automatically handling error recovery, reauthentication, and other low-level
aspects, making it suitable for uploading very large files, or for transfers
that span multi-day time scales.

```go
import "github.com/kurin/blazer/b2"
```

## Examples

### Copy a file into B2

```go
func copyFile(ctx context.Context, bucket *b2.Bucket, src, dst string) error {
	f, err := os.Open(src)
	if err != nil {
		return err
	}
	defer f.Close()

	obj := bucket.Object(dst)
	w := obj.NewWriter(ctx)
	if _, err := io.Copy(w, f); err != nil {
		w.Close()
		return err
	}
	return w.Close()
}
```

If the file is less than 100MB, Blazer will simply buffer the file and use the
`b2_upload_file` API to send the file to Backblaze. If the file is greater
than 100MB, Blazer will use B2's large file support to upload the file in
100MB chunks.

### Copy a file into B2, with multiple concurrent uploads

Uploading a large file with multiple HTTP connections is simple:

```go
func copyFile(ctx context.Context, bucket *b2.Bucket, writers int, src, dst string) error {
	f, err := os.Open(src)
	if err != nil {
		return err
	}
	defer f.Close()

	w := bucket.Object(dst).NewWriter(ctx)
	w.ConcurrentUploads = writers
	if _, err := io.Copy(w, f); err != nil {
		w.Close()
		return err
	}
	return w.Close()
}
```

This will automatically split the file into `writers` chunks of 100MB uploads.
Note that 100MB is the smallest chunk size that B2 supports.

### Download a file from B2

Downloading is as simple as uploading:

```go
func downloadFile(ctx context.Context, bucket *b2.Bucket, downloads int, src, dst string) error {
	r := bucket.Object(src).NewReader(ctx)
	defer r.Close()

	f, err := os.Create(dst)
	if err != nil {
		return err
	}
	r.ConcurrentDownloads = downloads
	if _, err := io.Copy(f, r); err != nil {
		f.Close()
		return err
	}
	return f.Close()
}
```

### List all objects in a bucket

```go
func printObjects(ctx context.Context, bucket *b2.Bucket) error {
	iterator := bucket.List(ctx)
	for iterator.Next() {
		fmt.Println(iterator.Object())
	}
	return iterator.Err()
}
```

### Grant temporary auth to a file

Say you have a number of files in a private bucket, and you want to allow
other people to download some files. This is possible to do by issuing a
temporary authorization token for the prefix of the files you want to share.

```go
token, err := bucket.AuthToken(ctx, "photos", time.Hour)
```

If successful, `token` is then an authorization token valid for one hour,
which can be set in HTTP GET requests.

The hostname to use when downloading files via HTTP is account-specific and
can be found via the BaseURL method:

```go
base := bucket.BaseURL()
```
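As an illustrative sketch of how such a token might be used (this example is
not part of the original README; the bucket and object names are
placeholders), it can be sent as the `Authorization` header of an ordinary GET
request:

```go
req, err := http.NewRequest("GET", base+"/file/my-bucket/photos/kitten.jpg", nil)
if err != nil {
	return err
}
req.Header.Set("Authorization", token) // token from bucket.AuthToken above
resp, err := http.DefaultClient.Do(req)
if err != nil {
	return err
}
defer resp.Body.Close()
// resp.Body now streams the object's contents.
```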
---

This is not an official Google product.

--- blazer-0.4.4/b2/b2.go ---

// Copyright 2016, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package b2 provides a high-level interface to Backblaze's B2 cloud storage
// service.
//
// It is specifically designed to abstract away the Backblaze API details by
// providing familiar Go interfaces, specifically an io.Writer for object
// storage, and an io.Reader for object download. Handling of transient
// errors, including network and authentication timeouts, is transparent.
//
// Methods that perform network requests accept a context.Context argument.
// Callers should use the context's cancellation abilities to end requests
// early, or to provide timeout or deadline guarantees.
//
// This package is in development and its API is subject to change.
package b2

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"regexp"
	"strconv"
	"sync"
	"time"
)

// Client is a Backblaze B2 client.
type Client struct {
	backend beRootInterface

	slock    sync.Mutex
	sWriters map[string]*Writer
	sReaders map[string]*Reader
	sMethods []methodCounter
}

// NewClient creates and returns a new Client with valid B2 service account
// tokens.
func NewClient(ctx context.Context, account, key string, opts ...ClientOption) (*Client, error) {
	c := &Client{
		backend: &beRoot{
			b2i: &b2Root{},
		},
		sMethods: []methodCounter{
			newMethodCounter(time.Minute, time.Second),
			newMethodCounter(time.Minute*5, time.Second),
			newMethodCounter(time.Hour, time.Minute),
			newMethodCounter(0, 0), // forever
		},
	}
	opts = append(opts, client(c))
	if err := c.backend.authorizeAccount(ctx, account, key, opts...); err != nil {
		return nil, err
	}
	return c, nil
}

type clientOptions struct {
	client          *Client
	transport       http.RoundTripper
	failSomeUploads bool
	expireTokens    bool
	capExceeded     bool
	userAgents      []string
}

// for testing
func (c clientOptions) eq(o clientOptions) bool {
	if c.client != o.client ||
		c.transport != o.transport ||
		c.failSomeUploads != o.failSomeUploads ||
		c.expireTokens != o.expireTokens ||
		c.capExceeded != o.capExceeded {
		return false
	}
	if len(c.userAgents) != len(o.userAgents) {
		return false
	}
	for i := range c.userAgents {
		if c.userAgents[i] != o.userAgents[i] {
			return false
		}
	}
	return true
}

// A ClientOption allows callers to adjust various per-client settings.
type ClientOption func(*clientOptions)

// UserAgent sets the User-Agent HTTP header. The default header is
// "blazer/<version>"; the value set here will be prepended to that. This can
// be set multiple times.
//
// A user agent is generally of the form "<product>/<version> (<comments>)".
func UserAgent(agent string) ClientOption {
	return func(o *clientOptions) {
		o.userAgents = append(o.userAgents, agent)
	}
}

// Transport sets the underlying HTTP transport mechanism. If unset,
// http.DefaultTransport is used.
func Transport(rt http.RoundTripper) ClientOption {
	return func(c *clientOptions) {
		c.transport = rt
	}
}

// FailSomeUploads requests intermittent upload failures from the B2 service.
// This is mostly useful for testing.
func FailSomeUploads() ClientOption {
	return func(c *clientOptions) {
		c.failSomeUploads = true
	}
}
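// newClientWithOptions is an illustrative sketch (added for exposition; it is
// not part of the original source) showing how ClientOptions compose at
// construction time. The account and key values are placeholders supplied by
// the caller.
func newClientWithOptions(ctx context.Context, account, key string) (*Client, error) {
	return NewClient(ctx, account, key,
		UserAgent("my-app/1.0"),          // prepended to the default "blazer/<version>"
		Transport(http.DefaultTransport), // same as the default when unset
	)
}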
// ExpireSomeAuthTokens requests intermittent authentication failures from the
// B2 service.
func ExpireSomeAuthTokens() ClientOption {
	return func(c *clientOptions) {
		c.expireTokens = true
	}
}

// ForceCapExceeded requests a cap limit from the B2 service. This causes all
// uploads to be treated as if they would exceed the configured B2 capacity.
func ForceCapExceeded() ClientOption {
	return func(c *clientOptions) {
		c.capExceeded = true
	}
}

func client(cl *Client) ClientOption {
	return func(c *clientOptions) {
		c.client = cl
	}
}

type clientTransport struct {
	client *Client
	rt     http.RoundTripper
}

func (ct *clientTransport) RoundTrip(r *http.Request) (*http.Response, error) {
	m := r.Header.Get("X-Blazer-Method")
	t := ct.rt
	if t == nil {
		t = http.DefaultTransport
	}
	b := time.Now()
	resp, err := t.RoundTrip(r)
	e := time.Now()
	if err != nil {
		return resp, err
	}
	if m != "" && ct.client != nil {
		ct.client.slock.Lock()
		m := method{
			name:     m,
			duration: e.Sub(b),
			status:   resp.StatusCode,
		}
		for _, counter := range ct.client.sMethods {
			counter.record(m)
		}
		ct.client.slock.Unlock()
	}
	return resp, nil
}

// Bucket is a reference to a B2 bucket.
type Bucket struct {
	b beBucketInterface
	r beRootInterface

	c       *Client
	urlPool *urlPool
}

type BucketType string

const (
	UnknownType BucketType = ""
	Private                = "allPrivate"
	Public                 = "allPublic"
	Snapshot               = "snapshot"
)

// BucketAttrs holds a bucket's metadata attributes.
type BucketAttrs struct {
	// Type lists or sets the new bucket type. If Type is UnknownType during a
	// bucket.Update, the type is not changed.
	Type BucketType

	// Info records user data, limited to ten keys. If nil during a
	// bucket.Update, the existing bucket info is not modified. A bucket's
	// metadata can be removed by updating with an empty map.
	Info map[string]string

	// Reports or sets bucket lifecycle rules. If nil during a bucket.Update,
	// the rules are not modified. A bucket's rules can be removed by updating
	// with an empty slice.
	LifecycleRules []LifecycleRule
}

// A LifecycleRule describes an object's life cycle, namely how many days after
// uploading an object should be hidden, and after how many days hidden an
// object should be deleted. Multiple rules may not apply to the same file or
// set of files. Be careful when using this feature; it can (is designed to)
// delete your data.
type LifecycleRule struct {
	// Prefix specifies all the files in the bucket to which this rule applies.
	Prefix string

	// DaysNewUntilHidden specifies the number of days after which a file
	// will automatically be hidden. 0 means "do not automatically hide new
	// files".
	DaysNewUntilHidden int

	// DaysHiddenUntilDeleted specifies the number of days after which a hidden
	// file is deleted. 0 means "do not automatically delete hidden files".
	DaysHiddenUntilDeleted int
}

type b2err struct {
	err              error
	notFoundErr      bool
	isUpdateConflict bool
}

func (e b2err) Error() string {
	return e.err.Error()
}

// IsNotExist reports whether a given error indicates that an object or bucket
// does not exist.
func IsNotExist(err error) bool { berr, ok := err.(b2err) if !ok { return false } return berr.notFoundErr } const uploadURLPoolSize = 100 type urlPool struct { ch chan beURLInterface } func newURLPool() *urlPool { return &urlPool{ch: make(chan beURLInterface, uploadURLPoolSize)} } func (p *urlPool) get() beURLInterface { select { case ue := <-p.ch: // if the channel has an upload URL available, use that return ue default: // otherwise return nil, a new upload URL needs to be generated return nil } } func (p *urlPool) put(u beURLInterface) { select { case p.ch <- u: // put the URL back if possible default: // if the channel is full, throw it away } } // Bucket returns a bucket if it exists. func (c *Client) Bucket(ctx context.Context, name string) (*Bucket, error) { buckets, err := c.backend.listBuckets(ctx) if err != nil { return nil, err } for _, bucket := range buckets { if bucket.name() == name { return &Bucket{ b: bucket, r: c.backend, c: c, urlPool: newURLPool(), }, nil } } return nil, b2err{ err: fmt.Errorf("%s: bucket not found", name), notFoundErr: true, } } // NewBucket returns a bucket. The bucket is created with the given attributes // if it does not already exist. If attrs is nil, it is created as a private // bucket with no info metadata and no lifecycle rules. func (c *Client) NewBucket(ctx context.Context, name string, attrs *BucketAttrs) (*Bucket, error) { buckets, err := c.backend.listBuckets(ctx) if err != nil { return nil, err } for _, bucket := range buckets { if bucket.name() == name { return &Bucket{ b: bucket, r: c.backend, c: c, urlPool: newURLPool(), }, nil } } if attrs == nil { attrs = &BucketAttrs{Type: Private} } b, err := c.backend.createBucket(ctx, name, string(attrs.Type), attrs.Info, attrs.LifecycleRules) if err != nil { return nil, err } return &Bucket{ b: b, r: c.backend, c: c, urlPool: newURLPool(), }, err } // ListBuckets returns all the available buckets. func (c *Client) ListBuckets(ctx context.Context) ([]*Bucket, error) { bs, err := c.backend.listBuckets(ctx) if err != nil { return nil, err } var buckets []*Bucket for _, b := range bs { buckets = append(buckets, &Bucket{ b: b, r: c.backend, c: c, urlPool: newURLPool(), }) } return buckets, nil } // IsUpdateConflict reports whether a given error is the result of a bucket // update conflict. func IsUpdateConflict(err error) bool { e, ok := err.(b2err) if !ok { return false } return e.isUpdateConflict } // Update modifies the given bucket with new attributes. It is possible that // this method could fail with an update conflict, in which case you should // retrieve the latest bucket attributes with Attrs and try again. func (b *Bucket) Update(ctx context.Context, attrs *BucketAttrs) error { return b.b.updateBucket(ctx, attrs) } // Attrs retrieves and returns the current bucket's attributes. func (b *Bucket) Attrs(ctx context.Context) (*BucketAttrs, error) { bucket, err := b.c.Bucket(ctx, b.Name()) if err != nil { return nil, err } b.b = bucket.b return b.b.attrs(), nil } var bNotExist = regexp.MustCompile("Bucket.*does not exist") // Delete removes a bucket. The bucket must be empty. func (b *Bucket) Delete(ctx context.Context) error { err := b.b.deleteBucket(ctx) if err == nil { return err } // So, the B2 documentation disagrees with the implementation here, and the // error code is not really helpful. If the bucket doesn't exist, the error is // 400, not 404, and the string is "Bucket does not exist". However, the // documentation says it will be "Bucket id does not exist". 
In case // they update the implementation to match the documentation, we're just going // to regexp over the error message and hope it's okay. if bNotExist.MatchString(err.Error()) { return b2err{ err: err, notFoundErr: true, } } return err } // BaseURL returns the base URL to use for all files uploaded to this bucket. func (b *Bucket) BaseURL() string { return b.b.baseURL() } // Name returns the bucket's name. func (b *Bucket) Name() string { return b.b.name() } // Object represents a B2 object. type Object struct { attrs *Attrs name string f beFileInterface b *Bucket } // Attrs holds an object's metadata. type Attrs struct { Name string // Not used on upload. Size int64 // Not used on upload. ContentType string // Used on upload, default is "application/octet-stream". Status ObjectState // Not used on upload. UploadTimestamp time.Time // Not used on upload. SHA1 string // Not used on upload. Can be "none" for large files. LastModified time.Time // If present, and there are fewer than 10 keys in the Info field, this is saved on upload. Info map[string]string // Save arbitrary metadata on upload, but limited to 10 keys. } // Name returns an object's name func (o *Object) Name() string { return o.name } // Attrs returns an object's attributes. func (o *Object) Attrs(ctx context.Context) (*Attrs, error) { if err := o.ensure(ctx); err != nil { return nil, err } fi, err := o.f.getFileInfo(ctx) if err != nil { return nil, err } name, sha, size, ct, info, st, stamp := fi.stats() var state ObjectState switch st { case "upload": state = Uploaded case "start": state = Started case "hide": state = Hider case "folder": state = Folder } var mtime time.Time if v, ok := info["src_last_modified_millis"]; ok { ms, err := strconv.ParseInt(v, 10, 64) if err != nil { return nil, err } mtime = time.Unix(ms/1e3, (ms%1e3)*1e6) delete(info, "src_last_modified_millis") } return &Attrs{ Name: name, Size: size, ContentType: ct, UploadTimestamp: stamp, SHA1: sha, Info: info, Status: state, LastModified: mtime, }, nil } // ObjectState represents the various states an object can be in. type ObjectState int const ( Unknown ObjectState = iota // Started represents a large upload that has been started but not finished // or canceled. Started // Uploaded represents an object that has finished uploading and is complete. Uploaded // Hider represents an object that exists only to hide another object. It // cannot in itself be downloaded and, in particular, is not a hidden object. Hider // Folder is a special state given to non-objects that are returned during a // List call with a ListDelimiter option. Folder ) // Object returns a reference to the named object in the bucket. Hidden // objects cannot be referenced in this manner; they can only be found by // finding the appropriate reference in ListObjects. func (b *Bucket) Object(name string) *Object { return &Object{ name: name, b: b, } } // URL returns the full URL to the given object. func (o *Object) URL() string { return fmt.Sprintf("%s/file/%s/%s", o.b.BaseURL(), o.b.Name(), o.name) } // NewWriter returns a new writer for the given object. Objects that are // overwritten are not deleted, but are "hidden". // // Callers must close the writer when finished and check the error status. func (o *Object) NewWriter(ctx context.Context) *Writer { ctx, cancel := context.WithCancel(ctx) return &Writer{ o: o, name: o.name, ctx: ctx, cancel: cancel, } } // NewRangeReader returns a reader for the given object, reading up to length // bytes. 
If length is negative, the rest of the object is read. func (o *Object) NewRangeReader(ctx context.Context, offset, length int64) *Reader { ctx, cancel := context.WithCancel(ctx) return &Reader{ ctx: ctx, cancel: cancel, o: o, name: o.name, chunks: make(map[int]*rchunk), length: length, offset: offset, } } // NewReader returns a reader for the given object. func (o *Object) NewReader(ctx context.Context) *Reader { return o.NewRangeReader(ctx, 0, -1) } func (o *Object) ensure(ctx context.Context) error { if o.f == nil { f, err := o.b.getObject(ctx, o.name) if err != nil { return err } o.f = f.f } return nil } // Delete removes the given object. func (o *Object) Delete(ctx context.Context) error { if err := o.ensure(ctx); err != nil { return err } return o.f.deleteFileVersion(ctx) } // Cursor is passed to ListObjects to return subsequent pages. // // DEPRECATED. Will be removed in a future release. type Cursor struct { // Prefix limits the listed objects to those that begin with this string. Prefix string // Delimiter denotes the path separator. If set, object listings will be // truncated at this character. // // For example, if the bucket contains objects foo/bar, foo/baz, and foo, // then a delimiter of "/" will cause the listing to return "foo" and "foo/". // Otherwise, the listing would have returned all object names. // // Note that objects returned that end in the delimiter may not be actual // objects, e.g. you cannot read from (or write to, or delete) an object "foo/", // both because no actual object exists and because B2 disallows object names // that end with "/". If you want to ensure that all objects returned by // ListObjects and ListCurrentObjects are actual objects, leave this unset. Delimiter string name string id string } // ListObjects returns all objects in the bucket, including multiple versions // of the same object. Cursor may be nil; when passed to a subsequent query, // it will continue the listing. // // ListObjects will return io.EOF when there are no objects left in the bucket, // however it may do so concurrently with the last objects. // // DEPRECATED. Will be removed in a future release. func (b *Bucket) ListObjects(ctx context.Context, count int, c *Cursor) ([]*Object, *Cursor, error) { if c == nil { c = &Cursor{} } fs, name, id, err := b.b.listFileVersions(ctx, count, c.name, c.id, c.Prefix, c.Delimiter) if err != nil { return nil, nil, err } var next *Cursor if name != "" && id != "" { next = &Cursor{ Prefix: c.Prefix, Delimiter: c.Delimiter, name: name, id: id, } } var objects []*Object for _, f := range fs { objects = append(objects, &Object{ name: f.name(), f: f, b: b, }) } var rtnErr error if len(objects) == 0 || next == nil { rtnErr = io.EOF } return objects, next, rtnErr } // ListCurrentObjects is similar to ListObjects, except that it returns only // current, unhidden objects in the bucket. // // DEPRECATED. Will be removed in a future release. 
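// An illustrative pagination loop over ListObjects (an editor's sketch, not
// part of the original source; bucket is assumed to be a *Bucket). ListObjects
// may return io.EOF together with the final batch of objects, so the batch is
// processed before the error is inspected:
//
//	var cur *Cursor
//	for {
//		objs, c, err := bucket.ListObjects(ctx, 100, cur)
//		if err != nil && err != io.EOF {
//			return err
//		}
//		for _, o := range objs {
//			fmt.Println(o)
//		}
//		if err == io.EOF {
//			return nil
//		}
//		cur = c
//	}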
func (b *Bucket) ListCurrentObjects(ctx context.Context, count int, c *Cursor) ([]*Object, *Cursor, error) { if c == nil { c = &Cursor{} } fs, name, err := b.b.listFileNames(ctx, count, c.name, c.Prefix, c.Delimiter) if err != nil { return nil, nil, err } var next *Cursor if name != "" { next = &Cursor{ Prefix: c.Prefix, Delimiter: c.Delimiter, name: name, } } var objects []*Object for _, f := range fs { objects = append(objects, &Object{ name: f.name(), f: f, b: b, }) } var rtnErr error if len(objects) == 0 || next == nil { rtnErr = io.EOF } return objects, next, rtnErr } // ListUnfinishedLargeFiles lists any objects that correspond to large file uploads that haven't been completed. // This can happen for example when an upload is interrupted. // // DEPRECATED. Will be removed in a future release. func (b *Bucket) ListUnfinishedLargeFiles(ctx context.Context, count int, c *Cursor) ([]*Object, *Cursor, error) { if c == nil { c = &Cursor{} } fs, name, err := b.b.listUnfinishedLargeFiles(ctx, count, c.name) if err != nil { return nil, nil, err } var next *Cursor if name != "" { next = &Cursor{ name: name, } } var objects []*Object for _, f := range fs { objects = append(objects, &Object{ name: f.name(), f: f, b: b, }) } var rtnErr error if len(objects) == 0 || next == nil { rtnErr = io.EOF } return objects, next, rtnErr } // Hide hides the object from name-based listing. func (o *Object) Hide(ctx context.Context) error { if err := o.ensure(ctx); err != nil { return err } _, err := o.b.b.hideFile(ctx, o.name) return err } // Reveal unhides (if hidden) the named object. If there are multiple objects // of a given name, it will reveal the most recent. func (b *Bucket) Reveal(ctx context.Context, name string) error { cur := &Cursor{ name: name, } objs, _, err := b.ListObjects(ctx, 1, cur) if err != nil && err != io.EOF { return err } if len(objs) < 1 || objs[0].name != name { return b2err{err: fmt.Errorf("%s: not found", name), notFoundErr: true} } obj := objs[0] if obj.f.status() != "hide" { return nil } return obj.Delete(ctx) } // I don't want to import all of ioutil for this. type discard struct{} func (discard) Write(p []byte) (int, error) { return len(p), nil } func (b *Bucket) getObject(ctx context.Context, name string) (*Object, error) { fr, err := b.b.downloadFileByName(ctx, name, 0, 1) if err != nil { return nil, err } io.Copy(discard{}, fr) fr.Close() return &Object{ name: name, f: b.b.file(fr.id(), name), b: b, }, nil } // AuthToken returns an authorization token that can be used to access objects // in a private bucket. Only objects that begin with prefix can be accessed. // The token expires after the given duration. func (b *Bucket) AuthToken(ctx context.Context, prefix string, valid time.Duration) (string, error) { return b.b.getDownloadAuthorization(ctx, prefix, valid) } blazer-0.4.4/b2/b2_test.go000066400000000000000000000456261330654575600152250ustar00rootroot00000000000000// Copyright 2016, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package b2 import ( "bytes" "context" "crypto/sha1" "fmt" "io" "io/ioutil" "net/http" "sort" "strings" "sync" "testing" "time" ) const ( bucketName = "b2-tests" smallFileName = "Teeny Tiny" largeFileName = "BigBytes" ) var gmux = &sync.Mutex{} type testError struct { retry bool backoff time.Duration reauth bool reupload bool } func (t testError) Error() string { return fmt.Sprintf("retry %v; backoff %v; reauth %v; reupload %v", t.retry, t.backoff, t.reauth, t.reupload) } type errCont struct { errMap map[string]map[int]error opMap map[string]int } func (e *errCont) getError(name string) error { if e.errMap == nil { return nil } if e.opMap == nil { e.opMap = make(map[string]int) } i := e.opMap[name] e.opMap[name]++ return e.errMap[name][i] } type testRoot struct { errs *errCont auths int bucketMap map[string]map[string]string } func (t *testRoot) authorizeAccount(context.Context, string, string, ...ClientOption) error { t.auths++ return nil } func (t *testRoot) backoff(err error) time.Duration { e, ok := err.(testError) if !ok { return 0 } return e.backoff } func (t *testRoot) reauth(err error) bool { e, ok := err.(testError) if !ok { return false } return e.reauth } func (t *testRoot) reupload(err error) bool { e, ok := err.(testError) if !ok { return false } return e.reupload } func (t *testRoot) transient(err error) bool { e, ok := err.(testError) if !ok { return false } return e.retry || e.reupload || e.backoff > 0 } func (t *testRoot) createBucket(_ context.Context, name, _ string, _ map[string]string, _ []LifecycleRule) (b2BucketInterface, error) { if err := t.errs.getError("createBucket"); err != nil { return nil, err } if _, ok := t.bucketMap[name]; ok { return nil, fmt.Errorf("%s: bucket exists", name) } m := make(map[string]string) t.bucketMap[name] = m return &testBucket{ n: name, errs: t.errs, files: m, }, nil } func (t *testRoot) listBuckets(context.Context) ([]b2BucketInterface, error) { var b []b2BucketInterface for k, v := range t.bucketMap { b = append(b, &testBucket{ n: k, errs: t.errs, files: v, }) } return b, nil } type testBucket struct { n string errs *errCont files map[string]string } func (t *testBucket) name() string { return t.n } func (t *testBucket) btype() string { return "allPrivate" } func (t *testBucket) attrs() *BucketAttrs { return nil } func (t *testBucket) deleteBucket(context.Context) error { return nil } func (t *testBucket) updateBucket(context.Context, *BucketAttrs) error { return nil } func (t *testBucket) getUploadURL(context.Context) (b2URLInterface, error) { if err := t.errs.getError("getUploadURL"); err != nil { return nil, err } return &testURL{ files: t.files, }, nil } func (t *testBucket) startLargeFile(_ context.Context, name, _ string, _ map[string]string) (b2LargeFileInterface, error) { return &testLargeFile{ name: name, parts: make(map[int][]byte), files: t.files, errs: t.errs, }, nil } func (t *testBucket) listFileNames(ctx context.Context, count int, cont, pfx, del string) ([]b2FileInterface, string, error) { var f []string gmux.Lock() defer gmux.Unlock() for name := range t.files { f = append(f, name) } sort.Strings(f) idx := sort.SearchStrings(f, cont) var b []b2FileInterface var next string for i := idx; i < len(f) && i-idx < count; i++ { b = append(b, &testFile{ n: f[i], s: int64(len(t.files[f[i]])), files: t.files, }) if i+1 < len(f) { next = f[i+1] } if i+1 == len(f) { next = "" } } return b, next, nil } func (t *testBucket) listFileVersions(ctx context.Context, count int, a, b, c, d string) ([]b2FileInterface, string, string, 
error) { x, y, z := t.listFileNames(ctx, count, a, c, d) return x, y, "", z } func (t *testBucket) listUnfinishedLargeFiles(ctx context.Context, count int, cont string) ([]b2FileInterface, string, error) { return nil, "", fmt.Errorf("testBucket.listUnfinishedLargeFiles(ctx, %d, %q): not implemented", count, cont) } func (t *testBucket) downloadFileByName(_ context.Context, name string, offset, size int64) (b2FileReaderInterface, error) { gmux.Lock() defer gmux.Unlock() f := t.files[name] end := int(offset + size) if end >= len(f) { end = len(f) } if int(offset) >= len(f) { return nil, errNoMoreContent } return &testFileReader{ b: ioutil.NopCloser(bytes.NewBufferString(f[offset:end])), s: end - int(offset), n: name, }, nil } func (t *testBucket) hideFile(context.Context, string) (b2FileInterface, error) { return nil, nil } func (t *testBucket) getDownloadAuthorization(context.Context, string, time.Duration) (string, error) { return "", nil } func (t *testBucket) baseURL() string { return "" } func (t *testBucket) file(id, name string) b2FileInterface { return nil } type testURL struct { files map[string]string } func (t *testURL) reload(context.Context) error { return nil } func (t *testURL) uploadFile(_ context.Context, r io.Reader, _ int, name, _, _ string, _ map[string]string) (b2FileInterface, error) { buf := &bytes.Buffer{} if _, err := io.Copy(buf, r); err != nil { return nil, err } gmux.Lock() defer gmux.Unlock() t.files[name] = buf.String() return &testFile{ n: name, s: int64(len(t.files[name])), files: t.files, }, nil } type testLargeFile struct { name string parts map[int][]byte files map[string]string errs *errCont } func (t *testLargeFile) finishLargeFile(context.Context) (b2FileInterface, error) { var total []byte gmux.Lock() defer gmux.Unlock() for i := 1; i <= len(t.parts); i++ { total = append(total, t.parts[i]...) 
} t.files[t.name] = string(total) return &testFile{ n: t.name, s: int64(len(total)), files: t.files, }, nil } func (t *testLargeFile) getUploadPartURL(context.Context) (b2FileChunkInterface, error) { gmux.Lock() defer gmux.Unlock() return &testFileChunk{ parts: t.parts, errs: t.errs, }, nil } type testFileChunk struct { parts map[int][]byte errs *errCont } func (t *testFileChunk) reload(context.Context) error { return nil } func (t *testFileChunk) uploadPart(_ context.Context, r io.Reader, _ string, _, index int) (int, error) { if err := t.errs.getError("uploadPart"); err != nil { return 0, err } buf := &bytes.Buffer{} i, err := io.Copy(buf, r) if err != nil { return int(i), err } gmux.Lock() defer gmux.Unlock() t.parts[index] = buf.Bytes() return int(i), nil } type testFile struct { n string s int64 t time.Time a string files map[string]string } func (t *testFile) name() string { return t.n } func (t *testFile) size() int64 { return t.s } func (t *testFile) timestamp() time.Time { return t.t } func (t *testFile) status() string { return t.a } func (t *testFile) compileParts(int64, map[int]string) b2LargeFileInterface { panic("not implemented") } func (t *testFile) getFileInfo(context.Context) (b2FileInfoInterface, error) { return nil, nil } func (t *testFile) listParts(context.Context, int, int) ([]b2FilePartInterface, int, error) { return nil, 0, nil } func (t *testFile) deleteFileVersion(context.Context) error { gmux.Lock() defer gmux.Unlock() delete(t.files, t.n) return nil } type testFileReader struct { b io.ReadCloser s int n string } func (t *testFileReader) Read(p []byte) (int, error) { return t.b.Read(p) } func (t *testFileReader) Close() error { return nil } func (t *testFileReader) stats() (int, string, string, map[string]string) { return t.s, "", "", nil } func (t *testFileReader) id() string { return t.n } type zReader struct{} var pattern = []byte{0x02, 0x80, 0xff, 0x1a, 0xcc, 0x63, 0x22} func (zReader) Read(p []byte) (int, error) { for i := 0; i+len(pattern) < len(p); i += len(pattern) { copy(p[i:], pattern) } return len(p), nil } type zReadSeeker struct { size int64 pos int64 } func (rs *zReadSeeker) Read(p []byte) (int, error) { for i := rs.pos; ; i++ { j := int(i - rs.pos) if j >= len(p) || i >= rs.size { var rtn error if i >= rs.size { rtn = io.EOF } rs.pos = i return j, rtn } f := int(i) % len(pattern) p[j] = pattern[f] } } func (rs *zReadSeeker) Seek(offset int64, whence int) (int64, error) { switch whence { case io.SeekStart: rs.pos = offset case io.SeekEnd: rs.pos = rs.size + offset } return rs.pos, nil } func TestReaderFrom(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() table := []struct { size, pos int64 }{ { size: 10, }, } for _, e := range table { client := &Client{ backend: &beRoot{ b2i: &testRoot{ bucketMap: make(map[string]map[string]string), errs: &errCont{}, }, }, } bucket, err := client.NewBucket(ctx, bucketName, &BucketAttrs{Type: Private}) if err != nil { t.Fatal(err) } defer func() { if err := bucket.Delete(ctx); err != nil { t.Error(err) } }() r := &zReadSeeker{pos: e.pos, size: e.size} w := bucket.Object("writer").NewWriter(ctx) n, err := w.ReadFrom(r) if err != nil { t.Errorf("ReadFrom(): %v", err) } if n != e.size { t.Errorf("ReadFrom(): got %d bytes, wanted %d bytes", n, e.size) } } } func TestReauth(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() root := &testRoot{ bucketMap: make(map[string]map[string]string), 
errs: &errCont{ errMap: map[string]map[int]error{ "createBucket": {0: testError{reauth: true}}, }, }, } client := &Client{ backend: &beRoot{ b2i: root, }, } auths := root.auths if _, err := client.NewBucket(ctx, "fun", &BucketAttrs{Type: Private}); err != nil { t.Errorf("bucket should not err, got %v", err) } if root.auths != auths+1 { t.Errorf("client should have re-authenticated; did not") } } func TestBackoff(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() var calls []time.Duration ch := make(chan time.Time) close(ch) after = func(d time.Duration) <-chan time.Time { calls = append(calls, d) return ch } table := []struct { root *testRoot want int }{ { root: &testRoot{ bucketMap: make(map[string]map[string]string), errs: &errCont{ errMap: map[string]map[int]error{ "createBucket": { 0: testError{backoff: time.Second}, 1: testError{backoff: 2 * time.Second}, }, }, }, }, want: 2, }, { root: &testRoot{ bucketMap: make(map[string]map[string]string), errs: &errCont{ errMap: map[string]map[int]error{ "getUploadURL": { 0: testError{retry: true}, }, }, }, }, want: 1, }, } var total int for _, ent := range table { client := &Client{ backend: &beRoot{ b2i: ent.root, }, } b, err := client.NewBucket(ctx, "fun", &BucketAttrs{Type: Private}) if err != nil { t.Fatal(err) } o := b.Object("foo") w := o.NewWriter(ctx) if _, err := io.Copy(w, bytes.NewBufferString("foo")); err != nil { t.Fatal(err) } if err := w.Close(); err != nil { t.Fatal(err) } total += ent.want } if len(calls) != total { t.Errorf("got %d calls, wanted %d", len(calls), total) } } func TestBackoffWithoutRetryAfter(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() var calls []time.Duration ch := make(chan time.Time) close(ch) after = func(d time.Duration) <-chan time.Time { calls = append(calls, d) return ch } root := &testRoot{ bucketMap: make(map[string]map[string]string), errs: &errCont{ errMap: map[string]map[int]error{ "createBucket": { 0: testError{retry: true}, 1: testError{retry: true}, }, }, }, } client := &Client{ backend: &beRoot{ b2i: root, }, } if _, err := client.NewBucket(ctx, "fun", &BucketAttrs{Type: Private}); err != nil { t.Errorf("bucket should not err, got %v", err) } if len(calls) != 2 { t.Errorf("wrong number of backoff calls; got %d, want 2", len(calls)) } } type badTransport struct{} func (badTransport) RoundTrip(r *http.Request) (*http.Response, error) { return &http.Response{ Status: "700 What", StatusCode: 700, Body: ioutil.NopCloser(bytes.NewBufferString("{}")), Request: r, }, nil } func TestCustomTransport(t *testing.T) { ctx := context.Background() // Sorta fragile but... _, err := NewClient(ctx, "abcd", "efgh", Transport(badTransport{})) if err == nil { t.Error("NewClient returned successfully, expected an error") } if !strings.Contains(err.Error(), "700") { t.Errorf("Expected nonsense error code 700, got %v", err) } } func TestReaderDoubleClose(t *testing.T) { ctx := context.Background() client := &Client{ backend: &beRoot{ b2i: &testRoot{ bucketMap: make(map[string]map[string]string), errs: &errCont{}, }, }, } bucket, err := client.NewBucket(ctx, "bucket", &BucketAttrs{Type: Private}) if err != nil { t.Fatal(err) } o, _, err := writeFile(ctx, bucket, "file", 10, 10) if err != nil { t.Fatal(err) } r := o.NewReader(ctx) // Read to EOF, and then read some more. 
if _, err := io.Copy(ioutil.Discard, r); err != nil { t.Fatal(err) } if _, err := io.Copy(ioutil.Discard, r); err != nil { t.Fatal(err) } } func TestReadWrite(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() client := &Client{ backend: &beRoot{ b2i: &testRoot{ bucketMap: make(map[string]map[string]string), errs: &errCont{}, }, }, } bucket, err := client.NewBucket(ctx, bucketName, &BucketAttrs{Type: Private}) if err != nil { t.Fatal(err) } defer func() { if err := bucket.Delete(ctx); err != nil { t.Error(err) } }() sobj, wsha, err := writeFile(ctx, bucket, smallFileName, 1e6+42, 1e8) if err != nil { t.Fatal(err) } defer func() { if err := sobj.Delete(ctx); err != nil { t.Error(err) } }() if err := readFile(ctx, sobj, wsha, 1e5, 10); err != nil { t.Error(err) } lobj, wshaL, err := writeFile(ctx, bucket, largeFileName, 1e6-1e5, 1e4) if err != nil { t.Fatal(err) } defer func() { if err := lobj.Delete(ctx); err != nil { t.Error(err) } }() if err := readFile(ctx, lobj, wshaL, 1e7, 10); err != nil { t.Error(err) } } func TestReadRangeReturnsRight(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() client := &Client{ backend: &beRoot{ b2i: &testRoot{ bucketMap: make(map[string]map[string]string), errs: &errCont{}, }, }, } bucket, err := client.NewBucket(ctx, bucketName, &BucketAttrs{Type: Private}) if err != nil { t.Fatal(err) } defer func() { if err := bucket.Delete(ctx); err != nil { t.Error(err) } }() obj, _, err := writeFile(ctx, bucket, "file", 1e6+42, 1e8) if err != nil { t.Fatal(err) } r := obj.NewRangeReader(ctx, 200, 1400) r.ChunkSize = 1000 i, err := io.Copy(ioutil.Discard, r) if err != nil { t.Error(err) } if i != 1400 { t.Errorf("NewRangeReader(_, 200, 1400): want 1400, got %d", i) } } func TestWriterReturnsError(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() client := &Client{ backend: &beRoot{ b2i: &testRoot{ bucketMap: make(map[string]map[string]string), errs: &errCont{ errMap: map[string]map[int]error{ "uploadPart": { 0: testError{}, 1: testError{}, 2: testError{}, 3: testError{}, 4: testError{}, 5: testError{}, 6: testError{}, }, }, }, }, }, } bucket, err := client.NewBucket(ctx, bucketName, &BucketAttrs{Type: Private}) if err != nil { t.Fatal(err) } w := bucket.Object("test").NewWriter(ctx) r := io.LimitReader(zReader{}, 1e7) w.ChunkSize = 1e4 w.ConcurrentUploads = 4 if _, err := io.Copy(w, r); err == nil { t.Fatalf("io.Copy: should have returned an error") } } func TestFileBuffer(t *testing.T) { r := io.LimitReader(zReader{}, 1e8) w, err := newFileBuffer("") if err != nil { t.Fatal(err) } defer w.Close() if _, err := io.Copy(w, r); err != nil { t.Fatal(err) } bReader, err := w.Reader() if err != nil { t.Fatal(err) } hsh := sha1.New() if _, err := io.Copy(hsh, bReader); err != nil { t.Fatal(err) } hshText := fmt.Sprintf("%x", hsh.Sum(nil)) if hshText != w.Hash() { t.Errorf("hashes are not equal: bufferWriter is %q, read buffer is %q", w.Hash(), hshText) } } func TestNonBuffer(t *testing.T) { table := []struct { str string off int64 len int64 want string }{ { str: "a string", off: 0, len: 3, want: "a s", }, { str: "a string", off: 3, len: 1, want: "t", }, { str: "a string", off: 3, len: 5, want: "tring", }, } for _, e := range table { nb := newNonBuffer(strings.NewReader(e.str), e.off, e.len) want := fmt.Sprintf("%s%x", e.want, sha1.Sum([]byte(e.str[int(e.off):int(e.off+e.len)]))) r, err 
:= nb.Reader() if err != nil { t.Error(err) continue } got, err := ioutil.ReadAll(r) if err != nil { t.Errorf("ioutil.ReadAll(%#v): %v", e, err) continue } if want != string(got) { t.Errorf("ioutil.ReadAll(%#v): got %q, want %q", e, string(got), want) } } } func writeFile(ctx context.Context, bucket *Bucket, name string, size int64, csize int) (*Object, string, error) { r := io.LimitReader(zReader{}, size) o := bucket.Object(name) f := o.NewWriter(ctx) h := sha1.New() w := io.MultiWriter(f, h) f.ConcurrentUploads = 5 f.ChunkSize = csize n, err := io.Copy(w, r) if err != nil { return nil, "", err } if n != size { return nil, "", fmt.Errorf("io.Copy(): wrote %d bytes; wanted %d bytes", n, size) } if err := f.Close(); err != nil { return nil, "", err } return o, fmt.Sprintf("%x", h.Sum(nil)), nil } func readFile(ctx context.Context, obj *Object, sha string, chunk, concur int) error { r := obj.NewReader(ctx) r.ChunkSize = chunk r.ConcurrentDownloads = concur h := sha1.New() if _, err := io.Copy(h, r); err != nil { return err } if err := r.Close(); err != nil { return err } rsha := fmt.Sprintf("%x", h.Sum(nil)) if sha != rsha { return fmt.Errorf("bad hash: got %s, want %s", rsha, sha) } return nil } blazer-0.4.4/b2/backend.go000066400000000000000000000405051330654575600152410ustar00rootroot00000000000000// Copyright 2016, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package b2 import ( "context" "io" "math/rand" "time" ) // This file wraps the baseline interfaces with backoff and retry semantics. 
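// Every wrapped method below follows the same shape (sketched here for
// exposition; this comment block is not part of the original source): the
// innermost closure makes one underlying API call, withReauth re-authorizes
// the account once if the error indicates an expired token, and withBackoff
// retries transient errors with exponential backoff. Using deleteBucket as a
// representative example:
//
//	f := func() error {
//		g := func() error {
//			return b.b2bucket.deleteBucket(ctx) // one underlying call
//		}
//		return withReauth(ctx, b.ri, g)
//	}
//	return withBackoff(ctx, b.ri, f)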
type beRootInterface interface { backoff(error) time.Duration reauth(error) bool transient(error) bool reupload(error) bool authorizeAccount(context.Context, string, string, ...ClientOption) error reauthorizeAccount(context.Context) error createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (beBucketInterface, error) listBuckets(context.Context) ([]beBucketInterface, error) } type beRoot struct { account, key string b2i b2RootInterface options []ClientOption } type beBucketInterface interface { name() string btype() BucketType attrs() *BucketAttrs updateBucket(context.Context, *BucketAttrs) error deleteBucket(context.Context) error getUploadURL(context.Context) (beURLInterface, error) startLargeFile(ctx context.Context, name, contentType string, info map[string]string) (beLargeFileInterface, error) listFileNames(context.Context, int, string, string, string) ([]beFileInterface, string, error) listFileVersions(context.Context, int, string, string, string, string) ([]beFileInterface, string, string, error) listUnfinishedLargeFiles(context.Context, int, string) ([]beFileInterface, string, error) downloadFileByName(context.Context, string, int64, int64) (beFileReaderInterface, error) hideFile(context.Context, string) (beFileInterface, error) getDownloadAuthorization(context.Context, string, time.Duration) (string, error) baseURL() string file(string, string) beFileInterface } type beBucket struct { b2bucket b2BucketInterface ri beRootInterface } type beURLInterface interface { uploadFile(context.Context, readResetter, int, string, string, string, map[string]string) (beFileInterface, error) } type beURL struct { b2url b2URLInterface ri beRootInterface } type beFileInterface interface { name() string size() int64 timestamp() time.Time status() string deleteFileVersion(context.Context) error getFileInfo(context.Context) (beFileInfoInterface, error) listParts(context.Context, int, int) ([]beFilePartInterface, int, error) compileParts(int64, map[int]string) beLargeFileInterface } type beFile struct { b2file b2FileInterface url beURLInterface ri beRootInterface } type beLargeFileInterface interface { finishLargeFile(context.Context) (beFileInterface, error) getUploadPartURL(context.Context) (beFileChunkInterface, error) } type beLargeFile struct { b2largeFile b2LargeFileInterface ri beRootInterface } type beFileChunkInterface interface { reload(context.Context) error uploadPart(context.Context, readResetter, string, int, int) (int, error) } type beFileChunk struct { b2fileChunk b2FileChunkInterface ri beRootInterface } type beFileReaderInterface interface { io.ReadCloser stats() (int, string, string, map[string]string) id() string } type beFileReader struct { b2fileReader b2FileReaderInterface ri beRootInterface } type beFileInfoInterface interface { stats() (string, string, int64, string, map[string]string, string, time.Time) } type beFilePartInterface interface { number() int sha1() string size() int64 } type beFilePart struct { b2filePart b2FilePartInterface ri beRootInterface } type beFileInfo struct { name string sha string size int64 ct string info map[string]string status string stamp time.Time } func (r *beRoot) backoff(err error) time.Duration { return r.b2i.backoff(err) } func (r *beRoot) reauth(err error) bool { return r.b2i.reauth(err) } func (r *beRoot) reupload(err error) bool { return r.b2i.reupload(err) } func (r *beRoot) transient(err error) bool { return r.b2i.transient(err) } func (r *beRoot) authorizeAccount(ctx context.Context, 
account, key string, opts ...ClientOption) error { f := func() error { if err := r.b2i.authorizeAccount(ctx, account, key, opts...); err != nil { return err } r.account = account r.key = key r.options = opts return nil } return withBackoff(ctx, r, f) } func (r *beRoot) reauthorizeAccount(ctx context.Context) error { return r.authorizeAccount(ctx, r.account, r.key, r.options...) } func (r *beRoot) createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (beBucketInterface, error) { var bi beBucketInterface f := func() error { g := func() error { bucket, err := r.b2i.createBucket(ctx, name, btype, info, rules) if err != nil { return err } bi = &beBucket{ b2bucket: bucket, ri: r, } return nil } return withReauth(ctx, r, g) } if err := withBackoff(ctx, r, f); err != nil { return nil, err } return bi, nil } func (r *beRoot) listBuckets(ctx context.Context) ([]beBucketInterface, error) { var buckets []beBucketInterface f := func() error { g := func() error { bs, err := r.b2i.listBuckets(ctx) if err != nil { return err } for _, b := range bs { buckets = append(buckets, &beBucket{ b2bucket: b, ri: r, }) } return nil } return withReauth(ctx, r, g) } if err := withBackoff(ctx, r, f); err != nil { return nil, err } return buckets, nil } func (b *beBucket) name() string { return b.b2bucket.name() } func (b *beBucket) btype() BucketType { return BucketType(b.b2bucket.btype()) } func (b *beBucket) attrs() *BucketAttrs { return b.b2bucket.attrs() } func (b *beBucket) updateBucket(ctx context.Context, attrs *BucketAttrs) error { f := func() error { g := func() error { return b.b2bucket.updateBucket(ctx, attrs) } return withReauth(ctx, b.ri, g) } return withBackoff(ctx, b.ri, f) } func (b *beBucket) deleteBucket(ctx context.Context) error { f := func() error { g := func() error { return b.b2bucket.deleteBucket(ctx) } return withReauth(ctx, b.ri, g) } return withBackoff(ctx, b.ri, f) } func (b *beBucket) getUploadURL(ctx context.Context) (beURLInterface, error) { var url beURLInterface f := func() error { g := func() error { u, err := b.b2bucket.getUploadURL(ctx) if err != nil { return err } url = &beURL{ b2url: u, ri: b.ri, } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, err } return url, nil } func (b *beBucket) startLargeFile(ctx context.Context, name, ct string, info map[string]string) (beLargeFileInterface, error) { var file beLargeFileInterface f := func() error { g := func() error { f, err := b.b2bucket.startLargeFile(ctx, name, ct, info) if err != nil { return err } file = &beLargeFile{ b2largeFile: f, ri: b.ri, } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, err } return file, nil } func (b *beBucket) listFileNames(ctx context.Context, count int, continuation, prefix, delimiter string) ([]beFileInterface, string, error) { var cont string var files []beFileInterface f := func() error { g := func() error { fs, c, err := b.b2bucket.listFileNames(ctx, count, continuation, prefix, delimiter) if err != nil { return err } cont = c for _, f := range fs { files = append(files, &beFile{ b2file: f, ri: b.ri, }) } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, "", err } return files, cont, nil } func (b *beBucket) listFileVersions(ctx context.Context, count int, nextName, nextID, prefix, delimiter string) ([]beFileInterface, string, string, error) { var name, id string var files 
[]beFileInterface f := func() error { g := func() error { fs, n, d, err := b.b2bucket.listFileVersions(ctx, count, nextName, nextID, prefix, delimiter) if err != nil { return err } name = n id = d for _, f := range fs { files = append(files, &beFile{ b2file: f, ri: b.ri, }) } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, "", "", err } return files, name, id, nil } func (b *beBucket) listUnfinishedLargeFiles(ctx context.Context, count int, continuation string) ([]beFileInterface, string, error) { var cont string var files []beFileInterface f := func() error { g := func() error { fs, c, err := b.b2bucket.listUnfinishedLargeFiles(ctx, count, continuation) if err != nil { return err } cont = c for _, f := range fs { files = append(files, &beFile{ b2file: f, ri: b.ri, }) } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, "", err } return files, cont, nil } func (b *beBucket) downloadFileByName(ctx context.Context, name string, offset, size int64) (beFileReaderInterface, error) { var reader beFileReaderInterface f := func() error { g := func() error { fr, err := b.b2bucket.downloadFileByName(ctx, name, offset, size) if err != nil { return err } reader = &beFileReader{ b2fileReader: fr, ri: b.ri, } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, err } return reader, nil } func (b *beBucket) hideFile(ctx context.Context, name string) (beFileInterface, error) { var file beFileInterface f := func() error { g := func() error { f, err := b.b2bucket.hideFile(ctx, name) if err != nil { return err } file = &beFile{ b2file: f, ri: b.ri, } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, err } return file, nil } func (b *beBucket) getDownloadAuthorization(ctx context.Context, p string, v time.Duration) (string, error) { var tok string f := func() error { g := func() error { t, err := b.b2bucket.getDownloadAuthorization(ctx, p, v) if err != nil { return err } tok = t return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return "", err } return tok, nil } func (b *beBucket) baseURL() string { return b.b2bucket.baseURL() } func (b *beBucket) file(id, name string) beFileInterface { return &beFile{ b2file: b.b2bucket.file(id, name), ri: b.ri, } } func (b *beURL) uploadFile(ctx context.Context, r readResetter, size int, name, ct, sha1 string, info map[string]string) (beFileInterface, error) { var file beFileInterface f := func() error { if err := r.Reset(); err != nil { return err } f, err := b.b2url.uploadFile(ctx, r, size, name, ct, sha1, info) if err != nil { return err } file = &beFile{ b2file: f, url: b, ri: b.ri, } return nil } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, err } return file, nil } func (b *beFile) deleteFileVersion(ctx context.Context) error { f := func() error { g := func() error { return b.b2file.deleteFileVersion(ctx) } return withReauth(ctx, b.ri, g) } return withBackoff(ctx, b.ri, f) } func (b *beFile) size() int64 { return b.b2file.size() } func (b *beFile) name() string { return b.b2file.name() } func (b *beFile) timestamp() time.Time { return b.b2file.timestamp() } func (b *beFile) status() string { return b.b2file.status() } func (b *beFile) getFileInfo(ctx context.Context) (beFileInfoInterface, error) { var fileInfo beFileInfoInterface f := func() error { g := func() error { fi, err 
:= b.b2file.getFileInfo(ctx) if err != nil { return err } name, sha, size, ct, info, status, stamp := fi.stats() fileInfo = &beFileInfo{ name: name, sha: sha, size: size, ct: ct, info: info, status: status, stamp: stamp, } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, err } return fileInfo, nil } func (b *beFile) listParts(ctx context.Context, next, count int) ([]beFilePartInterface, int, error) { var fpi []beFilePartInterface var rnxt int f := func() error { g := func() error { ps, n, err := b.b2file.listParts(ctx, next, count) if err != nil { return err } rnxt = n for _, p := range ps { fpi = append(fpi, &beFilePart{ b2filePart: p, ri: b.ri, }) } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, 0, err } return fpi, rnxt, nil } func (b *beFile) compileParts(size int64, seen map[int]string) beLargeFileInterface { return &beLargeFile{ b2largeFile: b.b2file.compileParts(size, seen), ri: b.ri, } } func (b *beLargeFile) getUploadPartURL(ctx context.Context) (beFileChunkInterface, error) { var chunk beFileChunkInterface f := func() error { g := func() error { fc, err := b.b2largeFile.getUploadPartURL(ctx) if err != nil { return err } chunk = &beFileChunk{ b2fileChunk: fc, ri: b.ri, } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, err } return chunk, nil } func (b *beLargeFile) finishLargeFile(ctx context.Context) (beFileInterface, error) { var file beFileInterface f := func() error { g := func() error { f, err := b.b2largeFile.finishLargeFile(ctx) if err != nil { return err } file = &beFile{ b2file: f, ri: b.ri, } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, err } return file, nil } func (b *beFileChunk) reload(ctx context.Context) error { f := func() error { g := func() error { return b.b2fileChunk.reload(ctx) } return withReauth(ctx, b.ri, g) } return withBackoff(ctx, b.ri, f) } func (b *beFileChunk) uploadPart(ctx context.Context, r readResetter, sha1 string, size, index int) (int, error) { // no re-auth; pass it back up to the caller so they can get an new upload URI and token // TODO: we should handle that here probably var i int f := func() error { if err := r.Reset(); err != nil { return err } j, err := b.b2fileChunk.uploadPart(ctx, r, sha1, size, index) if err != nil { return err } i = j return nil } if err := withBackoff(ctx, b.ri, f); err != nil { return 0, err } return i, nil } func (b *beFileReader) Read(p []byte) (int, error) { return b.b2fileReader.Read(p) } func (b *beFileReader) Close() error { return b.b2fileReader.Close() } func (b *beFileReader) stats() (int, string, string, map[string]string) { return b.b2fileReader.stats() } func (b *beFileReader) id() string { return b.b2fileReader.id() } func (b *beFileInfo) stats() (string, string, int64, string, map[string]string, string, time.Time) { return b.name, b.sha, b.size, b.ct, b.info, b.status, b.stamp } func (b *beFilePart) number() int { return b.b2filePart.number() } func (b *beFilePart) sha1() string { return b.b2filePart.sha1() } func (b *beFilePart) size() int64 { return b.b2filePart.size() } func jitter(d time.Duration) time.Duration { f := float64(d) f /= 50 f += f * (rand.Float64() - 0.5) return time.Duration(f) } func getBackoff(d time.Duration) time.Duration { if d > 15*time.Second { return d + jitter(d) } return d*2 + jitter(d*2) } var after = time.After func 
withBackoff(ctx context.Context, ri beRootInterface, f func() error) error { backoff := 500 * time.Millisecond for { err := f() if !ri.transient(err) { return err } bo := ri.backoff(err) if bo > 0 { backoff = bo } else { backoff = getBackoff(backoff) } select { case <-ctx.Done(): return ctx.Err() case <-after(backoff): } } } func withReauth(ctx context.Context, ri beRootInterface, f func() error) error { err := f() if ri.reauth(err) { if err := ri.reauthorizeAccount(ctx); err != nil { return err } err = f() } return err } blazer-0.4.4/b2/baseline.go000066400000000000000000000302521330654575600154320ustar00rootroot00000000000000// Copyright 2016, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package b2 import ( "context" "io" "net/http" "time" "github.com/kurin/blazer/base" ) // This file wraps the base package in a thin layer, for testing. It should be // the only file in b2 that imports base. type b2RootInterface interface { authorizeAccount(context.Context, string, string, ...ClientOption) error transient(error) bool backoff(error) time.Duration reauth(error) bool reupload(error) bool createBucket(context.Context, string, string, map[string]string, []LifecycleRule) (b2BucketInterface, error) listBuckets(context.Context) ([]b2BucketInterface, error) } type b2BucketInterface interface { name() string btype() string attrs() *BucketAttrs updateBucket(context.Context, *BucketAttrs) error deleteBucket(context.Context) error getUploadURL(context.Context) (b2URLInterface, error) startLargeFile(ctx context.Context, name, contentType string, info map[string]string) (b2LargeFileInterface, error) listFileNames(context.Context, int, string, string, string) ([]b2FileInterface, string, error) listFileVersions(context.Context, int, string, string, string, string) ([]b2FileInterface, string, string, error) listUnfinishedLargeFiles(context.Context, int, string) ([]b2FileInterface, string, error) downloadFileByName(context.Context, string, int64, int64) (b2FileReaderInterface, error) hideFile(context.Context, string) (b2FileInterface, error) getDownloadAuthorization(context.Context, string, time.Duration) (string, error) baseURL() string file(string, string) b2FileInterface } type b2URLInterface interface { reload(context.Context) error uploadFile(context.Context, io.Reader, int, string, string, string, map[string]string) (b2FileInterface, error) } type b2FileInterface interface { name() string size() int64 timestamp() time.Time status() string deleteFileVersion(context.Context) error getFileInfo(context.Context) (b2FileInfoInterface, error) listParts(context.Context, int, int) ([]b2FilePartInterface, int, error) compileParts(int64, map[int]string) b2LargeFileInterface } type b2LargeFileInterface interface { finishLargeFile(context.Context) (b2FileInterface, error) getUploadPartURL(context.Context) (b2FileChunkInterface, error) } type b2FileChunkInterface interface { reload(context.Context) error uploadPart(context.Context, io.Reader, string, int, int) (int, error) } type b2FileReaderInterface 
interface { io.ReadCloser stats() (int, string, string, map[string]string) id() string } type b2FileInfoInterface interface { stats() (string, string, int64, string, map[string]string, string, time.Time) // bleck } type b2FilePartInterface interface { number() int sha1() string size() int64 } type b2Root struct { b *base.B2 } type b2Bucket struct { b *base.Bucket } type b2URL struct { b *base.URL } type b2File struct { b *base.File } type b2LargeFile struct { b *base.LargeFile } type b2FileChunk struct { b *base.FileChunk } type b2FileReader struct { b *base.FileReader } type b2FileInfo struct { b *base.FileInfo } type b2FilePart struct { b *base.FilePart } func (b *b2Root) authorizeAccount(ctx context.Context, account, key string, opts ...ClientOption) error { c := &clientOptions{} for _, f := range opts { f(c) } var aopts []base.AuthOption ct := &clientTransport{client: c.client} if c.transport != nil { ct.rt = c.transport } aopts = append(aopts, base.Transport(ct)) if c.failSomeUploads { aopts = append(aopts, base.FailSomeUploads()) } if c.expireTokens { aopts = append(aopts, base.ExpireSomeAuthTokens()) } if c.capExceeded { aopts = append(aopts, base.ForceCapExceeded()) } for _, agent := range c.userAgents { aopts = append(aopts, base.UserAgent(agent)) } nb, err := base.AuthorizeAccount(ctx, account, key, aopts...) if err != nil { return err } if b.b == nil { b.b = nb return nil } b.b.Update(nb) return nil } func (*b2Root) backoff(err error) time.Duration { if base.Action(err) != base.Retry { return 0 } return base.Backoff(err) } func (*b2Root) reauth(err error) bool { return base.Action(err) == base.ReAuthenticate } func (*b2Root) reupload(err error) bool { return base.Action(err) == base.AttemptNewUpload } func (*b2Root) transient(err error) bool { return base.Action(err) == base.Retry } func (b *b2Root) createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (b2BucketInterface, error) { var baseRules []base.LifecycleRule for _, rule := range rules { baseRules = append(baseRules, base.LifecycleRule{ DaysNewUntilHidden: rule.DaysNewUntilHidden, DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, Prefix: rule.Prefix, }) } bucket, err := b.b.CreateBucket(ctx, name, btype, info, baseRules) if err != nil { return nil, err } return &b2Bucket{bucket}, nil } func (b *b2Root) listBuckets(ctx context.Context) ([]b2BucketInterface, error) { buckets, err := b.b.ListBuckets(ctx) if err != nil { return nil, err } var rtn []b2BucketInterface for _, bucket := range buckets { rtn = append(rtn, &b2Bucket{bucket}) } return rtn, err } func (b *b2Bucket) updateBucket(ctx context.Context, attrs *BucketAttrs) error { if attrs == nil { return nil } if attrs.Type != UnknownType { b.b.Type = string(attrs.Type) } if attrs.Info != nil { b.b.Info = attrs.Info } if attrs.LifecycleRules != nil { rules := []base.LifecycleRule{} for _, rule := range attrs.LifecycleRules { rules = append(rules, base.LifecycleRule{ DaysNewUntilHidden: rule.DaysNewUntilHidden, DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, Prefix: rule.Prefix, }) } b.b.LifecycleRules = rules } newBucket, err := b.b.Update(ctx) if err == nil { b.b = newBucket } code, _ := base.Code(err) if code == 409 { return b2err{ err: err, isUpdateConflict: true, } } return err } func (b *b2Bucket) deleteBucket(ctx context.Context) error { return b.b.DeleteBucket(ctx) } func (b *b2Bucket) name() string { return b.b.Name } func (b *b2Bucket) btype() string { return b.b.Type } func (b *b2Bucket) attrs() *BucketAttrs 
{ var rules []LifecycleRule for _, rule := range b.b.LifecycleRules { rules = append(rules, LifecycleRule{ DaysNewUntilHidden: rule.DaysNewUntilHidden, DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, Prefix: rule.Prefix, }) } return &BucketAttrs{ LifecycleRules: rules, Info: b.b.Info, Type: BucketType(b.b.Type), } } func (b *b2Bucket) getUploadURL(ctx context.Context) (b2URLInterface, error) { url, err := b.b.GetUploadURL(ctx) if err != nil { return nil, err } return &b2URL{url}, nil } func (b *b2Bucket) startLargeFile(ctx context.Context, name, ct string, info map[string]string) (b2LargeFileInterface, error) { lf, err := b.b.StartLargeFile(ctx, name, ct, info) if err != nil { return nil, err } return &b2LargeFile{lf}, nil } func (b *b2Bucket) listFileNames(ctx context.Context, count int, continuation, prefix, delimiter string) ([]b2FileInterface, string, error) { fs, c, err := b.b.ListFileNames(ctx, count, continuation, prefix, delimiter) if err != nil { return nil, "", err } var files []b2FileInterface for _, f := range fs { files = append(files, &b2File{f}) } return files, c, nil } func (b *b2Bucket) listFileVersions(ctx context.Context, count int, nextName, nextID, prefix, delimiter string) ([]b2FileInterface, string, string, error) { fs, name, id, err := b.b.ListFileVersions(ctx, count, nextName, nextID, prefix, delimiter) if err != nil { return nil, "", "", err } var files []b2FileInterface for _, f := range fs { files = append(files, &b2File{f}) } return files, name, id, nil } func (b *b2Bucket) listUnfinishedLargeFiles(ctx context.Context, count int, continuation string) ([]b2FileInterface, string, error) { fs, cont, err := b.b.ListUnfinishedLargeFiles(ctx, count, continuation) if err != nil { return nil, "", err } var files []b2FileInterface for _, f := range fs { files = append(files, &b2File{f}) } return files, cont, nil } func (b *b2Bucket) downloadFileByName(ctx context.Context, name string, offset, size int64) (b2FileReaderInterface, error) { fr, err := b.b.DownloadFileByName(ctx, name, offset, size) if err != nil { code, _ := base.Code(err) switch code { case http.StatusRequestedRangeNotSatisfiable: return nil, errNoMoreContent case http.StatusNotFound: return nil, b2err{err: err, notFoundErr: true} } return nil, err } return &b2FileReader{fr}, nil } func (b *b2Bucket) hideFile(ctx context.Context, name string) (b2FileInterface, error) { f, err := b.b.HideFile(ctx, name) if err != nil { return nil, err } return &b2File{f}, nil } func (b *b2Bucket) getDownloadAuthorization(ctx context.Context, p string, v time.Duration) (string, error) { return b.b.GetDownloadAuthorization(ctx, p, v) } func (b *b2Bucket) baseURL() string { return b.b.BaseURL() } func (b *b2Bucket) file(id, name string) b2FileInterface { return &b2File{b.b.File(id, name)} } func (b *b2URL) uploadFile(ctx context.Context, r io.Reader, size int, name, contentType, sha1 string, info map[string]string) (b2FileInterface, error) { file, err := b.b.UploadFile(ctx, r, size, name, contentType, sha1, info) if err != nil { return nil, err } return &b2File{file}, nil } func (b *b2URL) reload(ctx context.Context) error { return b.b.Reload(ctx) } func (b *b2File) deleteFileVersion(ctx context.Context) error { return b.b.DeleteFileVersion(ctx) } func (b *b2File) name() string { return b.b.Name } func (b *b2File) size() int64 { return b.b.Size } func (b *b2File) timestamp() time.Time { return b.b.Timestamp } func (b *b2File) status() string { return b.b.Status } func (b *b2File) getFileInfo(ctx context.Context) 
(b2FileInfoInterface, error) { if b.b.Info != nil { return &b2FileInfo{b.b.Info}, nil } fi, err := b.b.GetFileInfo(ctx) if err != nil { return nil, err } return &b2FileInfo{fi}, nil } func (b *b2File) listParts(ctx context.Context, next, count int) ([]b2FilePartInterface, int, error) { parts, n, err := b.b.ListParts(ctx, next, count) if err != nil { return nil, 0, err } var rtn []b2FilePartInterface for _, part := range parts { rtn = append(rtn, &b2FilePart{part}) } return rtn, n, nil } func (b *b2File) compileParts(size int64, seen map[int]string) b2LargeFileInterface { return &b2LargeFile{b.b.CompileParts(size, seen)} } func (b *b2LargeFile) finishLargeFile(ctx context.Context) (b2FileInterface, error) { f, err := b.b.FinishLargeFile(ctx) if err != nil { return nil, err } return &b2File{f}, nil } func (b *b2LargeFile) getUploadPartURL(ctx context.Context) (b2FileChunkInterface, error) { c, err := b.b.GetUploadPartURL(ctx) if err != nil { return nil, err } return &b2FileChunk{c}, nil } func (b *b2FileChunk) reload(ctx context.Context) error { return b.b.Reload(ctx) } func (b *b2FileChunk) uploadPart(ctx context.Context, r io.Reader, sha1 string, size, index int) (int, error) { return b.b.UploadPart(ctx, r, sha1, size, index) } func (b *b2FileReader) Read(p []byte) (int, error) { return b.b.Read(p) } func (b *b2FileReader) Close() error { return b.b.Close() } func (b *b2FileReader) stats() (int, string, string, map[string]string) { return b.b.ContentLength, b.b.ContentType, b.b.SHA1, b.b.Info } func (b *b2FileReader) id() string { return b.b.ID } func (b *b2FileInfo) stats() (string, string, int64, string, map[string]string, string, time.Time) { return b.b.Name, b.b.SHA1, b.b.Size, b.b.ContentType, b.b.Info, b.b.Status, b.b.Timestamp } func (b *b2FilePart) number() int { return b.b.Number } func (b *b2FilePart) sha1() string { return b.b.SHA1 } func (b *b2FilePart) size() int64 { return b.b.Size } blazer-0.4.4/b2/buffer.go000066400000000000000000000106371330654575600151260ustar00rootroot00000000000000// Copyright 2017, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package b2 import ( "bytes" "crypto/sha1" "errors" "fmt" "hash" "io" "io/ioutil" "os" "strings" "sync" ) type readResetter interface { Read([]byte) (int, error) Reset() error } type resetter struct { rs io.ReadSeeker } func (r resetter) Read(p []byte) (int, error) { return r.rs.Read(p) } func (r resetter) Reset() error { _, err := r.rs.Seek(0, 0); return err } func newResetter(p []byte) readResetter { return resetter{rs: bytes.NewReader(p)} } type writeBuffer interface { io.Writer Len() int Reader() (readResetter, error) Hash() string // sha1 or whatever it is Close() error } // nonBuffer doesn't buffer anything, but passes values directly from the // source readseeker. Many nonBuffers can point at different parts of the same // underlying source, and be accessed by multiple goroutines simultaneously. 
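//
// A minimal sketch of that sharing, assuming src is any io.ReaderAt (an
// *os.File, say); the offsets and sizes here are illustrative only:
//
//	nb1 := newNonBuffer(src, 0, 5e6)   // first 5MB of src
//	nb2 := newNonBuffer(src, 5e6, 5e6) // next 5MB, same underlying source
//
// Each value holds its own section reader and SHA1 state, so nb1 and nb2
// can be consumed from separate goroutines.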
func newNonBuffer(rs io.ReaderAt, offset, size int64) writeBuffer { return &nonBuffer{ r: io.NewSectionReader(rs, offset, size), size: int(size), hsh: sha1.New(), } } type nonBuffer struct { r *io.SectionReader size int hsh hash.Hash isEOF bool buf *strings.Reader } func (nb *nonBuffer) Len() int { return nb.size + 40 } func (nb *nonBuffer) Hash() string { return "hex_digits_at_end" } func (nb *nonBuffer) Close() error { return nil } func (nb *nonBuffer) Reader() (readResetter, error) { return nb, nil } func (nb *nonBuffer) Write([]byte) (int, error) { return 0, errors.New("writes not supported") } func (nb *nonBuffer) Read(p []byte) (int, error) { if nb.isEOF { return nb.buf.Read(p) } n, err := io.TeeReader(nb.r, nb.hsh).Read(p) if err == io.EOF { err = nil nb.isEOF = true nb.buf = strings.NewReader(fmt.Sprintf("%x", nb.hsh.Sum(nil))) } return n, err } func (nb *nonBuffer) Reset() error { nb.hsh.Reset() nb.isEOF = false _, err := nb.r.Seek(0, 0) return err } type memoryBuffer struct { buf *bytes.Buffer hsh hash.Hash w io.Writer mux sync.Mutex } var bufpool *sync.Pool func init() { bufpool = &sync.Pool{} bufpool.New = func() interface{} { return &bytes.Buffer{} } } func newMemoryBuffer() *memoryBuffer { mb := &memoryBuffer{ hsh: sha1.New(), } mb.buf = bufpool.Get().(*bytes.Buffer) mb.w = io.MultiWriter(mb.hsh, mb.buf) return mb } func (mb *memoryBuffer) Write(p []byte) (int, error) { return mb.w.Write(p) } func (mb *memoryBuffer) Len() int { return mb.buf.Len() } func (mb *memoryBuffer) Reader() (readResetter, error) { return newResetter(mb.buf.Bytes()), nil } func (mb *memoryBuffer) Hash() string { return fmt.Sprintf("%x", mb.hsh.Sum(nil)) } func (mb *memoryBuffer) Close() error { mb.mux.Lock() defer mb.mux.Unlock() if mb.buf == nil { return nil } mb.buf.Truncate(0) bufpool.Put(mb.buf) mb.buf = nil return nil } type fileBuffer struct { f *os.File hsh hash.Hash w io.Writer s int } func newFileBuffer(loc string) (*fileBuffer, error) { f, err := ioutil.TempFile(loc, "blazer") if err != nil { return nil, err } fb := &fileBuffer{ f: f, hsh: sha1.New(), } fb.w = io.MultiWriter(fb.f, fb.hsh) return fb, nil } func (fb *fileBuffer) Write(p []byte) (int, error) { n, err := fb.w.Write(p) fb.s += n return n, err } func (fb *fileBuffer) Len() int { return fb.s } func (fb *fileBuffer) Hash() string { return fmt.Sprintf("%x", fb.hsh.Sum(nil)) } func (fb *fileBuffer) Reader() (readResetter, error) { if _, err := fb.f.Seek(0, 0); err != nil { return nil, err } return &fr{f: fb.f}, nil } func (fb *fileBuffer) Close() error { fb.f.Close() return os.Remove(fb.f.Name()) } // wraps *os.File so that the http package doesn't see it as an io.Closer type fr struct { f *os.File } func (r *fr) Read(p []byte) (int, error) { return r.f.Read(p) } func (r *fr) Reset() error { _, err := r.f.Seek(0, 0); return err } blazer-0.4.4/b2/integration_test.go000066400000000000000000000534571330654575600172460ustar00rootroot00000000000000// Copyright 2016, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package b2 import ( "bytes" "context" "crypto/rand" "crypto/sha1" "encoding/hex" "fmt" "io" "net/http" "os" "reflect" "sync/atomic" "testing" "time" "github.com/kurin/blazer/x/transport" ) const ( apiID = "B2_ACCOUNT_ID" apiKey = "B2_SECRET_KEY" errVar = "B2_TRANSIENT_ERRORS" ) func TestReadWriteLive(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() sobj, wsha, err := writeFile(ctx, bucket, smallFileName, 1e6-42, 1e8) if err != nil { t.Fatal(err) } lobj, wshaL, err := writeFile(ctx, bucket, largeFileName, 5e6+5e4, 5e6) if err != nil { t.Fatal(err) } if err := readFile(ctx, lobj, wshaL, 1e6, 10); err != nil { t.Error(err) } if err := readFile(ctx, sobj, wsha, 1e5, 10); err != nil { t.Error(err) } iter := bucket.List(ctx, ListHidden()) for iter.Next() { if err := iter.Object().Delete(ctx); err != nil { t.Error(err) } } if err := iter.Err(); err != nil { t.Error(err) } } func TestReaderFromLive(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() table := []struct { size, pos int64 csize, writers int }{ { // that it works at all size: 10, }, { // large uploads size: 15e6 + 10, csize: 5e6, writers: 2, }, { // an excess of writers size: 50e6, csize: 5e6, writers: 12, }, { // with offset, seeks back to start after turning it into a ReaderAt size: 250, pos: 50, }, } for i, e := range table { rs := &zReadSeeker{pos: e.pos, size: e.size} o := bucket.Object(fmt.Sprintf("writer.%d", i)) w := o.NewWriter(ctx) w.ChunkSize = e.csize w.ConcurrentUploads = e.writers n, err := w.ReadFrom(rs) if err != nil { t.Errorf("ReadFrom(): %v", err) } if n != e.size { t.Errorf("ReadFrom(): got %d bytes, wanted %d bytes", n, e.size) } if err := w.Close(); err != nil { t.Errorf("w.Close(): %v", err) continue } r := o.NewReader(ctx) h := sha1.New() rn, err := io.Copy(h, r) if err != nil { t.Errorf("Read from B2: %v", err) } if rn != n { t.Errorf("Read from B2: got %d bytes, want %d bytes", rn, n) } if err := r.Close(); err != nil { t.Errorf("r.Close(): %v", err) } hex := fmt.Sprintf("%x", h.Sum(nil)) attrs, err := o.Attrs(ctx) if err != nil { t.Errorf("Attrs(): %v", err) continue } if attrs.SHA1 == "none" { continue } if hex != attrs.SHA1 { t.Errorf("SHA1: got %q, want %q", hex, attrs.SHA1) } } } func TestHideShowLive(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() // write a file obj, _, err := writeFile(ctx, bucket, smallFileName, 1e6+42, 1e8) if err != nil { t.Fatal(err) } got, err := countObjects(bucket.List(ctx)) if err != nil { t.Error(err) } if got != 1 { t.Fatalf("got %d objects, wanted 1", got) } // When the hide marker and the object it's hiding were created within the // same second, they can be sorted in the wrong order, causing the object to // fail to be hidden. 
time.Sleep(1500 * time.Millisecond) // hide the file if err := obj.Hide(ctx); err != nil { t.Fatal(err) } got, err = countObjects(bucket.List(ctx)) if err != nil { t.Error(err) } if got != 0 { t.Fatalf("got %d objects, wanted 0", got) } // unhide the file if err := bucket.Reveal(ctx, smallFileName); err != nil { t.Fatal(err) } // we should see the object again got, err = countObjects(bucket.List(ctx)) if err != nil { t.Error(err) } if got != 1 { t.Fatalf("got %d objects, wanted 1", got) } } type cancelReader struct { r io.Reader n, l int c func() } func (c *cancelReader) Read(p []byte) (int, error) { n, err := c.r.Read(p) c.n += n if c.n >= c.l { c.c() } return n, err } func TestResumeWriter(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) bucket, _ := startLiveTest(ctx, t) w := bucket.Object("foo").NewWriter(ctx) w.ChunkSize = 5e6 r := &cancelReader{ r: io.LimitReader(zReader{}, 15e6), l: 6e6, c: cancel, } if _, err := io.Copy(w, r); err != context.Canceled { t.Fatalf("io.Copy: wanted canceled context, got: %v", err) } ctx2 := context.Background() ctx2, cancel2 := context.WithTimeout(ctx2, 10*time.Minute) defer cancel2() bucket2, done := startLiveTest(ctx2, t) defer done() w2 := bucket2.Object("foo").NewWriter(ctx2) w2.ChunkSize = 5e6 r2 := io.LimitReader(zReader{}, 15e6) h1 := sha1.New() tr := io.TeeReader(r2, h1) w2.Resume = true w2.ConcurrentUploads = 2 if _, err := io.Copy(w2, tr); err != nil { t.Fatal(err) } if err := w2.Close(); err != nil { t.Fatal(err) } begSHA := fmt.Sprintf("%x", h1.Sum(nil)) objR := bucket2.Object("foo").NewReader(ctx2) objR.ConcurrentDownloads = 3 h2 := sha1.New() if _, err := io.Copy(h2, objR); err != nil { t.Fatal(err) } if err := objR.Close(); err != nil { t.Error(err) } endSHA := fmt.Sprintf("%x", h2.Sum(nil)) if endSHA != begSHA { t.Errorf("got conflicting hashes: got %q, want %q", endSHA, begSHA) } } func TestAttrs(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() attrlist := []*Attrs{ &Attrs{ ContentType: "jpeg/stream", Info: map[string]string{ "one": "a", "two": "b", }, }, &Attrs{ ContentType: "application/MAGICFACE", LastModified: time.Unix(1464370149, 142000000), Info: map[string]string{}, // can't be nil }, &Attrs{ ContentType: "arbitrarystring", Info: map[string]string{ "spaces": "string with spaces", "unicode": "日本語", "special": "&/!@_.~", }, }, } table := []struct { name string size int64 }{ { name: "small", size: 1e3, }, { name: "large", size: 5e6 + 4, }, } for _, e := range table { for _, attrs := range attrlist { o := bucket.Object(e.name) w := o.NewWriter(ctx).WithAttrs(attrs) if _, err := io.Copy(w, io.LimitReader(zReader{}, e.size)); err != nil { t.Error(err) continue } if err := w.Close(); err != nil { t.Error(err) continue } gotAttrs, err := bucket.Object(e.name).Attrs(ctx) if err != nil { t.Error(err) continue } if gotAttrs.ContentType != attrs.ContentType { t.Errorf("bad content-type for %s: got %q, want %q", e.name, gotAttrs.ContentType, attrs.ContentType) } if !reflect.DeepEqual(gotAttrs.Info, attrs.Info) { t.Errorf("bad info for %s: got %#v, want %#v", e.name, gotAttrs.Info, attrs.Info) } if !gotAttrs.LastModified.Equal(attrs.LastModified) { t.Errorf("bad lastmodified time for %s: got %v, want %v", e.name, gotAttrs.LastModified, attrs.LastModified) } if err := o.Delete(ctx); err != nil { t.Errorf("Object(%q).Delete: %v", e.name, err) } } } } func TestFileBufferLive(t
*testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() r := io.LimitReader(zReader{}, 1e6) w := bucket.Object("small").NewWriter(ctx) w.UseFileBuffer = true w.Write(nil) wb, ok := w.w.(*fileBuffer) if !ok { t.Fatalf("writer isn't using file buffer: %T", w.w) } smallTmpName := wb.f.Name() if _, err := io.Copy(w, r); err != nil { t.Errorf("creating small file: %v", err) } if err := w.Close(); err != nil { t.Errorf("w.Close(): %v", err) } if _, err := os.Stat(smallTmpName); !os.IsNotExist(err) { t.Errorf("tmp file exists (%s) or other error: %v", smallTmpName, err) } } func TestAuthTokLive(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() foo := "foo/bar" baz := "baz/bar" fw := bucket.Object(foo).NewWriter(ctx) io.Copy(fw, io.LimitReader(zReader{}, 1e5)) if err := fw.Close(); err != nil { t.Fatal(err) } bw := bucket.Object(baz).NewWriter(ctx) io.Copy(bw, io.LimitReader(zReader{}, 1e5)) if err := bw.Close(); err != nil { t.Fatal(err) } tok, err := bucket.AuthToken(ctx, "foo", time.Hour) if err != nil { t.Fatal(err) } furl := fmt.Sprintf("%s?Authorization=%s", bucket.Object(foo).URL(), tok) frsp, err := http.Get(furl) if err != nil { t.Fatal(err) } if frsp.StatusCode != 200 { t.Fatalf("%s: got %s, want 200", furl, frsp.Status) } burl := fmt.Sprintf("%s?Authorization=%s", bucket.Object(baz).URL(), tok) brsp, err := http.Get(burl) if err != nil { t.Fatal(err) } if brsp.StatusCode != 401 { t.Fatalf("%s: got %s, want 401", burl, brsp.Status) } } func TestRangeReaderLive(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() buf := &bytes.Buffer{} io.Copy(buf, io.LimitReader(zReader{}, 3e6)) rs := bytes.NewReader(buf.Bytes()) w := bucket.Object("foobar").NewWriter(ctx) if _, err := io.Copy(w, rs); err != nil { t.Fatal(err) } if err := w.Close(); err != nil { t.Fatal(err) } table := []struct { offset, length int64 size int64 // expected actual size }{ { offset: 1e6 - 50, length: 1e6 + 50, size: 1e6 + 50, }, { offset: 0, length: -1, size: 3e6, }, { offset: 2e6, length: -1, size: 1e6, }, { offset: 2e6, length: 2e6, size: 1e6, }, { offset: 0, length: 4e6, size: 3e6, }, } for _, e := range table { if _, err := rs.Seek(e.offset, 0); err != nil { t.Error(err) continue } hw := sha1.New() var lr io.Reader lr = rs if e.length >= 0 { lr = io.LimitReader(rs, e.length) } if _, err := io.Copy(hw, lr); err != nil { t.Error(err) continue } r := bucket.Object("foobar").NewRangeReader(ctx, e.offset, e.length) defer r.Close() hr := sha1.New() read, err := io.Copy(hr, r) if err != nil { t.Error(err) continue } if read != e.size { t.Errorf("NewRangeReader(_, %d, %d): read %d bytes, wanted %d bytes", e.offset, e.length, read, e.size) } got := fmt.Sprintf("%x", hr.Sum(nil)) want := fmt.Sprintf("%x", hw.Sum(nil)) if got != want { t.Errorf("NewRangeReader(_, %d, %d): got %q, want %q", e.offset, e.length, got, want) } } } func TestListObjectsWithPrefix(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() foo := "foo/bar" baz := "baz/bar" fw := bucket.Object(foo).NewWriter(ctx) io.Copy(fw, io.LimitReader(zReader{}, 1e5)) if err := fw.Close(); err != nil { t.Fatal(err) } bw := 
bucket.Object(baz).NewWriter(ctx) io.Copy(bw, io.LimitReader(zReader{}, 1e5)) if err := bw.Close(); err != nil { t.Fatal(err) } table := []struct { opts []ListOption }{ { opts: []ListOption{ ListPrefix("baz/"), }, }, { opts: []ListOption{ ListPrefix("baz/"), ListHidden(), }, }, } for _, entry := range table { iter := bucket.List(ctx, entry.opts...) var res []string for iter.Next() { o := iter.Object() attrs, err := o.Attrs(ctx) if err != nil { t.Errorf("(%v).Attrs: %v", o, err) continue } res = append(res, attrs.Name) } if iter.Err() != nil { t.Errorf("iter.Err(): %v", iter.Err()) } want := []string{"baz/bar"} if !reflect.DeepEqual(res, want) { t.Errorf("got %v, want %v", res, want) } } } func compare(a, b *BucketAttrs) bool { if a == nil { a = &BucketAttrs{} } if b == nil { b = &BucketAttrs{} } if a.Type != b.Type && !((a.Type == "" && b.Type == Private) || (a.Type == Private && b.Type == "")) { return false } if !reflect.DeepEqual(a.Info, b.Info) && (len(a.Info) > 0 || len(b.Info) > 0) { return false } return reflect.DeepEqual(a.LifecycleRules, b.LifecycleRules) } func TestNewBucket(t *testing.T) { id := os.Getenv(apiID) key := os.Getenv(apiKey) if id == "" || key == "" { t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests") } ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 2*time.Minute) defer cancel() client, err := NewClient(ctx, id, key) if err != nil { t.Fatal(err) } table := []struct { name string attrs *BucketAttrs }{ { name: "no-attrs", }, { name: "only-rules", attrs: &BucketAttrs{ LifecycleRules: []LifecycleRule{ { Prefix: "whee/", DaysHiddenUntilDeleted: 30, }, { Prefix: "whoa/", DaysNewUntilHidden: 1, }, }, }, }, { name: "only-info", attrs: &BucketAttrs{ Info: map[string]string{ "this": "that", "other": "thing", }, }, }, } for _, ent := range table { bucket, err := client.NewBucket(ctx, id+"-"+ent.name, ent.attrs) if err != nil { t.Errorf("%s: NewBucket(%v): %v", ent.name, ent.attrs, err) continue } defer bucket.Delete(ctx) if err := bucket.Update(ctx, nil); err != nil { t.Errorf("%s: Update(ctx, nil): %v", ent.name, err) continue } attrs, err := bucket.Attrs(ctx) if err != nil { t.Errorf("%s: Attrs(ctx): %v", ent.name, err) continue } if !compare(attrs, ent.attrs) { t.Errorf("%s: attrs disagree: got %v, want %v", ent.name, attrs, ent.attrs) } } } func TestDuelingBuckets(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() bucket2, done2 := startLiveTest(ctx, t) defer done2() attrs, err := bucket.Attrs(ctx) if err != nil { t.Fatal(err) } attrs2, err := bucket2.Attrs(ctx) if err != nil { t.Fatal(err) } attrs.Info["food"] = "yum" if err := bucket.Update(ctx, attrs); err != nil { t.Fatal(err) } attrs2.Info["nails"] = "not" if err := bucket2.Update(ctx, attrs2); !IsUpdateConflict(err) { t.Fatalf("bucket.Update should have failed with IsUpdateConflict; instead failed with %v", err) } attrs2, err = bucket2.Attrs(ctx) if err != nil { t.Fatal(err) } attrs2.Info["nails"] = "not" if err := bucket2.Update(ctx, nil); err != nil { t.Fatal(err) } if err := bucket2.Update(ctx, attrs2); err != nil { t.Fatal(err) } } func TestNotExist(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() if _, err := bucket.Object("not there").Attrs(ctx); !IsNotExist(err) { t.Errorf("IsNotExist() on nonexistent object returned false (%v)", err) } } 
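// TestNotExist above leans on IsNotExist to classify the error. A sketch of
// the same pattern in client code (bucket and name are assumed to be in
// scope):
//
//	attrs, err := bucket.Object(name).Attrs(ctx)
//	if IsNotExist(err) {
//		// The object is absent; treat it as a miss, not a failure.
//	} else if err != nil {
//		return err // a real error
//	}
//	_ = attrs // the object exists; attrs is valid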
func TestWriteEmpty(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() _, _, err := writeFile(ctx, bucket, smallFileName, 0, 1e8) if err != nil { t.Fatal(err) } } func TestAttrsNoRoundtrip(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() _, _, err := writeFile(ctx, bucket, smallFileName, 1e6+42, 1e8) if err != nil { t.Fatal(err) } iter := bucket.List(ctx) iter.Next() obj := iter.Object() var trips int for range bucket.c.Status().table()["1m"] { trips++ } attrs, err := obj.Attrs(ctx) if err != nil { t.Fatal(err) } if attrs.Name != smallFileName { t.Errorf("got the wrong object: got %q, want %q", attrs.Name, smallFileName) } var newTrips int for range bucket.c.Status().table()["1m"] { newTrips++ } if trips != newTrips { t.Errorf("Attrs() should not have caused any net traffic, but it did: old %d, new %d", trips, newTrips) } } /*func TestAttrsFewRoundtrips(t *testing.T) { rt := &rtCounter{rt: defaultTransport} defaultTransport = rt defer func() { defaultTransport = rt.rt }() ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() _, _, err := writeFile(ctx, bucket, smallFileName, 42, 1e8) if err != nil { t.Fatal(err) } o := bucket.Object(smallFileName) trips := rt.trips attrs, err := o.Attrs(ctx) if err != nil { t.Fatal(err) } if attrs.Name != smallFileName { t.Errorf("got the wrong object: got %q, want %q", attrs.Name, smallFileName) } if trips != rt.trips { t.Errorf("Attrs(): too many round trips, got %d, want 1", rt.trips-trips) } }*/ func TestSmallUploadsFewRoundtrips(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() for i := 0; i < 10; i++ { _, _, err := writeFile(ctx, bucket, fmt.Sprintf("%s.%d", smallFileName, i), 42, 1e8) if err != nil { t.Fatal(err) } } si := bucket.c.Status() getURL := si.RPCs[0].CountByMethod()["b2_get_upload_url"] uploadFile := si.RPCs[0].CountByMethod()["b2_upload_file"] if getURL >= uploadFile { t.Errorf("too many calls to b2_get_upload_url") } } func TestDeleteWithoutName(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() _, _, err := writeFile(ctx, bucket, smallFileName, 1e6+42, 1e8) if err != nil { t.Fatal(err) } if err := bucket.Object(smallFileName).Delete(ctx); err != nil { t.Fatal(err) } } func TestListUnfinishedLargeFiles(t *testing.T) { ctx := context.Background() bucket, done := startLiveTest(ctx, t) defer done() w := bucket.Object(largeFileName).NewWriter(ctx) w.ChunkSize = 1e5 if _, err := io.Copy(w, io.LimitReader(zReader{}, 1e6)); err != nil { t.Fatal(err) } iter := bucket.List(ctx, ListUnfinished()) if !iter.Next() { t.Errorf("ListUnfinishedLargeFiles: got none, want 1 (error %v)", iter.Err()) } } func TestReauthPreservesOptions(t *testing.T) { ctx := context.Background() bucket, done := startLiveTest(ctx, t) defer done() var first []ClientOption opts := bucket.r.(*beRoot).options for _, o := range opts { first = append(first, o) } if err := bucket.r.reauthorizeAccount(ctx); err != nil { t.Fatalf("reauthorizeAccount: %v", err) } second := bucket.r.(*beRoot).options if len(second) != len(first) { 
t.Fatalf("options mismatch: got %d options, wanted %d", len(second), len(first)) } var f, s clientOptions for i := range first { first[i](&f) second[i](&s) } if !f.eq(s) { t.Errorf("options mismatch: got %v, want %v", s, f) } } type object struct { o *Object err error } func countObjects(iter *ObjectIterator) (int, error) { var got int for iter.Next() { got++ } return got, iter.Err() } var defaultTransport = http.DefaultTransport type eofTripper struct { rt http.RoundTripper t *testing.T } func (et eofTripper) RoundTrip(req *http.Request) (*http.Response, error) { resp, err := et.rt.RoundTrip(req) if err != nil { return nil, err } resp.Body = &eofReadCloser{rc: resp.Body, t: et.t} return resp, nil } type eofReadCloser struct { rc io.ReadCloser eof bool t *testing.T } func (eof *eofReadCloser) Read(p []byte) (int, error) { n, err := eof.rc.Read(p) if err == io.EOF { eof.eof = true } return n, err } func (eof *eofReadCloser) Close() error { if !eof.eof { eof.t.Error("http body closed with bytes unread") } return eof.rc.Close() } // Checks that close is called. type ccTripper struct { t *testing.T rt http.RoundTripper trips int64 } func (cc *ccTripper) RoundTrip(req *http.Request) (*http.Response, error) { resp, err := cc.rt.RoundTrip(req) if err != nil { return nil, err } atomic.AddInt64(&cc.trips, 1) resp.Body = &ccRC{ReadCloser: resp.Body, c: &cc.trips} return resp, err } func (cc *ccTripper) done() { if cc.trips != 0 { cc.t.Errorf("failed to close %d HTTP bodies", cc.trips) } } type ccRC struct { io.ReadCloser c *int64 } func (cc *ccRC) Close() error { atomic.AddInt64(cc.c, -1) return cc.ReadCloser.Close() } var uniq string func init() { b := make([]byte, 4) if _, err := rand.Read(b); err != nil { panic(err) } uniq = hex.EncodeToString(b) } func startLiveTest(ctx context.Context, t *testing.T) (*Bucket, func()) { id := os.Getenv(apiID) key := os.Getenv(apiKey) if id == "" || key == "" { t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests") return nil, nil } ccport := &ccTripper{rt: defaultTransport, t: t} tport := eofTripper{rt: ccport, t: t} errport := transport.WithFailures(tport, transport.FailureRate(.25), transport.MatchPathSubstring("/b2_get_upload_url"), transport.Response(503)) client, err := NewClient(ctx, id, key, FailSomeUploads(), ExpireSomeAuthTokens(), Transport(errport), UserAgent("b2-test"), UserAgent("integration-test")) if err != nil { t.Fatal(err) return nil, nil } bucket, err := client.NewBucket(ctx, fmt.Sprintf("%s-%s-%s", id, bucketName, uniq), nil) if err != nil { t.Fatal(err) return nil, nil } f := func() { defer ccport.done() iter := bucket.List(ctx, ListHidden()) for iter.Next() { if err := iter.Object().Delete(ctx); err != nil { t.Error(err) } } if err := iter.Err(); err != nil && !IsNotExist(err) { t.Errorf("%#v", err) } if err := bucket.Delete(ctx); err != nil && !IsNotExist(err) { t.Error(err) } } return bucket, f } blazer-0.4.4/b2/iterator.go000066400000000000000000000124621330654575600155040ustar00rootroot00000000000000// Copyright 2018, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package b2 import ( "context" "io" "sync" ) // List returns an iterator for selecting objects in a bucket. The default // behavior, with no options, is to list all currently un-hidden objects. func (b *Bucket) List(ctx context.Context, opts ...ListOption) *ObjectIterator { o := &ObjectIterator{ bucket: b, ctx: ctx, } for _, opt := range opts { opt(&o.opts) } return o } // ObjectIterator abstracts away the tricky bits of iterating over a bucket's // contents. // // It is intended to be called in a loop: // for iter.Next() { // obj := iter.Object() // // act on obj // } // if err := iter.Err(); err != nil { // // handle err // } type ObjectIterator struct { bucket *Bucket ctx context.Context final bool err error idx int c *Cursor opts objectIteratorOptions objs []*Object init sync.Once l lister count int } type lister func(context.Context, int, *Cursor) ([]*Object, *Cursor, error) func (o *ObjectIterator) page(ctx context.Context) error { if o.opts.locker != nil { o.opts.locker.Lock() defer o.opts.locker.Unlock() } objs, c, err := o.l(ctx, o.count, o.c) if err != nil && err != io.EOF { if bNotExist.MatchString(err.Error()) { return b2err{ err: err, notFoundErr: true, } } return err } o.c = c o.objs = objs o.idx = 0 if err == io.EOF { o.final = true } return nil } // Next advances the iterator to the next object. It should be called before // any calls to Object(). If Next returns true, then the next call to Object() // will be valid. Once Next returns false, it is important to check the return // value of Err(). func (o *ObjectIterator) Next() bool { o.init.Do(func() { o.count = o.opts.pageSize if o.count < 0 || o.count > 1000 { o.count = 1000 } switch { case o.opts.unfinished: o.l = o.bucket.ListUnfinishedLargeFiles if o.count > 100 { o.count = 100 } case o.opts.hidden: o.l = o.bucket.ListObjects default: o.l = o.bucket.ListCurrentObjects } o.c = &Cursor{ Prefix: o.opts.prefix, Delimiter: o.opts.delimiter, } }) if o.err != nil { return false } if o.ctx.Err() != nil { o.err = o.ctx.Err() return false } if o.idx >= len(o.objs) { if o.final { o.err = io.EOF return false } if err := o.page(o.ctx); err != nil { o.err = err return false } return o.Next() } o.idx++ return true } // Object returns the current object. func (o *ObjectIterator) Object() *Object { return o.objs[o.idx-1] } // Err returns the current error or nil. If Next() returns false and Err() is // nil, then all objects have been seen. func (o *ObjectIterator) Err() error { if o.err == io.EOF { return nil } return o.err } type objectIteratorOptions struct { hidden bool unfinished bool prefix string delimiter string pageSize int locker sync.Locker } // A ListOption alters the default behavior of List. type ListOption func(*objectIteratorOptions) // ListHidden will include hidden objects in the output. func ListHidden() ListOption { return func(o *objectIteratorOptions) { o.hidden = true } } // ListUnfinished will list unfinished large file operations instead of // existing objects. func ListUnfinished() ListOption { return func(o *objectIteratorOptions) { o.unfinished = true } } // ListPrefix will restrict the output to objects whose names begin with // prefix. func ListPrefix(pfx string) ListOption { return func(o *objectIteratorOptions) { o.prefix = pfx } } // ListDelimiter denotes the path separator. If set, object listings will be // truncated at this character.
// // For example, if the bucket contains objects foo/bar, foo/baz, and foo, // then a delimiter of "/" will cause the listing to return "foo" and "foo/". // Otherwise, the listing would have returned all object names. // // Note that objects returned that end in the delimiter may not be actual // objects, e.g. you cannot read from (or write to, or delete) an object // "foo/", both because no actual object exists and because B2 disallows object // names that end with "/". If you want to ensure that all objects returned // are actual objects, leave this unset. func ListDelimiter(delimiter string) ListOption { return func(o *objectIteratorOptions) { o.delimiter = delimiter } } // ListPageSize configures the iterator to request the given number of objects // per network round-trip. The default (and maximum) is 1000 objects, except // for unfinished large files, which is 100. func ListPageSize(count int) ListOption { return func(o *objectIteratorOptions) { o.pageSize = count } } // ListLocker passes the iterator a lock which will be held during network // round-trips. func ListLocker(l sync.Locker) ListOption { return func(o *objectIteratorOptions) { o.locker = l } } blazer-0.4.4/b2/monitor.go000066400000000000000000000134561330654575600153460ustar00rootroot00000000000000// Copyright 2017, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package b2 import ( "fmt" "html/template" "math" "net/http" "sort" "time" "github.com/kurin/blazer/internal/b2assets" "github.com/kurin/blazer/x/window" ) // StatusInfo reports information about a client. type StatusInfo struct { // Writers contains the status of all current uploads with progress. Writers map[string]*WriterStatus // Readers contains the status of all current downloads with progress. Readers map[string]*ReaderStatus // RPCs contains information about recently made RPC calls over the last // minute, five minutes, hour, and for all time. RPCs map[time.Duration]MethodList } // MethodList is an accumulation of RPC calls that have been made over a given // period of time. type MethodList []method // CountByMethod returns the total RPC calls made per method. func (ml MethodList) CountByMethod() map[string]int { r := make(map[string]int) for i := range ml { r[ml[i].name]++ } return r } type method struct { name string duration time.Duration status int } type methodCounter struct { d time.Duration w *window.Window } func (mc methodCounter) record(m method) { mc.w.Insert([]method{m}) } func (mc methodCounter) retrieve() MethodList { ms := mc.w.Reduce() return MethodList(ms.([]method)) } func newMethodCounter(d, res time.Duration) methodCounter { r := func(i, j interface{}) interface{} { a, ok := i.([]method) if !ok { a = nil } b, ok := j.([]method) if !ok { b = nil } for _, m := range b { a = append(a, m) } return a } return methodCounter{ d: d, w: window.New(d, res, r), } } // WriterStatus reports the status for each writer. type WriterStatus struct { // Progress is a slice of completion ratios. The index of a ratio is its // chunk id less one. 
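//
// For example, Progress[3] == 0.25 would mean that the fourth chunk is
// roughly a quarter uploaded.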
Progress []float64 } // ReaderStatus reports the status for each reader. type ReaderStatus struct { // Progress is a slice of completion ratios. The index of a ratio is its // chunk id less one. Progress []float64 } // Status returns information about the current state of the client. func (c *Client) Status() *StatusInfo { c.slock.Lock() defer c.slock.Unlock() si := &StatusInfo{ Writers: make(map[string]*WriterStatus), Readers: make(map[string]*ReaderStatus), RPCs: make(map[time.Duration]MethodList), } for name, w := range c.sWriters { si.Writers[name] = w.status() } for name, r := range c.sReaders { si.Readers[name] = r.status() } for _, c := range c.sMethods { si.RPCs[c.d] = c.retrieve() } return si } func (si *StatusInfo) table() map[string]map[string]int { r := make(map[string]map[string]int) for d, c := range si.RPCs { for _, m := range c { if _, ok := r[m.name]; !ok { r[m.name] = make(map[string]int) } dur := "all time" if d > 0 { dur = d.String() } r[m.name][dur]++ } } return r } func (c *Client) addWriter(w *Writer) { c.slock.Lock() defer c.slock.Unlock() if c.sWriters == nil { c.sWriters = make(map[string]*Writer) } c.sWriters[fmt.Sprintf("%s/%s", w.o.b.Name(), w.name)] = w } func (c *Client) removeWriter(w *Writer) { c.slock.Lock() defer c.slock.Unlock() if c.sWriters == nil { return } delete(c.sWriters, fmt.Sprintf("%s/%s", w.o.b.Name(), w.name)) } func (c *Client) addReader(r *Reader) { c.slock.Lock() defer c.slock.Unlock() if c.sReaders == nil { c.sReaders = make(map[string]*Reader) } c.sReaders[fmt.Sprintf("%s/%s", r.o.b.Name(), r.name)] = r } func (c *Client) removeReader(r *Reader) { c.slock.Lock() defer c.slock.Unlock() if c.sReaders == nil { return } delete(c.sReaders, fmt.Sprintf("%s/%s", r.o.b.Name(), r.name)) } var ( funcMap = template.FuncMap{ "inc": func(i int) int { return i + 1 }, "lookUp": func(m map[string]int, s string) int { return m[s] }, "pRange": func(i int) string { f := float64(i) min := int(math.Pow(2, f)) - 1 max := min + int(math.Pow(2, f)) return fmt.Sprintf("%v - %v", time.Duration(min)*time.Millisecond, time.Duration(max)*time.Millisecond) }, "methods": func(si *StatusInfo) []string { methods := make(map[string]bool) for _, ms := range si.RPCs { for _, m := range ms { methods[m.name] = true } } var names []string for name := range methods { names = append(names, name) } sort.Strings(names) return names }, "durations": func(si *StatusInfo) []string { var ds []time.Duration for d := range si.RPCs { ds = append(ds, d) } sort.Slice(ds, func(i, j int) bool { return ds[i] < ds[j] }) var r []string for _, d := range ds { dur := "all time" if d > 0 { dur = d.String() } r = append(r, dur) } return r }, "table": func(si *StatusInfo) map[string]map[string]int { return si.table() }, } statusTemplate = template.Must(template.New("status").Funcs(funcMap).Parse(string(b2assets.MustAsset("data/status.html")))) ) // ServeHTTP serves diagnostic information about the current state of the // client; essentially everything available from Client.Status() // // ServeHTTP satisfies the http.Handler interface. This means that a Client // can be passed directly to a path via http.Handle (or on a custom ServeMux or // a custom http.Server). 
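//
// A minimal sketch of exposing the status page; the path and address are
// arbitrary choices here, not part of the API:
//
//	client, err := NewClient(ctx, id, key)
//	if err != nil {
//		log.Fatal(err)
//	}
//	http.Handle("/b2/status", client)
//	log.Fatal(http.ListenAndServe(":8080", nil))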
func (c *Client) ServeHTTP(rw http.ResponseWriter, req *http.Request) { info := c.Status() statusTemplate.Execute(rw, info) } blazer-0.4.4/b2/reader.go000066400000000000000000000136311330654575600151140ustar00rootroot00000000000000// Copyright 2016, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package b2 import ( "bytes" "context" "errors" "io" "sync" "github.com/kurin/blazer/internal/blog" ) var errNoMoreContent = errors.New("416: out of content") // Reader reads files from B2. type Reader struct { // ConcurrentDownloads is the number of simultaneous downloads to pull from // B2. Values greater than one will cause B2 to make multiple HTTP requests // for a given file, increasing available bandwidth at the cost of buffering // the downloads in memory. ConcurrentDownloads int // ChunkSize is the size to fetch per ConcurrentDownload. The default is // 10MB. ChunkSize int ctx context.Context cancel context.CancelFunc // cancels ctx o *Object name string offset int64 // the start of the file length int64 // the length to read, or -1 csize int // chunk size read int // amount read chwid int // chunks written chrid int // chunks read chbuf chan *rchunk init sync.Once rmux sync.Mutex // guards rcond rcond *sync.Cond chunks map[int]*rchunk emux sync.RWMutex // guards err, believe it or not err error smux sync.Mutex smap map[int]*meteredReader } type rchunk struct { bytes.Buffer final bool } // Close frees resources associated with the download. func (r *Reader) Close() error { r.cancel() r.o.b.c.removeReader(r) return nil } func (r *Reader) setErr(err error) { r.emux.Lock() defer r.emux.Unlock() if r.err == nil { r.err = err r.cancel() } } func (r *Reader) setErrNoCancel(err error) { r.emux.Lock() defer r.emux.Unlock() if r.err == nil { r.err = err } } func (r *Reader) getErr() error { r.emux.RLock() defer r.emux.RUnlock() return r.err } func (r *Reader) thread() { go func() { for { var buf *rchunk select { case b, ok := <-r.chbuf: if !ok { return } buf = b case <-r.ctx.Done(): return } r.rmux.Lock() chunkID := r.chwid r.chwid++ r.rmux.Unlock() offset := int64(chunkID*r.csize) + r.offset size := int64(r.csize) if r.length > 0 { if size > r.length { buf.final = true size = r.length } r.length -= size } redo: fr, err := r.o.b.b.downloadFileByName(r.ctx, r.name, offset, size) if err == errNoMoreContent { // this read generated a 416 so we are entirely past the end of the object buf.final = true r.rmux.Lock() r.chunks[chunkID] = buf r.rmux.Unlock() r.rcond.Broadcast() return } if err != nil { r.setErr(err) r.rcond.Broadcast() return } rsize, _, _, _ := fr.stats() mr := &meteredReader{r: noopResetter{fr}, size: int(rsize)} r.smux.Lock() r.smap[chunkID] = mr r.smux.Unlock() i, err := copyContext(r.ctx, buf, mr) fr.Close() r.smux.Lock() r.smap[chunkID] = nil r.smux.Unlock() if i < int64(rsize) || err == io.ErrUnexpectedEOF { // Probably the network connection was closed early. Retry. 
blog.V(1).Infof("b2 reader %d: got %dB of %dB; retrying", chunkID, i, rsize) buf.Reset() goto redo } if err != nil { r.setErr(err) r.rcond.Broadcast() return } r.rmux.Lock() r.chunks[chunkID] = buf r.rmux.Unlock() r.rcond.Broadcast() } }() } func (r *Reader) curChunk() (*rchunk, error) { ch := make(chan *rchunk) go func() { r.rmux.Lock() defer r.rmux.Unlock() for r.chunks[r.chrid] == nil && r.getErr() == nil && r.ctx.Err() == nil { r.rcond.Wait() } select { case ch <- r.chunks[r.chrid]: case <-r.ctx.Done(): return } }() select { case buf := <-ch: return buf, r.getErr() case <-r.ctx.Done(): if r.getErr() != nil { return nil, r.getErr() } return nil, r.ctx.Err() } } func (r *Reader) initFunc() { r.smux.Lock() r.smap = make(map[int]*meteredReader) r.smux.Unlock() r.o.b.c.addReader(r) r.rcond = sync.NewCond(&r.rmux) cr := r.ConcurrentDownloads if cr < 1 { cr = 1 } if r.ChunkSize < 1 { r.ChunkSize = 1e7 } r.csize = r.ChunkSize r.chbuf = make(chan *rchunk, cr) for i := 0; i < cr; i++ { r.thread() r.chbuf <- &rchunk{} } } func (r *Reader) Read(p []byte) (int, error) { if err := r.getErr(); err != nil { return 0, err } // TODO: check the SHA1 hash here and verify it on Close. r.init.Do(r.initFunc) chunk, err := r.curChunk() if err != nil { r.setErrNoCancel(err) return 0, err } n, err := chunk.Read(p) r.read += n if err == io.EOF { if chunk.final { close(r.chbuf) r.setErrNoCancel(err) return n, err } r.chrid++ chunk.Reset() r.chbuf <- chunk err = nil } r.setErrNoCancel(err) return n, err } func (r *Reader) status() *ReaderStatus { r.smux.Lock() defer r.smux.Unlock() rs := &ReaderStatus{ Progress: make([]float64, len(r.smap)), } for i := 1; i <= len(r.smap); i++ { rs.Progress[i-1] = r.smap[i].done() } return rs } // copied from io.Copy, basically. func copyContext(ctx context.Context, dst io.Writer, src io.Reader) (written int64, err error) { buf := make([]byte, 32*1024) for { if ctx.Err() != nil { err = ctx.Err() return } nr, er := src.Read(buf) if nr > 0 { nw, ew := dst.Write(buf[0:nr]) if nw > 0 { written += int64(nw) } if ew != nil { err = ew break } if nr != nw { err = io.ErrShortWrite break } } if er == io.EOF { break } if er != nil { err = er break } } return written, err } type noopResetter struct { io.Reader } func (noopResetter) Reset() error { return nil } blazer-0.4.4/b2/readerat.go000066400000000000000000000023021330654575600154320ustar00rootroot00000000000000// Copyright 2017, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package b2 import ( "io" "sync" ) type readerAt struct { rs io.ReadSeeker mu sync.Mutex } func (r *readerAt) ReadAt(p []byte, off int64) (int, error) { r.mu.Lock() defer r.mu.Unlock() // ReadAt is supposed to preserve the offset. cur, err := r.rs.Seek(0, io.SeekCurrent) if err != nil { return 0, err } defer r.rs.Seek(cur, io.SeekStart) if _, err := r.rs.Seek(off, io.SeekStart); err != nil { return 0, err } return io.ReadFull(r.rs, p) } // wraps a ReadSeeker in a mutex to provide a ReaderAt. (How is this not in the // io package?)
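//
// Usage is simply (rs being any io.ReadSeeker):
//
//	ra := enReaderAt(rs)
//
// The returned value satisfies io.ReaderAt; the mutex serializes the
// seek-read-restore dance in ReadAt, trading concurrency for correctness.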
func enReaderAt(rs io.ReadSeeker) io.ReaderAt { return &readerAt{rs: rs} } blazer-0.4.4/b2/writer.go000066400000000000000000000277321330654575600151710ustar00rootroot00000000000000// Copyright 2016, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package b2 import ( "context" "errors" "fmt" "io" "sync" "sync/atomic" "time" "github.com/kurin/blazer/internal/blog" ) // Writer writes data into Backblaze. It automatically switches to the large // file API if the file exceeds ChunkSize bytes. Due to that and other // Backblaze API details, there is a large buffer. // // Changes to public Writer attributes must be made before the first call to // Write. type Writer struct { // ConcurrentUploads is the number of different threads sending data concurrently // to Backblaze for large files. This can increase performance greatly, as // each thread will hit a different endpoint. However, there is a ChunkSize // buffer for each thread. Values less than 1 are equivalent to 1. ConcurrentUploads int // Resume an upload. If true, and the upload is a large file, and a file of // the same name was started but not finished, then assume that we are // resuming that file, and don't upload duplicate chunks. Resume bool // ChunkSize is the size, in bytes, of each individual part, when writing // large files, and also when determining whether to upload a file normally // or when to split it into parts. The default is 100M (1e8). The minimum is // 5M (5e6); values less than this are not an error, but will fail. The // maximum is 5GB (5e9). ChunkSize int // UseFileBuffer controls whether to use an in-memory buffer (the default) or // scratch space on the file system. If this is true, b2 will save chunks in // FileBufferDir. UseFileBuffer bool // FileBufferDir specifies the directory where scratch files are kept. If // blank, os.TempDir() is used.
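//
// A sketch of spooling chunks through scratch files rather than memory; the
// directory named here is illustrative:
//
//	w := bucket.Object("big-backup").NewWriter(ctx)
//	w.UseFileBuffer = true
//	w.FileBufferDir = "/var/tmp/blazer" // must exist and be writable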
FileBufferDir string contentType string info map[string]string csize int ctx context.Context cancel context.CancelFunc ready chan chunk wg sync.WaitGroup start sync.Once once sync.Once done sync.Once file beLargeFileInterface seen map[int]string everStarted bool newBuffer func() (writeBuffer, error) o *Object name string cidx int w writeBuffer emux sync.RWMutex err error smux sync.RWMutex smap map[int]*meteredReader } type chunk struct { id int buf writeBuffer } func (w *Writer) setErr(err error) { if err == nil || err == io.EOF { return } w.emux.Lock() defer w.emux.Unlock() if w.err == nil { blog.V(1).Infof("error writing %s: %v", w.name, err) w.err = err w.cancel() } } func (w *Writer) getErr() error { w.emux.RLock() defer w.emux.RUnlock() return w.err } func (w *Writer) registerChunk(id int, r *meteredReader) { w.smux.Lock() w.smap[id] = r w.smux.Unlock() } func (w *Writer) completeChunk(id int) { w.smux.Lock() w.smap[id] = nil w.smux.Unlock() } var gid int32 func (w *Writer) thread() { w.wg.Add(1) go func() { defer w.wg.Done() id := atomic.AddInt32(&gid, 1) fc, err := w.file.getUploadPartURL(w.ctx) if err != nil { w.setErr(err) return } for { chunk, ok := <-w.ready if !ok { return } if sha, ok := w.seen[chunk.id]; ok { if sha != chunk.buf.Hash() { w.setErr(errors.New("resumable upload was requested, but chunks don't match")) return } chunk.buf.Close() w.completeChunk(chunk.id) blog.V(2).Infof("skipping chunk %d", chunk.id) continue } blog.V(2).Infof("thread %d handling chunk %d", id, chunk.id) r, err := chunk.buf.Reader() if err != nil { w.setErr(err) return } mr := &meteredReader{r: r, size: chunk.buf.Len()} w.registerChunk(chunk.id, mr) sleep := time.Millisecond * 15 redo: n, err := fc.uploadPart(w.ctx, mr, chunk.buf.Hash(), chunk.buf.Len(), chunk.id) if n != chunk.buf.Len() || err != nil { if w.o.b.r.reupload(err) { time.Sleep(sleep) sleep *= 2 if sleep > time.Second*15 { sleep = time.Second * 15 } blog.V(1).Infof("b2 writer: wrote %d of %d: error: %v; retrying", n, chunk.buf.Len(), err) f, err := w.file.getUploadPartURL(w.ctx) if err != nil { w.setErr(err) w.completeChunk(chunk.id) chunk.buf.Close() // TODO: log error return } fc = f goto redo } w.setErr(err) w.completeChunk(chunk.id) chunk.buf.Close() // TODO: log error return } w.completeChunk(chunk.id) chunk.buf.Close() // TODO: log error blog.V(2).Infof("chunk %d handled", chunk.id) } }() } func (w *Writer) init() { w.start.Do(func() { w.everStarted = true w.smux.Lock() w.smap = make(map[int]*meteredReader) w.smux.Unlock() w.o.b.c.addWriter(w) w.csize = w.ChunkSize if w.csize == 0 { w.csize = 1e8 } if w.newBuffer == nil { w.newBuffer = func() (writeBuffer, error) { return newMemoryBuffer(), nil } if w.UseFileBuffer { w.newBuffer = func() (writeBuffer, error) { return newFileBuffer(w.FileBufferDir) } } } v, err := w.newBuffer() if err != nil { w.setErr(err) return } w.w = v }) } // Write satisfies the io.Writer interface. 
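//
// A minimal usage sketch (bucket, ctx, and src are assumed to exist; this is
// one way to drive the writer, not the only one):
//
//	w := bucket.Object("dst").NewWriter(ctx)
//	if _, err := io.Copy(w, src); err != nil {
//		w.Close() // the upload is already broken; Close here only for cleanup
//		return err
//	}
//	return w.Close() // the error from Close must always be checked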
func (w *Writer) Write(p []byte) (int, error) { w.init() if err := w.getErr(); err != nil { return 0, err } left := w.csize - w.w.Len() if len(p) < left { return w.w.Write(p) } i, err := w.w.Write(p[:left]) if err != nil { w.setErr(err) return i, err } if err := w.sendChunk(); err != nil { w.setErr(err) return i, w.getErr() } k, err := w.Write(p[left:]) if err != nil { w.setErr(err) } return i + k, err } func (w *Writer) getUploadURL(ctx context.Context) (beURLInterface, error) { u := w.o.b.urlPool.get() if u == nil { return w.o.b.b.getUploadURL(w.ctx) } return u, nil } func (w *Writer) simpleWriteFile() error { ue, err := w.getUploadURL(w.ctx) if err != nil { return err } // This defer needs to be in a func() so that we put whatever the value of ue // is at function exit. defer func() { w.o.b.urlPool.put(ue) }() sha1 := w.w.Hash() ctype := w.contentType if ctype == "" { ctype = "application/octet-stream" } r, err := w.w.Reader() if err != nil { return err } mr := &meteredReader{r: r, size: w.w.Len()} w.registerChunk(1, mr) defer w.completeChunk(1) redo: f, err := ue.uploadFile(w.ctx, mr, int(w.w.Len()), w.name, ctype, sha1, w.info) if err != nil { if w.o.b.r.reupload(err) { blog.V(2).Infof("b2 writer: %v; retrying", err) u, err := w.o.b.b.getUploadURL(w.ctx) if err != nil { return err } ue = u goto redo } return err } w.o.f = f return nil } func (w *Writer) getLargeFile() (beLargeFileInterface, error) { if !w.Resume { ctype := w.contentType if ctype == "" { ctype = "application/octet-stream" } return w.o.b.b.startLargeFile(w.ctx, w.name, ctype, w.info) } next := 1 seen := make(map[int]string) var size int64 var fi beFileInterface for { cur := &Cursor{name: w.name} objs, _, err := w.o.b.ListObjects(w.ctx, 1, cur) if err != nil { return nil, err } if len(objs) < 1 || objs[0].name != w.name { w.Resume = false return w.getLargeFile() } fi = objs[0].f parts, n, err := fi.listParts(w.ctx, next, 100) if err != nil { return nil, err } next = n for _, p := range parts { seen[p.number()] = p.sha1() size += p.size() } if len(parts) == 0 { break } if next == 0 { break } } w.seen = make(map[int]string) // copy the map for id, sha := range seen { w.seen[id] = sha } return fi.compileParts(size, seen), nil } func (w *Writer) sendChunk() error { var err error w.once.Do(func() { lf, e := w.getLargeFile() if e != nil { err = e return } w.file = lf w.ready = make(chan chunk) if w.ConcurrentUploads < 1 { w.ConcurrentUploads = 1 } for i := 0; i < w.ConcurrentUploads; i++ { w.thread() } }) if err != nil { return err } select { case w.ready <- chunk{ id: w.cidx + 1, buf: w.w, }: case <-w.ctx.Done(): return w.ctx.Err() } w.cidx++ v, err := w.newBuffer() if err != nil { return err } w.w = v return nil } // ReadFrom reads all of r into w, returning the first error or no error if r // returns io.EOF. If r is also an io.Seeker, ReadFrom will stream r directly // over the wire instead of buffering it locally. This reduces memory usage. // // Do not issue multiple calls to ReadFrom, or mix ReadFrom and Write. If you // have multiple readers you want to concatenate into the same B2 object, use // an io.MultiReader. // // Note that io.Copy will automatically choose to use ReadFrom. // // ReadFrom currently doesn't handle w.Resume; if w.Resume is true, ReadFrom // will act as if r is not an io.Seeker. 
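//
// For example, io.Copy takes this path automatically when the source is an
// *os.File (a sketch; path and bucket are assumed):
//
//	f, err := os.Open(path)
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	w := bucket.Object("dst").NewWriter(ctx)
//	if _, err := io.Copy(w, f); err != nil { // io.Copy calls w.ReadFrom(f)
//		w.Close()
//		return err
//	}
//	return w.Close()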
func (w *Writer) ReadFrom(r io.Reader) (int64, error) { rs, ok := r.(io.ReadSeeker) if !ok || w.Resume { return copyContext(w.ctx, w, r) } blog.V(2).Info("streaming without buffer") size, err := rs.Seek(0, io.SeekEnd) if err != nil { return 0, err } var ra io.ReaderAt if rat, ok := r.(io.ReaderAt); ok { ra = rat } else { ra = enReaderAt(rs) } var offset int64 var wrote int64 w.newBuffer = func() (writeBuffer, error) { left := size - offset if left <= 0 { // We're done sending real chunks; send empty chunks from now on so that // Close() works. w.newBuffer = func() (writeBuffer, error) { return newMemoryBuffer(), nil } w.w = newMemoryBuffer() return nil, io.EOF } csize := int64(w.csize) if left < csize { csize = left } nb := newNonBuffer(ra, offset, csize) wrote += csize // TODO: this is kind of a total lie offset += csize return nb, nil } w.init() if size < int64(w.csize) { // the magic happens on w.Close() return size, nil } for { if err := w.sendChunk(); err != nil { if err != io.EOF { return wrote, err } return wrote, nil } } } // Close satisfies the io.Closer interface. It is critical to check the return // value of Close for all writers. func (w *Writer) Close() error { w.done.Do(func() { if !w.everStarted { return } defer w.o.b.c.removeWriter(w) defer func() { if err := w.w.Close(); err != nil { // this is non-fatal, but alarming blog.V(1).Infof("close %s: %v", w.name, err) } }() if w.cidx == 0 { w.setErr(w.simpleWriteFile()) return } if w.w.Len() > 0 { if err := w.sendChunk(); err != nil { w.setErr(err) return } } close(w.ready) w.wg.Wait() f, err := w.file.finishLargeFile(w.ctx) if err != nil { w.setErr(err) return } w.o.f = f }) return w.getErr() } // WithAttrs sets the writable attributes of the resulting file to given // values. WithAttrs must be called before the first call to Write. func (w *Writer) WithAttrs(attrs *Attrs) *Writer { w.contentType = attrs.ContentType w.info = make(map[string]string) for k, v := range attrs.Info { w.info[k] = v } if len(w.info) < 10 && !attrs.LastModified.IsZero() { w.info["src_last_modified_millis"] = fmt.Sprintf("%d", attrs.LastModified.UnixNano()/1e6) } return w } func (w *Writer) status() *WriterStatus { w.smux.RLock() defer w.smux.RUnlock() ws := &WriterStatus{ Progress: make([]float64, len(w.smap)), } for i := 1; i <= len(w.smap); i++ { ws.Progress[i-1] = w.smap[i].done() } return ws } type meteredReader struct { read int64 size int r readResetter mux sync.Mutex } func (mr *meteredReader) Read(p []byte) (int, error) { mr.mux.Lock() defer mr.mux.Unlock() n, err := mr.r.Read(p) mr.read += int64(n) return n, err } func (mr *meteredReader) Reset() error { mr.mux.Lock() defer mr.mux.Unlock() mr.read = 0 return mr.r.Reset() } func (mr *meteredReader) done() float64 { if mr == nil { return 1 } read := float64(atomic.LoadInt64(&mr.read)) return read / float64(mr.size) } blazer-0.4.4/base/000077500000000000000000000000001330654575600137265ustar00rootroot00000000000000blazer-0.4.4/base/base.go000066400000000000000000000772111330654575600151770ustar00rootroot00000000000000// Copyright 2016, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // Package base provides a very low-level interface on top of the B2 v1 API. // It is not intended to be used directly. // // It currently lacks support for the following APIs: // // b2_download_file_by_id package base import ( "bytes" "context" "encoding/base64" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "regexp" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/kurin/blazer/internal/b2types" "github.com/kurin/blazer/internal/blog" ) const ( APIBase = "https://api.backblazeb2.com" DefaultUserAgent = "blazer/0.4.4" ) type b2err struct { msg string method string retry int code int } func (e b2err) Error() string { if e.method == "" { return fmt.Sprintf("b2 error: %s", e.msg) } return fmt.Sprintf("%s: %d: %s", e.method, e.code, e.msg) } // Action checks an error and returns a recommended course of action. func Action(err error) ErrAction { e, ok := err.(b2err) if !ok { return Punt } if e.retry > 0 { return Retry } if e.code >= 500 && e.code < 600 && (e.method == "b2_upload_file" || e.method == "b2_upload_part") { return AttemptNewUpload } switch e.code { case 401: switch e.method { case "b2_authorize_account": return Punt case "b2_upload_file", "b2_upload_part": return AttemptNewUpload } return ReAuthenticate case 400: // See restic/restic#1207 if e.method == "b2_upload_file" && strings.HasPrefix(e.msg, "more than one upload using auth token") { return AttemptNewUpload } return Punt case 408: return AttemptNewUpload case 429, 500, 503: return Retry } return Punt } // ErrAction is an action that a caller can take when any function returns an // error. type ErrAction int // Code returns the error code and message. func Code(err error) (int, string) { e, ok := err.(b2err) if !ok { return 0, "" } return e.code, e.msg } const ( // ReAuthenticate indicates that the B2 account authentication tokens have // expired, and should be refreshed with a new call to AuthorizeAccount. ReAuthenticate ErrAction = iota // AttemptNewUpload indicates that an upload's authentication token (or URL // endpoint) has expired, and that users should request new ones with a call // to GetUploadURL or GetUploadPartURL. AttemptNewUpload // Retry indicates that the caller should wait an appropriate amount of time, // and then reattempt the RPC. Retry // Punt means that there is no useful action to be taken on this error, and // that it should be displayed to the user. Punt ) func mkErr(resp *http.Response) error { data, err := ioutil.ReadAll(resp.Body) var msgBody string if err != nil { msgBody = fmt.Sprintf("couldn't read message body: %v", err) } logResponse(resp, data) msg := &b2types.ErrorMessage{} if err := json.Unmarshal(data, msg); err != nil { if msgBody != "" { msgBody = fmt.Sprintf("couldn't read message body: %v", err) } } if msgBody == "" { msgBody = msg.Msg } var retryAfter int retry := resp.Header.Get("Retry-After") if retry != "" { r, err := strconv.ParseInt(retry, 10, 64) if err != nil { r = 0 blog.V(1).Infof("couldn't parse retry-after header %q: %v", retry, err) } retryAfter = int(r) } return b2err{ msg: msgBody, retry: retryAfter, code: resp.StatusCode, method: resp.Request.Header.Get("X-Blazer-Method"), } } // Backoff returns an appropriate amount of time to wait, given an error, if // any was returned by the server. If the return value is 0, but Action // indicates Retry, the user should implement their own exponential backoff, // beginning with one second. 
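//
// A hypothetical caller-side retry loop; the one-second seed and the doubling
// are this example's assumptions, not values mandated by the API:
//
//	sleep := time.Second
//	for {
//		err := doRPC(ctx) // doRPC stands in for some wrapped B2 call
//		if Action(err) != Retry {
//			return err
//		}
//		if d := Backoff(err); d > 0 {
//			sleep = d // the server requested a specific delay
//		}
//		time.Sleep(sleep)
//		sleep *= 2
//	}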
func Backoff(err error) time.Duration { e, ok := err.(b2err) if !ok { return 0 } return time.Duration(e.retry) * time.Second } func logRequest(req *http.Request, args []byte) { if !blog.V(2) { return } var headers []string for k, v := range req.Header { if k == "Authorization" || k == "X-Blazer-Method" { continue } headers = append(headers, fmt.Sprintf("%s: %s", k, strings.Join(v, ","))) } hstr := strings.Join(headers, ";") method := req.Header.Get("X-Blazer-Method") if args != nil { blog.V(2).Infof(">> %s uri: %v headers: {%s} args: (%s)", method, req.URL, hstr, string(args)) return } blog.V(2).Infof(">> %s uri: %v {%s} (no args)", method, req.URL, hstr) } var authRegexp = regexp.MustCompile(`"authorizationToken": ".[^"]*"`) func logResponse(resp *http.Response, reply []byte) { if !blog.V(2) { return } var headers []string for k, v := range resp.Header { headers = append(headers, fmt.Sprintf("%s: %s", k, strings.Join(v, ","))) } hstr := strings.Join(headers, "; ") method := resp.Request.Header.Get("X-Blazer-Method") id := resp.Request.Header.Get("X-Blazer-Request-ID") if reply != nil { safe := string(authRegexp.ReplaceAll(reply, []byte(`"authorizationToken": "[redacted]"`))) blog.V(2).Infof("<< %s (%s) %s {%s} (%s)", method, id, resp.Status, hstr, safe) return } blog.V(2).Infof("<< %s (%s) %s {%s} (no reply)", method, id, resp.Status, hstr) } func millitime(t int64) time.Time { return time.Unix(t/1000, t%1000*1e6) } type b2Options struct { transport http.RoundTripper failSomeUploads bool expireTokens bool capExceeded bool apiBase string userAgent string } func (o *b2Options) addHeaders(req *http.Request) { if o.failSomeUploads { req.Header.Add("X-Bz-Test-Mode", "fail_some_uploads") } if o.expireTokens { req.Header.Add("X-Bz-Test-Mode", "expire_some_account_authorization_tokens") } if o.capExceeded { req.Header.Add("X-Bz-Test-Mode", "force_cap_exceeded") } req.Header.Set("User-Agent", o.getUserAgent()) } func (o *b2Options) getAPIBase() string { if o.apiBase != "" { return o.apiBase } return APIBase } func (o *b2Options) getUserAgent() string { if o.userAgent != "" { return fmt.Sprintf("%s %s", o.userAgent, DefaultUserAgent) } return DefaultUserAgent } func (o *b2Options) getTransport() http.RoundTripper { if o.transport == nil { return http.DefaultTransport } return o.transport } // B2 holds account information for Backblaze. type B2 struct { accountID string authToken string apiURI string downloadURI string minPartSize int opts *b2Options } // Update replaces the B2 object with a new one, in-place. 
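//
// A sketch of the intended use, refreshing credentials after Action reports
// ReAuthenticate (account and key are assumed to be available to the caller):
//
//	if Action(err) == ReAuthenticate {
//		n, aerr := AuthorizeAccount(ctx, account, key)
//		if aerr != nil {
//			return aerr
//		}
//		b.Update(n) // the existing *B2 now carries the fresh token
//	}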
func (b *B2) Update(n *B2) { b.accountID = n.accountID b.authToken = n.authToken b.apiURI = n.apiURI b.downloadURI = n.downloadURI b.minPartSize = n.minPartSize b.opts = n.opts } type httpReply struct { resp *http.Response err error } func makeNetRequest(ctx context.Context, req *http.Request, rt http.RoundTripper) (*http.Response, error) { req = req.WithContext(ctx) resp, err := rt.RoundTrip(req) switch err { case nil: return resp, nil case context.Canceled, context.DeadlineExceeded: return nil, err default: method := req.Header.Get("X-Blazer-Method") blog.V(2).Infof(">> %s uri: %v err: %v", method, req.URL, err) return nil, b2err{ msg: err.Error(), retry: 1, } } } type requestBody struct { size int64 body io.Reader } func (rb *requestBody) getSize() int64 { if rb == nil { return 0 } return rb.size } func (rb *requestBody) getBody() io.Reader { if rb == nil { return nil } return rb.body } type keepFinalBytes struct { r io.Reader remain int sha [40]byte } func (k *keepFinalBytes) Read(p []byte) (int, error) { n, err := k.r.Read(p) if k.remain-n > 40 { k.remain -= n return n, err } // This was a whole lot harder than it looks. pi := -40 + k.remain if pi < 0 { pi = 0 } pe := n ki := 40 - k.remain if ki < 0 { ki = 0 } ke := n - k.remain + 40 copy(k.sha[ki:ke], p[pi:pe]) k.remain -= n return n, err } var reqID int64 func (o *b2Options) makeRequest(ctx context.Context, method, verb, uri string, b2req, b2resp interface{}, headers map[string]string, body *requestBody) error { var args []byte if b2req != nil { enc, err := json.Marshal(b2req) if err != nil { return err } args = enc body = &requestBody{ body: bytes.NewBuffer(enc), size: int64(len(enc)), } } req, err := http.NewRequest(verb, uri, body.getBody()) if err != nil { return err } req.ContentLength = body.getSize() for k, v := range headers { if strings.HasPrefix(k, "X-Bz-Info") || strings.HasPrefix(k, "X-Bz-File-Name") { v = escape(v) } req.Header.Set(k, v) } req.Header.Set("X-Blazer-Request-ID", fmt.Sprintf("%d", atomic.AddInt64(&reqID, 1))) req.Header.Set("X-Blazer-Method", method) o.addHeaders(req) logRequest(req, args) resp, err := makeNetRequest(ctx, req, o.getTransport()) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != 200 { return mkErr(resp) } var replyArgs []byte if b2resp != nil { rbuf := &bytes.Buffer{} r := io.TeeReader(resp.Body, rbuf) decoder := json.NewDecoder(r) if err := decoder.Decode(b2resp); err != nil { return err } replyArgs = rbuf.Bytes() } else { ra, err := ioutil.ReadAll(resp.Body) if err != nil { blog.V(1).Infof("%s: couldn't read response: %v", method, err) } replyArgs = ra } logResponse(resp, replyArgs) return nil } // AuthorizeAccount wraps b2_authorize_account. func AuthorizeAccount(ctx context.Context, account, key string, opts ...AuthOption) (*B2, error) { auth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", account, key))) b2resp := &b2types.AuthorizeAccountResponse{} headers := map[string]string{ "Authorization": fmt.Sprintf("Basic %s", auth), } b2opts := &b2Options{} for _, f := range opts { f(b2opts) } if err := b2opts.makeRequest(ctx, "b2_authorize_account", "GET", b2opts.getAPIBase()+b2types.V1api+"b2_authorize_account", nil, b2resp, headers, nil); err != nil { return nil, err } return &B2{ accountID: b2resp.AccountID, authToken: b2resp.AuthToken, apiURI: b2resp.URI, downloadURI: b2resp.DownloadURI, minPartSize: b2resp.MinPartSize, opts: b2opts, }, nil } // An AuthOption allows callers to choose per-session settings. 
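//
// Options are passed variadically to AuthorizeAccount; for example (the
// custom RoundTripper is assumed):
//
//	b2, err := AuthorizeAccount(ctx, account, key,
//		UserAgent("my-tool/1.0"),
//		Transport(customRoundTripper),
//	)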
type AuthOption func(*b2Options)

// UserAgent sets the User-Agent HTTP header. The default header is
// "blazer/<version>"; the value set here will be prepended to that. This can
// be set multiple times.
func UserAgent(agent string) AuthOption {
	return func(o *b2Options) {
		if o.userAgent == "" {
			o.userAgent = agent
			return
		}
		o.userAgent = fmt.Sprintf("%s %s", agent, o.userAgent)
	}
}

// Transport returns an AuthOption that sets the underlying HTTP mechanism.
func Transport(rt http.RoundTripper) AuthOption {
	return func(o *b2Options) {
		o.transport = rt
	}
}

// FailSomeUploads requests intermittent upload failures from the B2 service.
// This is mostly useful for testing.
func FailSomeUploads() AuthOption {
	return func(o *b2Options) {
		o.failSomeUploads = true
	}
}

// ExpireSomeAuthTokens requests intermittent authentication failures from the
// B2 service.
func ExpireSomeAuthTokens() AuthOption {
	return func(o *b2Options) {
		o.expireTokens = true
	}
}

// ForceCapExceeded requests a cap limit from the B2 service. This causes all
// uploads to be treated as if they would exceed the configured B2 capacity.
func ForceCapExceeded() AuthOption {
	return func(o *b2Options) {
		o.capExceeded = true
	}
}

type LifecycleRule struct {
	Prefix                 string
	DaysNewUntilHidden     int
	DaysHiddenUntilDeleted int
}

// CreateBucket wraps b2_create_bucket.
func (b *B2) CreateBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (*Bucket, error) {
	if btype != "allPublic" {
		btype = "allPrivate"
	}
	var b2rules []b2types.LifecycleRule
	for _, rule := range rules {
		b2rules = append(b2rules, b2types.LifecycleRule{
			Prefix:                 rule.Prefix,
			DaysNewUntilHidden:     rule.DaysNewUntilHidden,
			DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted,
		})
	}
	b2req := &b2types.CreateBucketRequest{
		AccountID:      b.accountID,
		Name:           name,
		Type:           btype,
		Info:           info,
		LifecycleRules: b2rules,
	}
	b2resp := &b2types.CreateBucketResponse{}
	headers := map[string]string{
		"Authorization": b.authToken,
	}
	if err := b.opts.makeRequest(ctx, "b2_create_bucket", "POST", b.apiURI+b2types.V1api+"b2_create_bucket", b2req, b2resp, headers, nil); err != nil {
		return nil, err
	}
	var respRules []LifecycleRule
	for _, rule := range b2resp.LifecycleRules {
		respRules = append(respRules, LifecycleRule{
			Prefix:                 rule.Prefix,
			DaysNewUntilHidden:     rule.DaysNewUntilHidden,
			DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted,
		})
	}
	return &Bucket{
		Name:           name,
		Info:           b2resp.Info,
		LifecycleRules: respRules,
		id:             b2resp.BucketID,
		rev:            b2resp.Revision,
		b2:             b,
	}, nil
}

// DeleteBucket wraps b2_delete_bucket.
func (b *Bucket) DeleteBucket(ctx context.Context) error {
	b2req := &b2types.DeleteBucketRequest{
		AccountID: b.b2.accountID,
		BucketID:  b.id,
	}
	headers := map[string]string{
		"Authorization": b.b2.authToken,
	}
	return b.b2.opts.makeRequest(ctx, "b2_delete_bucket", "POST", b.b2.apiURI+b2types.V1api+"b2_delete_bucket", b2req, nil, headers, nil)
}

// Bucket holds B2 bucket details.
type Bucket struct {
	Name           string
	Type           string
	Info           map[string]string
	LifecycleRules []LifecycleRule

	id  string
	rev int
	b2  *B2
}

// Update wraps b2_update_bucket.
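//
// A sketch: mutate the exported fields, then persist them. Because the
// request carries the bucket revision, a concurrent modification since the
// bucket was last fetched surfaces as an error:
//
//	bucket.Type = "allPrivate"
//	bucket.Info["purpose"] = "archive"
//	bucket, err = bucket.Update(ctx)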
func (b *Bucket) Update(ctx context.Context) (*Bucket, error) { var rules []b2types.LifecycleRule for _, rule := range b.LifecycleRules { rules = append(rules, b2types.LifecycleRule{ DaysNewUntilHidden: rule.DaysNewUntilHidden, DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, Prefix: rule.Prefix, }) } b2req := &b2types.UpdateBucketRequest{ AccountID: b.b2.accountID, BucketID: b.id, // Name: b.Name, Type: b.Type, Info: b.Info, LifecycleRules: rules, IfRevisionIs: b.rev, } headers := map[string]string{ "Authorization": b.b2.authToken, } b2resp := &b2types.UpdateBucketResponse{} if err := b.b2.opts.makeRequest(ctx, "b2_update_bucket", "POST", b.b2.apiURI+b2types.V1api+"b2_update_bucket", b2req, b2resp, headers, nil); err != nil { return nil, err } var respRules []LifecycleRule for _, rule := range b2resp.LifecycleRules { respRules = append(respRules, LifecycleRule{ Prefix: rule.Prefix, DaysNewUntilHidden: rule.DaysNewUntilHidden, DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, }) } return &Bucket{ Name: b.Name, Type: b2resp.Type, Info: b2resp.Info, LifecycleRules: respRules, id: b2resp.BucketID, b2: b.b2, }, nil } // BaseURL returns the base part of the download URLs. func (b *Bucket) BaseURL() string { return b.b2.downloadURI } // ListBuckets wraps b2_list_buckets. func (b *B2) ListBuckets(ctx context.Context) ([]*Bucket, error) { b2req := &b2types.ListBucketsRequest{ AccountID: b.accountID, } b2resp := &b2types.ListBucketsResponse{} headers := map[string]string{ "Authorization": b.authToken, } if err := b.opts.makeRequest(ctx, "b2_list_buckets", "POST", b.apiURI+b2types.V1api+"b2_list_buckets", b2req, b2resp, headers, nil); err != nil { return nil, err } var buckets []*Bucket for _, bucket := range b2resp.Buckets { var rules []LifecycleRule for _, rule := range bucket.LifecycleRules { rules = append(rules, LifecycleRule{ Prefix: rule.Prefix, DaysNewUntilHidden: rule.DaysNewUntilHidden, DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, }) } buckets = append(buckets, &Bucket{ Name: bucket.Name, Type: bucket.Type, Info: bucket.Info, LifecycleRules: rules, id: bucket.BucketID, rev: bucket.Revision, b2: b, }) } return buckets, nil } // URL holds information from the b2_get_upload_url API. type URL struct { uri string token string b2 *B2 bucket *Bucket } // Reload reloads URL in-place, by reissuing a b2_get_upload_url and // overwriting the previous values. func (url *URL) Reload(ctx context.Context) error { n, err := url.bucket.GetUploadURL(ctx) if err != nil { return err } url.uri = n.uri url.token = n.token return nil } // GetUploadURL wraps b2_get_upload_url. func (b *Bucket) GetUploadURL(ctx context.Context) (*URL, error) { b2req := &b2types.GetUploadURLRequest{ BucketID: b.id, } b2resp := &b2types.GetUploadURLResponse{} headers := map[string]string{ "Authorization": b.b2.authToken, } if err := b.b2.opts.makeRequest(ctx, "b2_get_upload_url", "POST", b.b2.apiURI+b2types.V1api+"b2_get_upload_url", b2req, b2resp, headers, nil); err != nil { return nil, err } return &URL{ uri: b2resp.URI, token: b2resp.Token, b2: b.b2, bucket: b, }, nil } // File represents a B2 file. type File struct { Name string Size int64 Status string Timestamp time.Time Info *FileInfo id string b2 *B2 } // File returns a bare File struct, but with the appropriate id and b2 // interfaces. func (b *Bucket) File(id, name string) *File { return &File{id: id, b2: b.b2, Name: name} } // UploadFile wraps b2_upload_file. 
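//
// Callers must supply the SHA1 up front, so small payloads are typically
// buffered while hashing; a sketch along the lines of this package's own
// tests (src is assumed):
//
//	h := sha1.New()
//	buf := &bytes.Buffer{}
//	if _, err := io.Copy(io.MultiWriter(h, buf), src); err != nil {
//		return err
//	}
//	sum := fmt.Sprintf("%x", h.Sum(nil))
//	f, err := url.UploadFile(ctx, buf, buf.Len(), "name", "application/octet-stream", sum, nil)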
func (url *URL) UploadFile(ctx context.Context, r io.Reader, size int, name, contentType, sha1 string, info map[string]string) (*File, error) { headers := map[string]string{ "Authorization": url.token, "X-Bz-File-Name": name, "Content-Type": contentType, "Content-Length": fmt.Sprintf("%d", size), "X-Bz-Content-Sha1": sha1, } for k, v := range info { headers[fmt.Sprintf("X-Bz-Info-%s", k)] = v } b2resp := &b2types.UploadFileResponse{} if err := url.b2.opts.makeRequest(ctx, "b2_upload_file", "POST", url.uri, nil, b2resp, headers, &requestBody{body: r, size: int64(size)}); err != nil { return nil, err } return &File{ Name: name, Size: int64(size), Timestamp: millitime(b2resp.Timestamp), Status: b2resp.Action, id: b2resp.FileID, b2: url.b2, }, nil } // DeleteFileVersion wraps b2_delete_file_version. func (f *File) DeleteFileVersion(ctx context.Context) error { b2req := &b2types.DeleteFileVersionRequest{ Name: f.Name, FileID: f.id, } headers := map[string]string{ "Authorization": f.b2.authToken, } return f.b2.opts.makeRequest(ctx, "b2_delete_file_version", "POST", f.b2.apiURI+b2types.V1api+"b2_delete_file_version", b2req, nil, headers, nil) } // LargeFile holds information necessary to implement B2 large file support. type LargeFile struct { id string b2 *B2 mu sync.Mutex size int64 hashes map[int]string } // StartLargeFile wraps b2_start_large_file. func (b *Bucket) StartLargeFile(ctx context.Context, name, contentType string, info map[string]string) (*LargeFile, error) { b2req := &b2types.StartLargeFileRequest{ BucketID: b.id, Name: name, ContentType: contentType, Info: info, } b2resp := &b2types.StartLargeFileResponse{} headers := map[string]string{ "Authorization": b.b2.authToken, } if err := b.b2.opts.makeRequest(ctx, "b2_start_large_file", "POST", b.b2.apiURI+b2types.V1api+"b2_start_large_file", b2req, b2resp, headers, nil); err != nil { return nil, err } return &LargeFile{ id: b2resp.ID, b2: b.b2, hashes: make(map[int]string), }, nil } // CancelLargeFile wraps b2_cancel_large_file. func (l *LargeFile) CancelLargeFile(ctx context.Context) error { b2req := &b2types.CancelLargeFileRequest{ ID: l.id, } headers := map[string]string{ "Authorization": l.b2.authToken, } return l.b2.opts.makeRequest(ctx, "b2_cancel_large_file", "POST", l.b2.apiURI+b2types.V1api+"b2_cancel_large_file", b2req, nil, headers, nil) } // FilePart is a piece of a started, but not finished, large file upload. type FilePart struct { Number int SHA1 string Size int64 } // ListParts wraps b2_list_parts. func (f *File) ListParts(ctx context.Context, next, count int) ([]*FilePart, int, error) { b2req := &b2types.ListPartsRequest{ ID: f.id, Start: next, Count: count, } b2resp := &b2types.ListPartsResponse{} headers := map[string]string{ "Authorization": f.b2.authToken, } if err := f.b2.opts.makeRequest(ctx, "b2_list_parts", "POST", f.b2.apiURI+b2types.V1api+"b2_list_parts", b2req, b2resp, headers, nil); err != nil { return nil, 0, err } var parts []*FilePart for _, part := range b2resp.Parts { parts = append(parts, &FilePart{ Number: part.Number, SHA1: part.SHA1, Size: part.Size, }) } return parts, b2resp.Next, nil } // CompileParts returns a LargeFile that can accept new data. Seen is a // mapping of completed part numbers to SHA1 strings; size is the total size of // all the completed parts to this point. 
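//
// A resumption sketch: collect the parts that already exist (looping over
// ListParts pages as needed), rebuild the large file, and keep uploading the
// missing indices:
//
//	parts, _, err := f.ListParts(ctx, 1, 100)
//	if err != nil {
//		return err
//	}
//	seen := make(map[int]string)
//	var size int64
//	for _, p := range parts {
//		seen[p.Number] = p.SHA1
//		size += p.Size
//	}
//	lf := f.CompileParts(size, seen)
//	fc, err := lf.GetUploadPartURL(ctx) // then UploadPart the parts not in seen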
func (f *File) CompileParts(size int64, seen map[int]string) *LargeFile { s := make(map[int]string) for k, v := range seen { s[k] = v } return &LargeFile{ id: f.id, b2: f.b2, size: size, hashes: s, } } // FileChunk holds information necessary for uploading file chunks. type FileChunk struct { url string token string file *LargeFile } type getUploadPartURLRequest struct { ID string `json:"fileId"` } type getUploadPartURLResponse struct { URL string `json:"uploadUrl"` Token string `json:"authorizationToken"` } // GetUploadPartURL wraps b2_get_upload_part_url. func (l *LargeFile) GetUploadPartURL(ctx context.Context) (*FileChunk, error) { b2req := &getUploadPartURLRequest{ ID: l.id, } b2resp := &getUploadPartURLResponse{} headers := map[string]string{ "Authorization": l.b2.authToken, } if err := l.b2.opts.makeRequest(ctx, "b2_get_upload_part_url", "POST", l.b2.apiURI+b2types.V1api+"b2_get_upload_part_url", b2req, b2resp, headers, nil); err != nil { return nil, err } return &FileChunk{ url: b2resp.URL, token: b2resp.Token, file: l, }, nil } // Reload reloads FileChunk in-place. func (fc *FileChunk) Reload(ctx context.Context) error { n, err := fc.file.GetUploadPartURL(ctx) if err != nil { return err } fc.url = n.url fc.token = n.token return nil } // UploadPart wraps b2_upload_part. func (fc *FileChunk) UploadPart(ctx context.Context, r io.Reader, sha1 string, size, index int) (int, error) { headers := map[string]string{ "Authorization": fc.token, "X-Bz-Part-Number": fmt.Sprintf("%d", index), "Content-Length": fmt.Sprintf("%d", size), "X-Bz-Content-Sha1": sha1, } if sha1 == "hex_digits_at_end" { r = &keepFinalBytes{r: r, remain: size} } if err := fc.file.b2.opts.makeRequest(ctx, "b2_upload_part", "POST", fc.url, nil, nil, headers, &requestBody{body: r, size: int64(size)}); err != nil { return 0, err } fc.file.mu.Lock() if sha1 == "hex_digits_at_end" { sha1 = string(r.(*keepFinalBytes).sha[:]) } fc.file.hashes[index] = sha1 fc.file.size += int64(size) fc.file.mu.Unlock() return size, nil } // FinishLargeFile wraps b2_finish_large_file. func (l *LargeFile) FinishLargeFile(ctx context.Context) (*File, error) { l.mu.Lock() defer l.mu.Unlock() b2req := &b2types.FinishLargeFileRequest{ ID: l.id, Hashes: make([]string, len(l.hashes)), } b2resp := &b2types.FinishLargeFileResponse{} for k, v := range l.hashes { if len(b2req.Hashes) < k { return nil, fmt.Errorf("b2_finish_large_file: invalid index %d", k) } b2req.Hashes[k-1] = v } headers := map[string]string{ "Authorization": l.b2.authToken, } if err := l.b2.opts.makeRequest(ctx, "b2_finish_large_file", "POST", l.b2.apiURI+b2types.V1api+"b2_finish_large_file", b2req, b2resp, headers, nil); err != nil { return nil, err } return &File{ Name: b2resp.Name, Size: l.size, Timestamp: millitime(b2resp.Timestamp), Status: b2resp.Action, id: b2resp.FileID, b2: l.b2, }, nil } // ListUnfinishedLargeFiles wraps b2_list_unfinished_large_files. 
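//
// Like the other listing calls it pages with a continuation token; a draining
// sketch, assuming (as with the other list calls) that an empty token marks
// the final page:
//
//	var cont string
//	for {
//		files, next, err := bucket.ListUnfinishedLargeFiles(ctx, 100, cont)
//		if err != nil {
//			return err
//		}
//		// ... inspect or cancel files ...
//		if next == "" {
//			break
//		}
//		cont = next
//	}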
func (b *Bucket) ListUnfinishedLargeFiles(ctx context.Context, count int, continuation string) ([]*File, string, error) { b2req := &b2types.ListUnfinishedLargeFilesRequest{ BucketID: b.id, Continuation: continuation, Count: count, } b2resp := &b2types.ListUnfinishedLargeFilesResponse{} headers := map[string]string{ "Authorization": b.b2.authToken, } if err := b.b2.opts.makeRequest(ctx, "b2_list_unfinished_large_files", "POST", b.b2.apiURI+b2types.V1api+"b2_list_unfinished_large_files", b2req, b2resp, headers, nil); err != nil { return nil, "", err } cont := b2resp.Continuation var files []*File for _, f := range b2resp.Files { files = append(files, &File{ Name: f.Name, Timestamp: millitime(f.Timestamp), b2: b.b2, id: f.FileID, Info: &FileInfo{ Name: f.Name, ContentType: f.ContentType, Info: f.Info, Timestamp: millitime(f.Timestamp), }, }) } return files, cont, nil } // ListFileNames wraps b2_list_file_names. func (b *Bucket) ListFileNames(ctx context.Context, count int, continuation, prefix, delimiter string) ([]*File, string, error) { b2req := &b2types.ListFileNamesRequest{ Count: count, Continuation: continuation, BucketID: b.id, Prefix: prefix, Delimiter: delimiter, } b2resp := &b2types.ListFileNamesResponse{} headers := map[string]string{ "Authorization": b.b2.authToken, } if err := b.b2.opts.makeRequest(ctx, "b2_list_file_names", "POST", b.b2.apiURI+b2types.V1api+"b2_list_file_names", b2req, b2resp, headers, nil); err != nil { return nil, "", err } cont := b2resp.Continuation var files []*File for _, f := range b2resp.Files { files = append(files, &File{ Name: f.Name, Size: f.Size, Status: f.Action, Timestamp: millitime(f.Timestamp), Info: &FileInfo{ Name: f.Name, SHA1: f.SHA1, Size: f.Size, ContentType: f.ContentType, Info: f.Info, Status: f.Action, Timestamp: millitime(f.Timestamp), }, id: f.FileID, b2: b.b2, }) } return files, cont, nil } // ListFileVersions wraps b2_list_file_versions. func (b *Bucket) ListFileVersions(ctx context.Context, count int, startName, startID, prefix, delimiter string) ([]*File, string, string, error) { b2req := &b2types.ListFileVersionsRequest{ BucketID: b.id, Count: count, StartName: startName, StartID: startID, Prefix: prefix, Delimiter: delimiter, } b2resp := &b2types.ListFileVersionsResponse{} headers := map[string]string{ "Authorization": b.b2.authToken, } if err := b.b2.opts.makeRequest(ctx, "b2_list_file_versions", "POST", b.b2.apiURI+b2types.V1api+"b2_list_file_versions", b2req, b2resp, headers, nil); err != nil { return nil, "", "", err } var files []*File for _, f := range b2resp.Files { files = append(files, &File{ Name: f.Name, Size: f.Size, Status: f.Action, Timestamp: millitime(f.Timestamp), Info: &FileInfo{ Name: f.Name, SHA1: f.SHA1, Size: f.Size, ContentType: f.ContentType, Info: f.Info, Status: f.Action, Timestamp: millitime(f.Timestamp), }, id: f.FileID, b2: b.b2, }) } return files, b2resp.NextName, b2resp.NextID, nil } // GetDownloadAuthorization wraps b2_get_download_authorization. 
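//
// The returned token authorizes downloads under the given prefix. B2 accepts
// it as an Authorization query parameter; the exact URL shape below is this
// example's assumption, mirroring DownloadFileByName:
//
//	token, err := bucket.GetDownloadAuthorization(ctx, "photos/", time.Hour)
//	if err != nil {
//		return err
//	}
//	uri := fmt.Sprintf("%s/file/%s/photos/kitten.jpg?Authorization=%s",
//		bucket.BaseURL(), bucket.Name, token)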
func (b *Bucket) GetDownloadAuthorization(ctx context.Context, prefix string, valid time.Duration) (string, error) { b2req := &b2types.GetDownloadAuthorizationRequest{ BucketID: b.id, Prefix: prefix, Valid: int(valid.Seconds()), } b2resp := &b2types.GetDownloadAuthorizationResponse{} headers := map[string]string{ "Authorization": b.b2.authToken, } if err := b.b2.opts.makeRequest(ctx, "b2_get_download_authorization", "POST", b.b2.apiURI+b2types.V1api+"b2_get_download_authorization", b2req, b2resp, headers, nil); err != nil { return "", err } return b2resp.Token, nil } // FileReader is an io.ReadCloser that downloads a file from B2. type FileReader struct { io.ReadCloser ContentLength int ContentType string SHA1 string ID string Info map[string]string } func mkRange(offset, size int64) string { if offset == 0 && size == 0 { return "" } if size == 0 { return fmt.Sprintf("bytes=%d-", offset) } return fmt.Sprintf("bytes=%d-%d", offset, offset+size-1) } // DownloadFileByName wraps b2_download_file_by_name. func (b *Bucket) DownloadFileByName(ctx context.Context, name string, offset, size int64) (*FileReader, error) { uri := fmt.Sprintf("%s/file/%s/%s", b.b2.downloadURI, b.Name, escape(name)) req, err := http.NewRequest("GET", uri, nil) if err != nil { return nil, err } req.Header.Set("Authorization", b.b2.authToken) req.Header.Set("X-Blazer-Request-ID", fmt.Sprintf("%d", atomic.AddInt64(&reqID, 1))) req.Header.Set("X-Blazer-Method", "b2_download_file_by_name") b.b2.opts.addHeaders(req) rng := mkRange(offset, size) if rng != "" { req.Header.Set("Range", rng) } logRequest(req, nil) resp, err := makeNetRequest(ctx, req, b.b2.opts.getTransport()) if err != nil { return nil, err } logResponse(resp, nil) if resp.StatusCode != 200 && resp.StatusCode != 206 { defer resp.Body.Close() return nil, mkErr(resp) } clen, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) if err != nil { resp.Body.Close() return nil, err } info := make(map[string]string) for key := range resp.Header { if !strings.HasPrefix(key, "X-Bz-Info-") { continue } name, err := unescape(strings.TrimPrefix(key, "X-Bz-Info-")) if err != nil { resp.Body.Close() return nil, err } val, err := unescape(resp.Header.Get(key)) if err != nil { resp.Body.Close() return nil, err } info[name] = val } return &FileReader{ ReadCloser: resp.Body, SHA1: resp.Header.Get("X-Bz-Content-Sha1"), ID: resp.Header.Get("X-Bz-File-Id"), ContentType: resp.Header.Get("Content-Type"), ContentLength: int(clen), Info: info, }, nil } // HideFile wraps b2_hide_file. func (b *Bucket) HideFile(ctx context.Context, name string) (*File, error) { b2req := &b2types.HideFileRequest{ BucketID: b.id, File: name, } b2resp := &b2types.HideFileResponse{} headers := map[string]string{ "Authorization": b.b2.authToken, } if err := b.b2.opts.makeRequest(ctx, "b2_hide_file", "POST", b.b2.apiURI+b2types.V1api+"b2_hide_file", b2req, b2resp, headers, nil); err != nil { return nil, err } return &File{ Status: b2resp.Action, Name: name, Timestamp: millitime(b2resp.Timestamp), b2: b.b2, id: b2resp.ID, }, nil } // FileInfo holds information about a specific file. type FileInfo struct { Name string SHA1 string Size int64 ContentType string Info map[string]string Status string Timestamp time.Time } // GetFileInfo wraps b2_get_file_info. 
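//
// A small sketch (f is obtained from Bucket.File or from a listing call):
//
//	info, err := f.GetFileInfo(ctx)
//	if err != nil {
//		return err
//	}
//	fmt.Println(info.Name, info.Size, info.SHA1, info.ContentType)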
func (f *File) GetFileInfo(ctx context.Context) (*FileInfo, error) { b2req := &b2types.GetFileInfoRequest{ ID: f.id, } b2resp := &b2types.GetFileInfoResponse{} headers := map[string]string{ "Authorization": f.b2.authToken, } if err := f.b2.opts.makeRequest(ctx, "b2_get_file_info", "POST", f.b2.apiURI+b2types.V1api+"b2_get_file_info", b2req, b2resp, headers, nil); err != nil { return nil, err } f.Status = b2resp.Action f.Name = b2resp.Name f.Timestamp = millitime(b2resp.Timestamp) f.Info = &FileInfo{ Name: b2resp.Name, SHA1: b2resp.SHA1, Size: b2resp.Size, ContentType: b2resp.ContentType, Info: b2resp.Info, Status: b2resp.Action, Timestamp: millitime(b2resp.Timestamp), } return f.Info, nil } blazer-0.4.4/base/integration_test.go000066400000000000000000000475321330654575600176520ustar00rootroot00000000000000// Copyright 2016, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package base import ( "bytes" "crypto/sha1" "encoding/json" "fmt" "io" "os" "reflect" "strings" "testing" "time" "github.com/kurin/blazer/x/transport" "context" ) const ( apiID = "B2_ACCOUNT_ID" apiKey = "B2_SECRET_KEY" ) const ( bucketName = "base-tests" smallFileName = "TeenyTiny" largeFileName = "BigBytes" ) type zReader struct{} func (zReader) Read(p []byte) (int, error) { return len(p), nil } func TestStorage(t *testing.T) { id := os.Getenv(apiID) key := os.Getenv(apiKey) if id == "" || key == "" { t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests") } ctx := context.Background() // b2_authorize_account b2, err := AuthorizeAccount(ctx, id, key, UserAgent("blazer-base-test")) if err != nil { t.Fatal(err) } // b2_create_bucket infoKey := "key" infoVal := "val" m := map[string]string{infoKey: infoVal} rules := []LifecycleRule{ { Prefix: "what/", DaysNewUntilHidden: 5, }, } bname := id + "-" + bucketName bucket, err := b2.CreateBucket(ctx, bname, "", m, rules) if err != nil { t.Fatal(err) } if bucket.Info[infoKey] != infoVal { t.Errorf("%s: bucketInfo[%q] got %q, want %q", bucket.Name, infoKey, bucket.Info[infoKey], infoVal) } if len(bucket.LifecycleRules) != 1 { t.Errorf("%s: lifecycle rules: got %d rules, wanted 1", bucket.Name, len(bucket.LifecycleRules)) } defer func() { // b2_delete_bucket if err := bucket.DeleteBucket(ctx); err != nil { t.Error(err) } }() // b2_update_bucket bucket.Info["new"] = "yay" bucket.LifecycleRules = nil // Unset options should be a noop. 
newBucket, err := bucket.Update(ctx) if err != nil { t.Errorf("%s: update bucket: %v", bucket.Name, err) return } bucket = newBucket if bucket.Info["new"] != "yay" { t.Errorf("%s: info key \"new\": got %s, want \"yay\"", bucket.Name, bucket.Info["new"]) } if len(bucket.LifecycleRules) != 1 { t.Errorf("%s: lifecycle rules: got %d rules, wanted 1", bucket.Name, len(bucket.LifecycleRules)) } // b2_list_buckets buckets, err := b2.ListBuckets(ctx) if err != nil { t.Fatal(err) } var found bool for _, bucket := range buckets { if bucket.Name == bname { found = true break } } if !found { t.Errorf("%s: new bucket not found", bname) } // b2_get_upload_url ue, err := bucket.GetUploadURL(ctx) if err != nil { t.Fatal(err) } // b2_upload_file smallFile := io.LimitReader(zReader{}, 1024*50) // 50k hash := sha1.New() buf := &bytes.Buffer{} w := io.MultiWriter(hash, buf) if _, err := io.Copy(w, smallFile); err != nil { t.Error(err) } smallSHA1 := fmt.Sprintf("%x", hash.Sum(nil)) smallInfoMap := map[string]string{ "one": "1", "two": "2", } file, err := ue.UploadFile(ctx, buf, buf.Len(), smallFileName, "application/octet-stream", smallSHA1, smallInfoMap) if err != nil { t.Fatal(err) } defer func() { // b2_delete_file_version if err := file.DeleteFileVersion(ctx); err != nil { t.Error(err) } }() // b2_start_large_file largeInfoMap := map[string]string{ "one_billion": "1e9", "two_trillion": "2eSomething, I guess 2e12", } lf, err := bucket.StartLargeFile(ctx, largeFileName, "application/octet-stream", largeInfoMap) if err != nil { t.Fatal(err) } // b2_get_upload_part_url fc, err := lf.GetUploadPartURL(ctx) if err != nil { t.Fatal(err) } // b2_upload_part largeFile := io.LimitReader(zReader{}, 10e6) // 10M for i := 0; i < 2; i++ { r := io.LimitReader(largeFile, 5e6) // 5M hash := sha1.New() buf := &bytes.Buffer{} w := io.MultiWriter(hash, buf) if _, err := io.Copy(w, r); err != nil { t.Error(err) } if _, err := fc.UploadPart(ctx, buf, fmt.Sprintf("%x", hash.Sum(nil)), buf.Len(), i+1); err != nil { t.Error(err) } } // b2_finish_large_file lfile, err := lf.FinishLargeFile(ctx) if err != nil { t.Fatal(err) } // b2_get_file_info smallInfo, err := file.GetFileInfo(ctx) if err != nil { t.Fatal(err) } compareFileAndInfo(t, smallInfo, smallFileName, smallSHA1, smallInfoMap) largeInfo, err := lfile.GetFileInfo(ctx) if err != nil { t.Fatal(err) } compareFileAndInfo(t, largeInfo, largeFileName, "none", largeInfoMap) defer func() { if err := lfile.DeleteFileVersion(ctx); err != nil { t.Error(err) } }() clf, err := bucket.StartLargeFile(ctx, largeFileName, "application/octet-stream", nil) if err != nil { t.Fatal(err) } // b2_cancel_large_file if err := clf.CancelLargeFile(ctx); err != nil { t.Fatal(err) } // b2_list_file_names files, _, err := bucket.ListFileNames(ctx, 100, "", "", "") if err != nil { t.Fatal(err) } if len(files) != 2 { t.Errorf("expected 2 files, got %d: %v", len(files), files) } // b2_download_file_by_name fr, err := bucket.DownloadFileByName(ctx, smallFileName, 0, 0) if err != nil { t.Fatal(err) } if fr.SHA1 != smallSHA1 { t.Errorf("small file SHAs don't match: got %q, want %q", fr.SHA1, smallSHA1) } lbuf := &bytes.Buffer{} if _, err := io.Copy(lbuf, fr); err != nil { t.Fatal(err) } if lbuf.Len() != fr.ContentLength { t.Errorf("small file retreived lengths don't match: got %d, want %d", lbuf.Len(), fr.ContentLength) } // b2_hide_file hf, err := bucket.HideFile(ctx, smallFileName) if err != nil { t.Fatal(err) } defer func() { if err := hf.DeleteFileVersion(ctx); err != nil { t.Error(err) } }() // 
b2_list_file_versions files, _, _, err = bucket.ListFileVersions(ctx, 100, "", "", "", "") if err != nil { t.Fatal(err) } if len(files) != 3 { t.Errorf("expected 3 files, got %d: %v", len(files), files) } // b2_get_download_authorization if _, err := bucket.GetDownloadAuthorization(ctx, "foo/", 24*time.Hour); err != nil { t.Errorf("failed to get download auth token: %v", err) } } func TestUploadAuthAfterConnectionHang(t *testing.T) { id := os.Getenv(apiID) key := os.Getenv(apiKey) if id == "" || key == "" { t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests") } ctx := context.Background() hung := make(chan struct{}) // An http.RoundTripper that dies after sending ~10k bytes. hang := func() { close(hung) select {} } tport := transport.WithFailures(nil, transport.AfterNBytes(10000, hang)) b2, err := AuthorizeAccount(ctx, id, key, Transport(tport)) if err != nil { t.Fatal(err) } bname := id + "-" + bucketName bucket, err := b2.CreateBucket(ctx, bname, "", nil, nil) if err != nil { t.Fatal(err) } defer func() { if err := bucket.DeleteBucket(ctx); err != nil { t.Error(err) } }() ue, err := bucket.GetUploadURL(ctx) if err != nil { t.Fatal(err) } smallFile := io.LimitReader(zReader{}, 1024*50) // 50k hash := sha1.New() buf := &bytes.Buffer{} w := io.MultiWriter(hash, buf) if _, err := io.Copy(w, smallFile); err != nil { t.Error(err) } smallSHA1 := fmt.Sprintf("%x", hash.Sum(nil)) go func() { ue.UploadFile(ctx, buf, buf.Len(), smallFileName, "application/octet-stream", smallSHA1, nil) t.Fatal("this ought not to be reachable") }() <-hung // Do the whole thing again with the same upload auth, before the remote end // notices we're gone. smallFile = io.LimitReader(zReader{}, 1024*50) // 50k again buf.Reset() if _, err := io.Copy(buf, smallFile); err != nil { t.Error(err) } file, err := ue.UploadFile(ctx, buf, buf.Len(), smallFileName, "application/octet-stream", smallSHA1, nil) if err == nil { t.Error("expected an error, got none") if err := file.DeleteFileVersion(ctx); err != nil { t.Error(err) } } if Action(err) != AttemptNewUpload { t.Errorf("Action(%v): got %v, want AttemptNewUpload", err, Action(err)) } } func TestCancelledContextCancelsHTTPRequest(t *testing.T) { id := os.Getenv(apiID) key := os.Getenv(apiKey) if id == "" || key == "" { t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests") } ctx := context.Background() tport := transport.WithFailures(nil, transport.MatchPathSubstring("b2_upload_file"), transport.FailureRate(1), transport.Stall(2*time.Second)) b2, err := AuthorizeAccount(ctx, id, key, Transport(tport)) if err != nil { t.Fatal(err) } bname := id + "-" + bucketName bucket, err := b2.CreateBucket(ctx, bname, "", nil, nil) if err != nil { t.Fatal(err) } defer func() { if err := bucket.DeleteBucket(ctx); err != nil { t.Error(err) } }() ue, err := bucket.GetUploadURL(ctx) if err != nil { t.Fatal(err) } smallFile := io.LimitReader(zReader{}, 1024*50) // 50k hash := sha1.New() buf := &bytes.Buffer{} w := io.MultiWriter(hash, buf) if _, err := io.Copy(w, smallFile); err != nil { t.Error(err) } smallSHA1 := fmt.Sprintf("%x", hash.Sum(nil)) cctx, cancel := context.WithCancel(ctx) go func() { time.Sleep(1) cancel() }() if _, err := ue.UploadFile(cctx, buf, buf.Len(), smallFileName, "application/octet-stream", smallSHA1, nil); err != context.Canceled { t.Errorf("expected canceled context, but got %v", err) } } func TestDeadlineExceededContextCancelsHTTPRequest(t *testing.T) { id := os.Getenv(apiID) key := os.Getenv(apiKey) if id == "" || key == "" 
{ t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests") } ctx := context.Background() tport := transport.WithFailures(nil, transport.MatchPathSubstring("b2_upload_file"), transport.FailureRate(1), transport.Stall(2*time.Second)) b2, err := AuthorizeAccount(ctx, id, key, Transport(tport)) if err != nil { t.Fatal(err) } bname := id + "-" + bucketName bucket, err := b2.CreateBucket(ctx, bname, "", nil, nil) if err != nil { t.Fatal(err) } defer func() { if err := bucket.DeleteBucket(ctx); err != nil { t.Error(err) } }() ue, err := bucket.GetUploadURL(ctx) if err != nil { t.Fatal(err) } smallFile := io.LimitReader(zReader{}, 1024*50) // 50k hash := sha1.New() buf := &bytes.Buffer{} w := io.MultiWriter(hash, buf) if _, err := io.Copy(w, smallFile); err != nil { t.Error(err) } smallSHA1 := fmt.Sprintf("%x", hash.Sum(nil)) cctx, cancel := context.WithTimeout(ctx, time.Second) defer cancel() if _, err := ue.UploadFile(cctx, buf, buf.Len(), smallFileName, "application/octet-stream", smallSHA1, nil); err != context.DeadlineExceeded { t.Errorf("expected deadline exceeded error, but got %v", err) } } func compareFileAndInfo(t *testing.T, info *FileInfo, name, sha1 string, imap map[string]string) { if info.Name != name { t.Errorf("got %q, want %q", info.Name, name) } if info.SHA1 != sha1 { t.Errorf("got %q, want %q", info.SHA1, sha1) } if !reflect.DeepEqual(info.Info, imap) { t.Errorf("got %v, want %v", info.Info, imap) } } // from https://www.backblaze.com/b2/docs/string_encoding.html var testCases = `[ {"fullyEncoded": "%20", "minimallyEncoded": "+", "string": " "}, {"fullyEncoded": "%21", "minimallyEncoded": "!", "string": "!"}, {"fullyEncoded": "%22", "minimallyEncoded": "%22", "string": "\""}, {"fullyEncoded": "%23", "minimallyEncoded": "%23", "string": "#"}, {"fullyEncoded": "%24", "minimallyEncoded": "$", "string": "$"}, {"fullyEncoded": "%25", "minimallyEncoded": "%25", "string": "%"}, {"fullyEncoded": "%26", "minimallyEncoded": "%26", "string": "&"}, {"fullyEncoded": "%27", "minimallyEncoded": "'", "string": "'"}, {"fullyEncoded": "%28", "minimallyEncoded": "(", "string": "("}, {"fullyEncoded": "%29", "minimallyEncoded": ")", "string": ")"}, {"fullyEncoded": "%2A", "minimallyEncoded": "*", "string": "*"}, {"fullyEncoded": "%2B", "minimallyEncoded": "%2B", "string": "+"}, {"fullyEncoded": "%2C", "minimallyEncoded": "%2C", "string": ","}, {"fullyEncoded": "%2D", "minimallyEncoded": "-", "string": "-"}, {"fullyEncoded": "%2E", "minimallyEncoded": ".", "string": "."}, {"fullyEncoded": "/", "minimallyEncoded": "/", "string": "/"}, {"fullyEncoded": "%30", "minimallyEncoded": "0", "string": "0"}, {"fullyEncoded": "%31", "minimallyEncoded": "1", "string": "1"}, {"fullyEncoded": "%32", "minimallyEncoded": "2", "string": "2"}, {"fullyEncoded": "%33", "minimallyEncoded": "3", "string": "3"}, {"fullyEncoded": "%34", "minimallyEncoded": "4", "string": "4"}, {"fullyEncoded": "%35", "minimallyEncoded": "5", "string": "5"}, {"fullyEncoded": "%36", "minimallyEncoded": "6", "string": "6"}, {"fullyEncoded": "%37", "minimallyEncoded": "7", "string": "7"}, {"fullyEncoded": "%38", "minimallyEncoded": "8", "string": "8"}, {"fullyEncoded": "%39", "minimallyEncoded": "9", "string": "9"}, {"fullyEncoded": "%3A", "minimallyEncoded": ":", "string": ":"}, {"fullyEncoded": "%3B", "minimallyEncoded": ";", "string": ";"}, {"fullyEncoded": "%3C", "minimallyEncoded": "%3C", "string": "<"}, {"fullyEncoded": "%3D", "minimallyEncoded": "=", "string": "="}, {"fullyEncoded": "%3E", "minimallyEncoded": "%3E", 
"string": ">"}, {"fullyEncoded": "%3F", "minimallyEncoded": "%3F", "string": "?"}, {"fullyEncoded": "%40", "minimallyEncoded": "@", "string": "@"}, {"fullyEncoded": "%41", "minimallyEncoded": "A", "string": "A"}, {"fullyEncoded": "%42", "minimallyEncoded": "B", "string": "B"}, {"fullyEncoded": "%43", "minimallyEncoded": "C", "string": "C"}, {"fullyEncoded": "%44", "minimallyEncoded": "D", "string": "D"}, {"fullyEncoded": "%45", "minimallyEncoded": "E", "string": "E"}, {"fullyEncoded": "%46", "minimallyEncoded": "F", "string": "F"}, {"fullyEncoded": "%47", "minimallyEncoded": "G", "string": "G"}, {"fullyEncoded": "%48", "minimallyEncoded": "H", "string": "H"}, {"fullyEncoded": "%49", "minimallyEncoded": "I", "string": "I"}, {"fullyEncoded": "%4A", "minimallyEncoded": "J", "string": "J"}, {"fullyEncoded": "%4B", "minimallyEncoded": "K", "string": "K"}, {"fullyEncoded": "%4C", "minimallyEncoded": "L", "string": "L"}, {"fullyEncoded": "%4D", "minimallyEncoded": "M", "string": "M"}, {"fullyEncoded": "%4E", "minimallyEncoded": "N", "string": "N"}, {"fullyEncoded": "%4F", "minimallyEncoded": "O", "string": "O"}, {"fullyEncoded": "%50", "minimallyEncoded": "P", "string": "P"}, {"fullyEncoded": "%51", "minimallyEncoded": "Q", "string": "Q"}, {"fullyEncoded": "%52", "minimallyEncoded": "R", "string": "R"}, {"fullyEncoded": "%53", "minimallyEncoded": "S", "string": "S"}, {"fullyEncoded": "%54", "minimallyEncoded": "T", "string": "T"}, {"fullyEncoded": "%55", "minimallyEncoded": "U", "string": "U"}, {"fullyEncoded": "%56", "minimallyEncoded": "V", "string": "V"}, {"fullyEncoded": "%57", "minimallyEncoded": "W", "string": "W"}, {"fullyEncoded": "%58", "minimallyEncoded": "X", "string": "X"}, {"fullyEncoded": "%59", "minimallyEncoded": "Y", "string": "Y"}, {"fullyEncoded": "%5A", "minimallyEncoded": "Z", "string": "Z"}, {"fullyEncoded": "%5B", "minimallyEncoded": "%5B", "string": "["}, {"fullyEncoded": "%5C", "minimallyEncoded": "%5C", "string": "\\"}, {"fullyEncoded": "%5D", "minimallyEncoded": "%5D", "string": "]"}, {"fullyEncoded": "%5E", "minimallyEncoded": "%5E", "string": "^"}, {"fullyEncoded": "%5F", "minimallyEncoded": "_", "string": "_"}, {"fullyEncoded": "%60", "minimallyEncoded": "%60", "string": "` + "`" + `"}, {"fullyEncoded": "%61", "minimallyEncoded": "a", "string": "a"}, {"fullyEncoded": "%62", "minimallyEncoded": "b", "string": "b"}, {"fullyEncoded": "%63", "minimallyEncoded": "c", "string": "c"}, {"fullyEncoded": "%64", "minimallyEncoded": "d", "string": "d"}, {"fullyEncoded": "%65", "minimallyEncoded": "e", "string": "e"}, {"fullyEncoded": "%66", "minimallyEncoded": "f", "string": "f"}, {"fullyEncoded": "%67", "minimallyEncoded": "g", "string": "g"}, {"fullyEncoded": "%68", "minimallyEncoded": "h", "string": "h"}, {"fullyEncoded": "%69", "minimallyEncoded": "i", "string": "i"}, {"fullyEncoded": "%6A", "minimallyEncoded": "j", "string": "j"}, {"fullyEncoded": "%6B", "minimallyEncoded": "k", "string": "k"}, {"fullyEncoded": "%6C", "minimallyEncoded": "l", "string": "l"}, {"fullyEncoded": "%6D", "minimallyEncoded": "m", "string": "m"}, {"fullyEncoded": "%6E", "minimallyEncoded": "n", "string": "n"}, {"fullyEncoded": "%6F", "minimallyEncoded": "o", "string": "o"}, {"fullyEncoded": "%70", "minimallyEncoded": "p", "string": "p"}, {"fullyEncoded": "%71", "minimallyEncoded": "q", "string": "q"}, {"fullyEncoded": "%72", "minimallyEncoded": "r", "string": "r"}, {"fullyEncoded": "%73", "minimallyEncoded": "s", "string": "s"}, {"fullyEncoded": "%74", "minimallyEncoded": "t", "string": "t"}, 
{"fullyEncoded": "%75", "minimallyEncoded": "u", "string": "u"}, {"fullyEncoded": "%76", "minimallyEncoded": "v", "string": "v"}, {"fullyEncoded": "%77", "minimallyEncoded": "w", "string": "w"}, {"fullyEncoded": "%78", "minimallyEncoded": "x", "string": "x"}, {"fullyEncoded": "%79", "minimallyEncoded": "y", "string": "y"}, {"fullyEncoded": "%7A", "minimallyEncoded": "z", "string": "z"}, {"fullyEncoded": "%7B", "minimallyEncoded": "%7B", "string": "{"}, {"fullyEncoded": "%7C", "minimallyEncoded": "%7C", "string": "|"}, {"fullyEncoded": "%7D", "minimallyEncoded": "%7D", "string": "}"}, {"fullyEncoded": "%7E", "minimallyEncoded": "~", "string": "~"}, {"fullyEncoded": "%7F", "minimallyEncoded": "%7F", "string": "\u007f"}, {"fullyEncoded": "%E8%87%AA%E7%94%B1", "minimallyEncoded": "%E8%87%AA%E7%94%B1", "string": "\u81ea\u7531"}, {"fullyEncoded": "%F0%90%90%80", "minimallyEncoded": "%F0%90%90%80", "string": "\ud801\udc00"} ]` type testCase struct { Full string `json:"fullyEncoded"` Min string `json:"minimallyEncoded"` Raw string `json:"string"` } func TestEscapes(t *testing.T) { dec := json.NewDecoder(strings.NewReader(testCases)) var tcs []testCase if err := dec.Decode(&tcs); err != nil { t.Fatal(err) } for _, tc := range tcs { en := escape(tc.Raw) if !(en == tc.Full || en == tc.Min) { t.Errorf("encode %q: got %q, want %q or %q", tc.Raw, en, tc.Min, tc.Full) } m, err := unescape(tc.Min) if err != nil { t.Errorf("decode %q: %v", tc.Min, err) } if m != tc.Raw { t.Errorf("decode %q: got %q, want %q", tc.Min, m, tc.Raw) } f, err := unescape(tc.Full) if err != nil { t.Errorf("decode %q: %v", tc.Full, err) } if f != tc.Raw { t.Errorf("decode %q: got %q, want %q", tc.Full, f, tc.Raw) } } } func TestUploadDownloadFilenameEscaping(t *testing.T) { filename := "file%foo.txt" id := os.Getenv(apiID) key := os.Getenv(apiKey) if id == "" || key == "" { t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests") } ctx := context.Background() // b2_authorize_account b2, err := AuthorizeAccount(ctx, id, key, UserAgent("blazer-base-test")) if err != nil { t.Fatal(err) } // b2_create_bucket bname := id + "-" + bucketName bucket, err := b2.CreateBucket(ctx, bname, "", nil, nil) if err != nil { t.Fatal(err) } defer func() { // b2_delete_bucket if err := bucket.DeleteBucket(ctx); err != nil { t.Error(err) } }() // b2_get_upload_url ue, err := bucket.GetUploadURL(ctx) if err != nil { t.Fatal(err) } // b2_upload_file smallFile := io.LimitReader(zReader{}, 128) hash := sha1.New() buf := &bytes.Buffer{} w := io.MultiWriter(hash, buf) if _, err := io.Copy(w, smallFile); err != nil { t.Error(err) } smallSHA1 := fmt.Sprintf("%x", hash.Sum(nil)) file, err := ue.UploadFile(ctx, buf, buf.Len(), filename, "application/octet-stream", smallSHA1, nil) if err != nil { t.Fatal(err) } defer func() { // b2_delete_file_version if err := file.DeleteFileVersion(ctx); err != nil { t.Error(err) } }() // b2_download_file_by_name fr, err := bucket.DownloadFileByName(ctx, filename, 0, 0) if err != nil { t.Fatal(err) } lbuf := &bytes.Buffer{} if _, err := io.Copy(lbuf, fr); err != nil { t.Fatal(err) } } blazer-0.4.4/base/strings.go000066400000000000000000000014401330654575600157450ustar00rootroot00000000000000// Copyright 2017, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package base import ( "net/url" "strings" ) func escape(s string) string { return strings.Replace(url.QueryEscape(s), "%2F", "/", -1) } func unescape(s string) (string, error) { return url.QueryUnescape(s) } blazer-0.4.4/base/strings_test.go000066400000000000000000000016671330654575600170170ustar00rootroot00000000000000package base import ( "fmt" "testing" ) func TestEncodeDecode(t *testing.T) { // crashes identified by go-fuzz origs := []string{ "&\x020000", "&\x020000\x9c", "&\x020\x9c0", "&\x0230j", "&\x02\x98000", "&\x02\x983\xc8j00", "00\x000", "00\x0000", "00\x0000000000000", "\x11\x030", } for _, orig := range origs { escaped := escape(orig) unescaped, err := unescape(escaped) if err != nil { t.Errorf("%s: orig: %#v, escaped: %#v, unescaped: %#v\n", err.Error(), orig, escaped, unescaped) continue } if unescaped != orig { t.Errorf("expected: %#v, got: %#v", orig, unescaped) } } } // hook for go-fuzz: https://github.com/dvyukov/go-fuzz func Fuzz(data []byte) int { orig := string(data) escaped := escape(orig) unescaped, err := unescape(escaped) if err != nil { return 0 } if unescaped != orig { panic(fmt.Sprintf("unescaped: \"%#v\", != orig: \"%#v\"", unescaped, orig)) } return 1 } blazer-0.4.4/examples/000077500000000000000000000000001330654575600146325ustar00rootroot00000000000000blazer-0.4.4/examples/simple/000077500000000000000000000000001330654575600161235ustar00rootroot00000000000000blazer-0.4.4/examples/simple/simple.go000066400000000000000000000056361330654575600177550ustar00rootroot00000000000000// Copyright 2017, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // This is a simple program that will copy named files into or out of B2. 
// // To copy a file into B2: // // B2_ACCOUNT_ID=foo B2_ACCOUNT_KEY=bar simple /path/to/file b2://bucket/path/to/dst // // To copy a file out: // // B2_ACCOUNT_ID=foo B2_ACCOUNT_KEY=bar simple b2://bucket/path/to/file /path/to/dst package main import ( "context" "flag" "fmt" "io" "net/url" "os" "strings" "github.com/kurin/blazer/b2" ) func main() { flag.Parse() b2id := os.Getenv("B2_ACCOUNT_ID") b2key := os.Getenv("B2_ACCOUNT_KEY") args := flag.Args() if len(args) != 2 { fmt.Printf("Usage:\n\nsimple [src] [dst]\n") return } src, dst := args[0], args[1] ctx := context.Background() c, err := b2.NewClient(ctx, b2id, b2key) if err != nil { fmt.Println(err) return } var r io.ReadCloser var w io.WriteCloser if strings.HasPrefix(src, "b2://") { reader, err := b2Reader(ctx, c, src) if err != nil { fmt.Println(err) return } r = reader } else { f, err := os.Open(src) if err != nil { fmt.Println(err) return } r = f } // Readers do not need their errors checked on close. (Also it's a little // silly to defer this in main(), but.) defer r.Close() if strings.HasPrefix(dst, "b2://") { writer, err := b2Writer(ctx, c, dst) if err != nil { fmt.Println(err) return } w = writer } else { f, err := os.Create(dst) if err != nil { fmt.Println(err) return } w = f } // Copy and check error. if _, err := io.Copy(w, r); err != nil { fmt.Println(err) return } // It is very important to check the error of the writer. if err := w.Close(); err != nil { fmt.Println(err) } } func b2Reader(ctx context.Context, c *b2.Client, path string) (io.ReadCloser, error) { o, err := b2Obj(ctx, c, path) if err != nil { return nil, err } return o.NewReader(ctx), nil } func b2Writer(ctx context.Context, c *b2.Client, path string) (io.WriteCloser, error) { o, err := b2Obj(ctx, c, path) if err != nil { return nil, err } return o.NewWriter(ctx), nil } func b2Obj(ctx context.Context, c *b2.Client, path string) (*b2.Object, error) { uri, err := url.Parse(path) if err != nil { return nil, err } bucket, err := c.Bucket(ctx, uri.Host) if err != nil { return nil, err } // B2 paths must not begin with /, so trim it here. return bucket.Object(strings.TrimPrefix(uri.Path, "/")), nil } blazer-0.4.4/internal/000077500000000000000000000000001330654575600146305ustar00rootroot00000000000000blazer-0.4.4/internal/b2assets/000077500000000000000000000000001330654575600163565ustar00rootroot00000000000000blazer-0.4.4/internal/b2assets/b2assets.go000066400000000000000000000155041330654575600204400ustar00rootroot00000000000000// Code generated by go-bindata. // sources: // data/status.html // DO NOT EDIT! 
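//
// This file is the go-bindata output for data/status.html and should be
// regenerated rather than edited by hand; assuming go-bindata is installed,
// the //go:generate directive in gen.go rebuilds it:
//
//	go generate github.com/kurin/blazer/internal/b2assets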
package b2assets import ( "bytes" "compress/gzip" "fmt" "io" "io/ioutil" "os" "path/filepath" "strings" "time" ) func bindataRead(data []byte, name string) ([]byte, error) { gz, err := gzip.NewReader(bytes.NewBuffer(data)) if err != nil { return nil, fmt.Errorf("Read %q: %v", name, err) } var buf bytes.Buffer _, err = io.Copy(&buf, gz) clErr := gz.Close() if err != nil { return nil, fmt.Errorf("Read %q: %v", name, err) } if clErr != nil { return nil, err } return buf.Bytes(), nil } type asset struct { bytes []byte info os.FileInfo } type bindataFileInfo struct { name string size int64 mode os.FileMode modTime time.Time } func (fi bindataFileInfo) Name() string { return fi.name } func (fi bindataFileInfo) Size() int64 { return fi.size } func (fi bindataFileInfo) Mode() os.FileMode { return fi.mode } func (fi bindataFileInfo) ModTime() time.Time { return fi.modTime } func (fi bindataFileInfo) IsDir() bool { return false } func (fi bindataFileInfo) Sys() interface{} { return nil } var _dataStatusHtml = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd4\x93\x41\x6f\xe3\x20\x10\x85\xef\xf9\x15\xb3\x56\x8e\x51\x90\x73\x5c\x4d\xb8\xec\xee\x79\xa3\xaa\x52\xd5\x23\x36\xa3\x60\x09\x43\x84\x71\x9a\xc8\xe2\xbf\x57\x18\x83\xa3\xb6\x87\x5e\x7b\xf2\x98\xf7\xe6\xf1\xbe\x03\xf8\xeb\xef\xff\x3f\xcf\xaf\xa7\x7f\xa0\x7c\xaf\xf9\x06\xf3\x87\x84\xe4\x1b\x00\xf4\x9d\xd7\xc4\x9b\x03\xb4\xba\x23\xe3\x61\xf0\xc2\x8f\x03\xb2\x74\xbe\x41\x96\x9c\xd8\x58\x79\x8f\x0b\xd3\xb4\xed\xc9\x2b\x2b\x07\xf8\x7d\x84\x3c\xee\x43\x48\x9a\x1c\x9d\xf0\x9d\x35\xb3\xba\xfe\x14\xdd\x8b\x46\x53\xd4\xd2\x90\xce\x51\xd5\xbc\xb5\xa3\xf1\xd0\xdc\xa1\xb5\x92\x90\xa9\x3a\xb5\x8b\xae\x38\xc5\x65\x27\xcc\x99\x60\xb9\x3e\x66\xe4\x26\x73\x48\x74\xbb\x64\x8d\xa3\xe4\xa5\x69\x08\xc8\xbc\xcc\x52\xc9\xc9\xed\xe6\xa4\x52\x75\xc9\x5a\x43\x3a\x23\xe9\x06\x4b\xf1\x7c\x79\xf1\x7f\xcc\x26\x23\x73\x1b\x96\xeb\xac\xa7\xc8\x0a\x50\x64\x1e\x2f\xda\x0a\x39\x64\xda\x87\x6e\x46\xf4\xb4\x83\xed\x55\xe8\xd8\x6e\xff\xe2\x3a\x4f\xae\x70\xaa\x03\x9f\xa6\x64\x82\x58\x40\x1d\x3e\xc1\x75\x72\x07\xdb\x8b\xb3\xe7\x99\xee\x2a\xf4\xfe\xe4\xec\xd9\xd1\xb0\x02\x46\xb4\x36\x3a\x43\x00\xbc\x2c\x2a\x5c\x85\x1e\xe9\x58\x4d\xd3\xbc\x1d\x42\x05\xbd\xb8\x1d\xab\xba\xe2\xc8\xb2\x89\x63\xe3\x80\x7d\x05\xfd\x80\xaa\x6a\x2e\xed\x9b\xf9\x26\xe1\x13\x09\xf9\xa3\x08\x91\xa5\x17\x81\x2c\xbd\xa8\xf7\x00\x00\x00\xff\xff\xd4\xf0\x90\xb4\x69\x03\x00\x00") func dataStatusHtmlBytes() ([]byte, error) { return bindataRead( _dataStatusHtml, "data/status.html", ) } func dataStatusHtml() (*asset, error) { bytes, err := dataStatusHtmlBytes() if err != nil { return nil, err } info := bindataFileInfo{name: "data/status.html", size: 873, mode: os.FileMode(436), modTime: time.Unix(1520578750, 0)} a := &asset{bytes: bytes, info: info} return a, nil } // Asset loads and returns the asset for the given name. // It returns an error if the asset could not be found or // could not be loaded. func Asset(name string) ([]byte, error) { cannonicalName := strings.Replace(name, "\\", "/", -1) if f, ok := _bindata[cannonicalName]; ok { a, err := f() if err != nil { return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) } return a.bytes, nil } return nil, fmt.Errorf("Asset %s not found", name) } // MustAsset is like Asset but panics when Asset would return an error. // It simplifies safe initialization of global variables. 
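//
// For example (a sketch; data/status.html is the only asset bundled in this
// package):
//
//	page := MustAsset("data/status.html")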
func MustAsset(name string) []byte { a, err := Asset(name) if err != nil { panic("asset: Asset(" + name + "): " + err.Error()) } return a } // AssetInfo loads and returns the asset info for the given name. // It returns an error if the asset could not be found or // could not be loaded. func AssetInfo(name string) (os.FileInfo, error) { cannonicalName := strings.Replace(name, "\\", "/", -1) if f, ok := _bindata[cannonicalName]; ok { a, err := f() if err != nil { return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) } return a.info, nil } return nil, fmt.Errorf("AssetInfo %s not found", name) } // AssetNames returns the names of the assets. func AssetNames() []string { names := make([]string, 0, len(_bindata)) for name := range _bindata { names = append(names, name) } return names } // _bindata is a table, holding each asset generator, mapped to its name. var _bindata = map[string]func() (*asset, error){ "data/status.html": dataStatusHtml, } // AssetDir returns the file names below a certain // directory embedded in the file by go-bindata. // For example if you run go-bindata on data/... and data contains the // following hierarchy: // data/ // foo.txt // img/ // a.png // b.png // then AssetDir("data") would return []string{"foo.txt", "img"} // AssetDir("data/img") would return []string{"a.png", "b.png"} // AssetDir("foo.txt") and AssetDir("notexist") would return an error // AssetDir("") will return []string{"data"}. func AssetDir(name string) ([]string, error) { node := _bintree if len(name) != 0 { cannonicalName := strings.Replace(name, "\\", "/", -1) pathList := strings.Split(cannonicalName, "/") for _, p := range pathList { node = node.Children[p] if node == nil { return nil, fmt.Errorf("Asset %s not found", name) } } } if node.Func != nil { return nil, fmt.Errorf("Asset %s not found", name) } rv := make([]string, 0, len(node.Children)) for childName := range node.Children { rv = append(rv, childName) } return rv, nil } type bintree struct { Func func() (*asset, error) Children map[string]*bintree } var _bintree = &bintree{nil, map[string]*bintree{ "data": &bintree{nil, map[string]*bintree{ "status.html": &bintree{dataStatusHtml, map[string]*bintree{}}, }}, }} // RestoreAsset restores an asset under the given directory func RestoreAsset(dir, name string) error { data, err := Asset(name) if err != nil { return err } info, err := AssetInfo(name) if err != nil { return err } err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) if err != nil { return err } err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) if err != nil { return err } err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) if err != nil { return err } return nil } // RestoreAssets restores an asset under the given directory recursively func RestoreAssets(dir, name string) error { children, err := AssetDir(name) // File if err != nil { return RestoreAsset(dir, name) } // Dir for _, child := range children { err = RestoreAssets(dir, filepath.Join(name, child)) if err != nil { return err } } return nil } func _filePath(dir, name string) string { cannonicalName := strings.Replace(name, "\\", "/", -1) return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
} blazer-0.4.4/internal/b2assets/data/000077500000000000000000000000001330654575600172675ustar00rootroot00000000000000blazer-0.4.4/internal/b2assets/data/status.html000066400000000000000000000015511330654575600215020ustar00rootroot00000000000000 b2 client status {{$methods := methods .}} {{$durations := durations .}} {{$table := table .}}

<h1>count by code</h1>

<table>
{{range $method := $methods}}
<tr>
<td>{{$method}}</td>
{{range $duration := $durations}}
<td>{{index $table $method $duration}}</td>
{{end}}
</tr>
{{end}}
</table>

<h1>uploads</h1>

{{range $name, $val := .Writers}}
<h2>{{ $name }}</h2>
{{range $id, $prog := $val.Progress}}
{{inc $id}}<br>
{{end}}
{{end}}

<h1>downloads</h1>

{{range $name, $val := .Readers}}
<h2>{{ $name }}</h2>
{{range $id, $prog := $val.Progress}}
{{inc $id}}<br>
{{end}} {{end}} blazer-0.4.4/internal/b2assets/gen.go000066400000000000000000000013421330654575600174560ustar00rootroot00000000000000// Copyright 2018, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package b2assets contains data required by other libraries in blazer. package b2assets //go:generate go-bindata -pkg $GOPACKAGE -o b2assets.go data/ blazer-0.4.4/internal/b2types/000077500000000000000000000000001330654575600162205ustar00rootroot00000000000000blazer-0.4.4/internal/b2types/b2types.go000066400000000000000000000160121330654575600201370ustar00rootroot00000000000000// Copyright 2016, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package b2types implements internal types common to the B2 API. package b2types // You know what would be amazing? If I could autogen this from like a JSON // file. Wouldn't that be amazing? That would be amazing. const ( V1api = "/b2api/v1/" ) type ErrorMessage struct { Status int `json:"status"` Code string `json:"code"` Msg string `json:"message"` } type AuthorizeAccountResponse struct { AccountID string `json:"accountId"` AuthToken string `json:"authorizationToken"` URI string `json:"apiUrl"` DownloadURI string `json:"downloadUrl"` MinPartSize int `json:"minimumPartSize"` } type LifecycleRule struct { DaysHiddenUntilDeleted int `json:"daysFromHidingToDeleting,omitempty"` DaysNewUntilHidden int `json:"daysFromUploadingToHiding,omitempty"` Prefix string `json:"fileNamePrefix"` } type CreateBucketRequest struct { AccountID string `json:"accountId"` Name string `json:"bucketName"` Type string `json:"bucketType"` Info map[string]string `json:"bucketInfo"` LifecycleRules []LifecycleRule `json:"lifecycleRules"` } type CreateBucketResponse struct { BucketID string `json:"bucketId"` Name string `json:"bucketName"` Type string `json:"bucketType"` Info map[string]string `json:"bucketInfo"` LifecycleRules []LifecycleRule `json:"lifecycleRules"` Revision int `json:"revision"` } type DeleteBucketRequest struct { AccountID string `json:"accountId"` BucketID string `json:"bucketId"` } type ListBucketsRequest struct { AccountID string `json:"accountId"` } type ListBucketsResponse struct { Buckets []CreateBucketResponse `json:"buckets"` } type UpdateBucketRequest struct { AccountID string `json:"accountId"` BucketID string `json:"bucketId"` // bucketName is a required field according to // https://www.backblaze.com/b2/docs/b2_update_bucket.html. 
// // However, actually setting it returns 400: unknown field in // com.backblaze.modules.b2.data.UpdateBucketRequest: bucketName // //Name string `json:"bucketName"` Type string `json:"bucketType,omitempty"` Info map[string]string `json:"bucketInfo,omitempty"` LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"` IfRevisionIs int `json:"ifRevisionIs,omitempty"` } type UpdateBucketResponse CreateBucketResponse type GetUploadURLRequest struct { BucketID string `json:"bucketId"` } type GetUploadURLResponse struct { URI string `json:"uploadUrl"` Token string `json:"authorizationToken"` } type UploadFileResponse struct { FileID string `json:"fileId"` Timestamp int64 `json:"uploadTimestamp"` Action string `json:"action"` } type DeleteFileVersionRequest struct { Name string `json:"fileName"` FileID string `json:"fileId"` } type StartLargeFileRequest struct { BucketID string `json:"bucketId"` Name string `json:"fileName"` ContentType string `json:"contentType"` Info map[string]string `json:"fileInfo,omitempty"` } type StartLargeFileResponse struct { ID string `json:"fileId"` } type CancelLargeFileRequest struct { ID string `json:"fileId"` } type ListPartsRequest struct { ID string `json:"fileId"` Start int `json:"startPartNumber"` Count int `json:"maxPartCount"` } type ListPartsResponse struct { Next int `json:"nextPartNumber"` Parts []struct { ID string `json:"fileId"` Number int `json:"partNumber"` SHA1 string `json:"contentSha1"` Size int64 `json:"contentLength"` } `json:"parts"` } type getUploadPartURLRequest struct { ID string `json:"fileId"` } type getUploadPartURLResponse struct { URL string `json:"uploadUrl"` Token string `json:"authorizationToken"` } type FinishLargeFileRequest struct { ID string `json:"fileId"` Hashes []string `json:"partSha1Array"` } type FinishLargeFileResponse struct { Name string `json:"fileName"` FileID string `json:"fileId"` Timestamp int64 `json:"uploadTimestamp"` Action string `json:"action"` } type ListFileNamesRequest struct { BucketID string `json:"bucketId"` Count int `json:"maxFileCount"` Continuation string `json:"startFileName,omitempty"` Prefix string `json:"prefix,omitempty"` Delimiter string `json:"delimiter,omitempty"` } type ListFileNamesResponse struct { Continuation string `json:"nextFileName"` Files []GetFileInfoResponse `json:"files"` } type ListFileVersionsRequest struct { BucketID string `json:"bucketId"` Count int `json:"maxFileCount"` StartName string `json:"startFileName,omitempty"` StartID string `json:"startFileId,omitempty"` Prefix string `json:"prefix,omitempty"` Delimiter string `json:"delimiter,omitempty"` } type ListFileVersionsResponse struct { NextName string `json:"nextFileName"` NextID string `json:"nextFileId"` Files []GetFileInfoResponse `json:"files"` } type HideFileRequest struct { BucketID string `json:"bucketId"` File string `json:"fileName"` } type HideFileResponse struct { ID string `json:"fileId"` Timestamp int64 `json:"uploadTimestamp"` Action string `json:"action"` } type GetFileInfoRequest struct { ID string `json:"fileId"` } type GetFileInfoResponse struct { FileID string `json:"fileId"` Name string `json:"fileName"` SHA1 string `json:"contentSha1"` Size int64 `json:"contentLength"` ContentType string `json:"contentType"` Info map[string]string `json:"fileInfo"` Action string `json:"action"` Timestamp int64 `json:"uploadTimestamp"` } type GetDownloadAuthorizationRequest struct { BucketID string `json:"bucketId"` Prefix string `json:"fileNamePrefix"` Valid int `json:"validDurationInSeconds"` } type 
GetDownloadAuthorizationResponse struct { BucketID string `json:"bucketId"` Prefix string `json:"fileNamePrefix"` Token string `json:"authorizationToken"` } type ListUnfinishedLargeFilesRequest struct { BucketID string `json:"bucketId"` Continuation string `json:"startFileId,omitempty"` Count int `json:"maxFileCount,omitempty"` } type ListUnfinishedLargeFilesResponse struct { Files []GetFileInfoResponse `json:"files"` Continuation string `json:"nextFileId"` } blazer-0.4.4/internal/bin/000077500000000000000000000000001330654575600154005ustar00rootroot00000000000000blazer-0.4.4/internal/bin/cleanup/000077500000000000000000000000001330654575600170275ustar00rootroot00000000000000blazer-0.4.4/internal/bin/cleanup/cleanup.go000066400000000000000000000025721330654575600210130ustar00rootroot00000000000000package main import ( "context" "fmt" "os" "strings" "sync" "github.com/kurin/blazer/b2" ) const ( apiID = "B2_ACCOUNT_ID" apiKey = "B2_SECRET_KEY" ) func main() { id := os.Getenv(apiID) key := os.Getenv(apiKey) ctx := context.Background() client, err := b2.NewClient(ctx, id, key) if err != nil { fmt.Println(err) return } buckets, err := client.ListBuckets(ctx) if err != nil { fmt.Println(err) return } var kill []string for _, bucket := range buckets { if strings.HasPrefix(bucket.Name(), fmt.Sprintf("%s-b2-tests-", id)) { kill = append(kill, bucket.Name()) } if bucket.Name() == fmt.Sprintf("%s-consistobucket", id) || bucket.Name() == fmt.Sprintf("%s-base-tests", id) { kill = append(kill, bucket.Name()) } } var wg sync.WaitGroup for _, name := range kill { wg.Add(1) go func(name string) { defer wg.Done() fmt.Println("removing", name) if err := killBucket(ctx, client, name); err != nil { fmt.Println(err) } }(name) } wg.Wait() } func killBucket(ctx context.Context, client *b2.Client, name string) error { bucket, err := client.NewBucket(ctx, name, nil) if b2.IsNotExist(err) { return nil } if err != nil { return err } defer bucket.Delete(ctx) iter := bucket.List(ctx, b2.ListHidden()) for iter.Next() { if err := iter.Object().Delete(ctx); err != nil { fmt.Println(err) } } return iter.Err() } blazer-0.4.4/internal/blog/000077500000000000000000000000001330654575600155535ustar00rootroot00000000000000blazer-0.4.4/internal/blog/blog.go000066400000000000000000000023431330654575600170270ustar00rootroot00000000000000// Copyright 2017, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package blog implements a private logger, in the manner of glog, without // polluting the flag namespace or leaving files all over /tmp. // // It has almost no features, and a bunch of global state. package blog import ( "log" "os" "strconv" ) var level int32 type Verbose bool func init() { lvl := os.Getenv("B2_LOG_LEVEL") i, err := strconv.ParseInt(lvl, 10, 32) if err != nil { return } level = int32(i) } func (v Verbose) Info(a ...interface{}) { if v { log.Print(a...) } } func (v Verbose) Infof(format string, a ...interface{}) { if v { log.Printf(format, a...) 
} } func V(target int32) Verbose { return Verbose(target <= level) } blazer-0.4.4/x/000077500000000000000000000000001330654575600132635ustar00rootroot00000000000000blazer-0.4.4/x/consistent/000077500000000000000000000000001330654575600154545ustar00rootroot00000000000000blazer-0.4.4/x/consistent/consistent.go000066400000000000000000000233461330654575600202040ustar00rootroot00000000000000// Copyright 2016, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package consistent implements an experimental interface for using B2 as a // coordination primitive. package consistent import ( "bytes" "context" "crypto/rand" "encoding/base64" "encoding/json" "errors" "fmt" "io" "io/ioutil" "reflect" "github.com/kurin/blazer/b2" ) const metaKey = "blazer-meta-key-no-touchie" var ( errUpdateConflict = errors.New("update conflict") errNotInGroup = errors.New("not in group") ) // NewGroup creates a new consistent Group for the given bucket. func NewGroup(bucket *b2.Bucket, name string) *Group { return &Group{ name: name, b: bucket, } } // Group represents a collection of B2 objects that can be modified in a // consistent way. Objects in the same group contend with each other for // updates, but there can only be so many (maximum of 10; fewer if there are // other bucket attributes set) groups in a given bucket. type Group struct { name string b *b2.Bucket ba *b2.BucketAttrs } // Operate calls f with the contents of the group object given by name, and // updates that object with the output of f if f returns no error. Operate // guarantees that no other callers have modified the contents of name in the // meantime (as long as all other callers are using this package). It may call // f any number of times and, as a result, the potential data transfer is // unbounded. Callers should have f fail after a given number of attempts if // this is unacceptable. // // The io.Reader that f returns is guaranteed to be read until at least the // first error. Callers must ensure that this is sufficient for the reader to // clean up after itself. func (g *Group) OperateStream(ctx context.Context, name string, f func(io.Reader) (io.Reader, error)) error { for { r, err := g.NewReader(ctx, name) if err != nil && err != errNotInGroup { return err } out, err := f(r) r.Close() if err != nil { return err } defer io.Copy(ioutil.Discard, out) // ensure the reader is read w, err := g.NewWriter(ctx, r.Key, name) if err != nil { return err } if _, err := io.Copy(w, out); err != nil { return err } if err := w.Close(); err != nil { if err == errUpdateConflict { continue } return err } return nil } } // Operate uses OperateStream to act on byte slices. 
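//
// A sketch of typical use (the object name and the counter logic are
// illustrative only, not part of the package):
//
//	err := g.Operate(ctx, "counter", func(b []byte) ([]byte, error) {
//		n, _ := strconv.Atoi(string(b)) // b is empty on first use
//		return []byte(strconv.Itoa(n + 1)), nil
//	})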
func (g *Group) Operate(ctx context.Context, name string, f func([]byte) ([]byte, error)) error { return g.OperateStream(ctx, name, func(r io.Reader) (io.Reader, error) { b, err := ioutil.ReadAll(r) if b2.IsNotExist(err) { b = nil err = nil } if err != nil { return nil, err } bs, err := f(b) if err != nil { return nil, err } return bytes.NewReader(bs), nil }) } // OperateJSON is a convenience function for transforming JSON data in B2 in a // consistent way. Callers should pass a function f which accepts a pointer to // a struct of a given type and transforms it into another struct (ideally but // not necessarily of the same type). Callers should also pass an example // struct, t, or a pointer to it, that is the same type. t will not be // altered. If there is no existing file, f will be called with a pointer to // an empty struct of type t. Otherwise, it will be called with a pointer to a // struct filled out with the given JSON. func (g *Group) OperateJSON(ctx context.Context, name string, t interface{}, f func(interface{}) (interface{}, error)) error { jsonType := reflect.TypeOf(t) for jsonType.Kind() == reflect.Ptr { jsonType = jsonType.Elem() } return g.OperateStream(ctx, name, func(r io.Reader) (io.Reader, error) { in := reflect.New(jsonType).Interface() if err := json.NewDecoder(r).Decode(in); err != nil && err != io.EOF && !b2.IsNotExist(err) { return nil, err } out, err := f(in) if err != nil { return nil, err } pr, pw := io.Pipe() go func() { pw.CloseWithError(json.NewEncoder(pw).Encode(out)) }() return closeAfterReading{rc: pr}, nil }) } // closeAfterReading closes the underlying reader on the first non-nil error. type closeAfterReading struct { rc io.ReadCloser } func (car closeAfterReading) Read(p []byte) (int, error) { n, err := car.rc.Read(p) if err != nil { car.rc.Close() } return n, err } // Writer is an io.WriteCloser. type Writer struct { ctx context.Context wc io.WriteCloser name string suffix string key string g *Group } // Write implements io.Writer. func (w Writer) Write(p []byte) (int, error) { return w.wc.Write(p) } // Close writes any remaining data into B2 and updates the group to reflect the // contents of the new object. If the group object has been modified, Close() // will fail. func (w Writer) Close() error { if err := w.wc.Close(); err != nil { return err } // TODO: maybe see if you can cut down on calls to info() for { ci, err := w.g.info(w.ctx) if err != nil { // Couldn't fetch group info; delete the new version. w.g.b.Object(w.name + "/" + w.suffix).Delete(w.ctx) return err } old, ok := ci.Locations[w.name] if ok && old != w.key { w.g.b.Object(w.name + "/" + w.suffix).Delete(w.ctx) return errUpdateConflict } ci.Locations[w.name] = w.suffix if err := w.g.save(w.ctx, ci); err != nil { if err == errUpdateConflict { continue } w.g.b.Object(w.name + "/" + w.suffix).Delete(w.ctx) return err } // Replacement successful; delete the old version. w.g.b.Object(w.name + "/" + w.key).Delete(w.ctx) return nil } } // Reader is an io.ReadCloser. Key must be passed to NewWriter. type Reader struct { r io.ReadCloser Key string } func (r Reader) Read(p []byte) (int, error) { if r.r == nil { return 0, io.EOF } return r.r.Read(p) } func (r Reader) Close() error { if r.r == nil { return nil } return r.r.Close() } // NewWriter creates a Writer and prepares it to be updated. The key argument
// should come from the Key field of a Reader; if Writer.Close() returns with // no error, then the underlying group object was successfully updated from the // data available from the Reader with no intervening writes. New objects can // be created with an empty key. func (g *Group) NewWriter(ctx context.Context, key, name string) (Writer, error) { suffix, err := random() if err != nil { return Writer{}, err } return Writer{ ctx: ctx, wc: g.b.Object(name + "/" + suffix).NewWriter(ctx), name: name, suffix: suffix, key: key, g: g, }, nil } // NewReader creates a Reader with the current version of the object, as well // as that object's update key. func (g *Group) NewReader(ctx context.Context, name string) (Reader, error) { ci, err := g.info(ctx) if err != nil { return Reader{}, err } suffix, ok := ci.Locations[name] if !ok { return Reader{}, errNotInGroup } return Reader{ r: g.b.Object(name + "/" + suffix).NewReader(ctx), Key: suffix, }, nil } func (g *Group) info(ctx context.Context) (*consistentInfo, error) { attrs, err := g.b.Attrs(ctx) if err != nil { return nil, err } g.ba = attrs imap := attrs.Info if imap == nil { // Treat a bucket with no info at all like one with no group metadata; // returning a nil *consistentInfo here would panic the callers that // dereference it. return &consistentInfo{ Version: 1, Locations: make(map[string]string), }, nil } enc, ok := imap[metaKey+"-"+g.name] if !ok { return &consistentInfo{ Version: 1, Locations: make(map[string]string), }, nil } b, err := base64.StdEncoding.DecodeString(enc) if err != nil { return nil, err } ci := &consistentInfo{} if err := json.Unmarshal(b, ci); err != nil { return nil, err } if ci.Locations == nil { ci.Locations = make(map[string]string) } return ci, nil } func (g *Group) save(ctx context.Context, ci *consistentInfo) error { ci.Serial++ b, err := json.Marshal(ci) if err != nil { return err } s := base64.StdEncoding.EncodeToString(b) for { oldAI, err := g.info(ctx) if err != nil { return err } if oldAI.Serial != ci.Serial-1 { return errUpdateConflict } if g.ba.Info == nil { g.ba.Info = make(map[string]string) } g.ba.Info[metaKey+"-"+g.name] = s err = g.b.Update(ctx, g.ba) if err == nil { return nil } if !b2.IsUpdateConflict(err) { return err } // Bucket update conflict; try again. } } // List returns a list of all the group objects. func (g *Group) List(ctx context.Context) ([]string, error) { ci, err := g.info(ctx) if err != nil { return nil, err } var l []string for name := range ci.Locations { l = append(l, name) } return l, nil } type consistentInfo struct { Version int // Serial is incremented for every version saved. If we ensure that // current.Serial = 1 + previous.Serial, and that the bucket metadata is // updated cleanly, then we know that the version we saved is the direct // successor to the version we had. If the bucket metadata doesn't update // cleanly, but the serial relation holds true for the new AI struct, then we // can retry without bothering the user. However, if the serial relation no // longer holds true, it means someone else has updated AI and we have to ask // the user to redo everything they've done. // // However, it is still necessary for higher level constructs to confirm that // the serial number they expect is good. The writer does this, for example, // by comparing the "key" of the file it is replacing.
Serial int Locations map[string]string } func random() (string, error) { b := make([]byte, 20) if _, err := rand.Read(b); err != nil { return "", err } return fmt.Sprintf("%x", b), nil } blazer-0.4.4/x/consistent/consistent_test.go000066400000000000000000000062371330654575600212430ustar00rootroot00000000000000package consistent import ( "context" "io/ioutil" "os" "strconv" "sync" "testing" "github.com/kurin/blazer/b2" ) const ( apiID = "B2_ACCOUNT_ID" apiKey = "B2_SECRET_KEY" bucketName = "consistobucket" ) func TestOperationLive(t *testing.T) { ctx := context.Background() bucket, done := startLiveTest(ctx, t) defer done() g := NewGroup(bucket, "tester") name := "some_kinda_name/thing.txt" var wg sync.WaitGroup for i := 0; i < 10; i++ { wg.Add(1) i := i go func() { var n int defer wg.Done() for j := 0; j < 10; j++ { if err := g.Operate(ctx, name, func(b []byte) ([]byte, error) { if len(b) > 0 { i, err := strconv.Atoi(string(b)) if err != nil { return nil, err } n = i } return []byte(strconv.Itoa(n + 1)), nil }); err != nil { t.Error(err) } t.Logf("thread %d: successful %d++", i, n) } }() } wg.Wait() r, err := g.NewReader(ctx, name) if err != nil { t.Fatal(err) } defer r.Close() b, err := ioutil.ReadAll(r) if err != nil { t.Fatal(err) } n, err := strconv.Atoi(string(b)) if err != nil { t.Fatal(err) } if n != 100 { t.Errorf("result: got %d, want 100", n) } } type jsonThing struct { Boop int `json:"boop_field"` Thread int `json:"thread_id"` } func TestOperationJSONLive(t *testing.T) { ctx := context.Background() bucket, done := startLiveTest(ctx, t) defer done() g := NewGroup(bucket, "tester") name := "some_kinda_json/thing.json" var wg sync.WaitGroup for i := 0; i < 4; i++ { wg.Add(1) i := i go func() { var n int defer wg.Done() for j := 0; j < 4; j++ { // Pass both a struct and a pointer to a struct. 
var face interface{} face = jsonThing{} if j%2 == 0 { face = &jsonThing{} } if err := g.OperateJSON(ctx, name, face, func(j interface{}) (interface{}, error) { jt := j.(*jsonThing) n = jt.Boop return &jsonThing{ Boop: jt.Boop + 1, Thread: i, }, nil }); err != nil { t.Error(err) } t.Logf("thread %d: successful %d++", i, n) } }() } wg.Wait() if err := g.OperateJSON(ctx, name, &jsonThing{}, func(i interface{}) (interface{}, error) { jt := i.(*jsonThing) if jt.Boop != 16 { t.Errorf("got %d boops; want 16", jt.Boop) } return nil, nil }); err != nil { t.Error(err) } } func startLiveTest(ctx context.Context, t *testing.T) (*b2.Bucket, func()) { id := os.Getenv(apiID) key := os.Getenv(apiKey) if id == "" || key == "" { t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests") return nil, nil } client, err := b2.NewClient(ctx, id, key) if err != nil { t.Fatal(err) return nil, nil } bucket, err := client.NewBucket(ctx, id+"-"+bucketName, nil) if err != nil { t.Fatal(err) return nil, nil } f := func() { iter := bucket.List(ctx, b2.ListHidden()) for iter.Next() { if err := iter.Object().Delete(ctx); err != nil { t.Error(err) } } if err := iter.Err(); err != nil && !b2.IsNotExist(err) { t.Error(err) } if err := bucket.Delete(ctx); err != nil && !b2.IsNotExist(err) { t.Error(err) } } return bucket, f } type object struct { o *b2.Object err error } blazer-0.4.4/x/transport/000077500000000000000000000000001330654575600153175ustar00rootroot00000000000000blazer-0.4.4/x/transport/transport.go000066400000000000000000000121541330654575600177050ustar00rootroot00000000000000// Copyright 2017, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package transport provides http.RoundTrippers that may be useful to clients // of Blazer. package transport import ( "context" "fmt" "io" "io/ioutil" "math/rand" "net/http" "strings" "sync/atomic" "time" ) // WithFailures returns an http.RoundTripper that wraps an existing // RoundTripper, causing failures according to the options given. If rt is // nil, the http.DefaultTransport is wrapped. 
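//
// A hedged sketch of wiring this into the b2 client (this assumes the
// b2.Transport client option exported by this repository's b2 package):
//
//	rt := transport.WithFailures(nil,
//		transport.FailureRate(0.25),
//		transport.Response(503),
//		transport.Body("service unavailable"),
//	)
//	client, err := b2.NewClient(ctx, id, key, b2.Transport(rt))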
func WithFailures(rt http.RoundTripper, opts ...FailureOption) http.RoundTripper { if rt == nil { rt = http.DefaultTransport } o := &options{ rt: rt, } for _, opt := range opts { opt(o) } return o } type options struct { pathSubstrings []string failureRate float64 status int stall time.Duration rt http.RoundTripper msg string trg *triggerReaderGroup } func (o *options) doRequest(req *http.Request) (*http.Response, error) { if o.trg != nil && req.Body != nil { req.Body = o.trg.new(req.Body) } resp, err := o.rt.RoundTrip(req) if resp != nil && o.trg != nil { resp.Body = o.trg.new(resp.Body) } return resp, err } func (o *options) RoundTrip(req *http.Request) (*http.Response, error) { // TODO: fix triggering conditions if rand.Float64() > o.failureRate { return o.doRequest(req) } var match bool if len(o.pathSubstrings) == 0 { match = true } for _, ss := range o.pathSubstrings { if strings.Contains(req.URL.Path, ss) { match = true break } } if !match { return o.doRequest(req) } if o.status > 0 { resp := &http.Response{ Status: fmt.Sprintf("%d %s", o.status, http.StatusText(o.status)), StatusCode: o.status, Body: ioutil.NopCloser(strings.NewReader(o.msg)), Request: req, } return resp, nil } if o.stall > 0 { ctx := req.Context() select { case <-time.After(o.stall): case <-ctx.Done(): } } return o.doRequest(req) } // A FailureOption specifies the kind of failure that the RoundTripper should // simulate. type FailureOption func(*options) // MatchPathSubstring restricts the RoundTripper to URLs whose paths contain // the given string. The default behavior is to match all paths. func MatchPathSubstring(s string) FailureOption { return func(o *options) { o.pathSubstrings = append(o.pathSubstrings, s) } } // FailureRate causes the RoundTripper to fail a certain percentage of the // time. rate should be a number between 0 and 1, where 0 will never fail and // 1 will always fail. The default is never to fail. func FailureRate(rate float64) FailureOption { return func(o *options) { o.failureRate = rate } } // Response simulates a given status code. The returned http.Response will // have its Status, StatusCode, and Body (with any predefined message) set. func Response(status int) FailureOption { return func(o *options) { o.status = status } } // Stall simulates a network connection failure by stalling for the given // duration. func Stall(dur time.Duration) FailureOption { return func(o *options) { o.stall = dur } } // Body sets the message used in the response body when a specific Response is // requested. func Body(msg string) FailureOption { return func(o *options) { o.msg = msg } } // Trigger will raise the RoundTripper's failure rate to 100% when the given // context is canceled. func Trigger(ctx context.Context) FailureOption { return func(o *options) { go func() { <-ctx.Done() o.failureRate = 1 }() } } // AfterNBytes will call effect once (roughly) n bytes have gone over the wire. // Both sent and received bytes are counted against the total. Only bytes in // the body of an HTTP request are currently counted; this may change in the // future. effect will only be called once, and it will block (allowing // callers to simulate connection hangs).
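//
// For example, to simulate a connection that hangs after roughly a megabyte
// (a sketch; the empty select blocks forever, so the caller should bound the
// request with a context deadline):
//
//	rt := transport.WithFailures(nil, transport.AfterNBytes(1e6, func() {
//		select {}
//	}))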
func AfterNBytes(n int, effect func()) FailureOption { return func(o *options) { o.trg = &triggerReaderGroup{ bytes: int64(n), trigger: effect, } } } type triggerReaderGroup struct { bytes int64 trigger func() triggered int64 } func (rg *triggerReaderGroup) new(rc io.ReadCloser) io.ReadCloser { return &triggerReader{ ReadCloser: rc, bytes: &rg.bytes, trigger: rg.trigger, triggered: &rg.triggered, } } type triggerReader struct { io.ReadCloser bytes *int64 trigger func() triggered *int64 } func (r *triggerReader) Read(p []byte) (int, error) { n, err := r.ReadCloser.Read(p) if atomic.AddInt64(r.bytes, -int64(n)) < 0 && atomic.CompareAndSwapInt64(r.triggered, 0, 1) { // Can't use sync.Once because it blocks for *all* callers until Do returns. r.trigger() } return n, err } blazer-0.4.4/x/window/000077500000000000000000000000001330654575600145725ustar00rootroot00000000000000blazer-0.4.4/x/window/accum_test.go000066400000000000000000000025101330654575600172460ustar00rootroot00000000000000// Copyright 2018, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package window_test import ( "fmt" "time" "github.com/kurin/blazer/x/window" ) type Accumulator struct { w *window.Window } func (a Accumulator) Add(s string) { a.w.Insert([]string{s}) } func (a Accumulator) All() []string { v := a.w.Reduce() return v.([]string) } func NewAccum(size time.Duration) Accumulator { r := func(i, j interface{}) interface{} { a, ok := i.([]string) if !ok { a = nil } b, ok := j.([]string) if !ok { b = nil } for _, s := range b { a = append(a, s) } return a } return Accumulator{w: window.New(size, time.Second, r)} } func Example_accumulator() { a := NewAccum(time.Minute) a.Add("this") a.Add("is") a.Add("that") fmt.Printf("total: %v\n", a.All()) // Output: // total: [this is that] } blazer-0.4.4/x/window/counter_test.go000066400000000000000000000022671330654575600176460ustar00rootroot00000000000000// Copyright 2018, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package window_test import ( "fmt" "time" "github.com/kurin/blazer/x/window" ) type Counter struct { w *window.Window } func (c Counter) Add() { c.w.Insert(1) } func (c Counter) Count() int { v := c.w.Reduce() return v.(int) } func New(size time.Duration) Counter { r := func(i, j interface{}) interface{} { a, ok := i.(int) if !ok { a = 0 } b, ok := j.(int) if !ok { b = 0 } return a + b } return Counter{w: window.New(size, time.Second, r)} } func Example_counter() { c := New(time.Minute) c.Add() c.Add() c.Add() fmt.Printf("total: %d\n", c.Count()) // Output: // total: 3 } blazer-0.4.4/x/window/window.go000066400000000000000000000102621330654575600164310ustar00rootroot00000000000000// Copyright 2018, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package window provides a type for efficiently recording events as they // occur over a given span of time. Events added to the window will remain // until the time expires. package window import ( "sync" "time" ) // A Window efficiently records events that have occurred over a span of time // extending from some fixed interval ago to now. Events that pass beyond this // horizon are discarded. type Window struct { mu sync.Mutex events []interface{} res time.Duration last time.Time reduce Reducer forever bool e interface{} } // A Reducer should take two values from the window and combine them into a // third value that will be stored in the window. The values i or j may be // nil. The underlying types for both arguments and the output should be // identical. // // If the reducer is any kind of slice or list, then data usage will grow // linearly with the number of events added to the window. // // Reducer will be called on its own output: Reducer(Reducer(x, y), z). type Reducer func(i, j interface{}) interface{} // New returns an initialized window for events over the given duration at the // given resolution. Windows with tight resolution (i.e., small values for // that argument) will be more accurate, at the cost of some memory. // // A size of 0 means "forever"; old events will never be removed. func New(size, resolution time.Duration, r Reducer) *Window { if size > 0 { return &Window{ res: resolution, events: make([]interface{}, size/resolution), reduce: r, } } return &Window{ forever: true, reduce: r, } } func (w *Window) bucket(now time.Time) int { nanos := now.UnixNano() abs := nanos / int64(w.res) return int(abs) % len(w.events) } // sweep keeps the window valid. It needs to be called from every method that // views or updates the window, and the caller needs to hold the mutex. func (w *Window) sweep(now time.Time) { if w.forever { return } defer func() { w.last = now }() // This compares now and w.last's monotonic clocks. diff := now.Sub(w.last) if diff < 0 { // time went backwards somehow; zero events and return for i := range w.events { w.events[i] = nil } return } last := now.Add(-diff) b := w.bucket(now) p := w.bucket(last) if b == p && diff <= w.res { // We're in the same bucket as the previous sweep, so all buckets are // valid. 
return } if diff > w.res*time.Duration(len(w.events)) { // We've gone longer than this window measures since the last sweep, just // zero the thing and have done. for i := range w.events { w.events[i] = nil } return } // Expire all invalid buckets. This means buckets not seen since the // previous sweep and now, including the current bucket but not including the // previous bucket. old := int64(last.UnixNano()) / int64(w.res) new := int64(now.UnixNano()) / int64(w.res) for i := old + 1; i <= new; i++ { b := int(i) % len(w.events) w.events[b] = nil } } // Insert adds the given event. func (w *Window) Insert(e interface{}) { w.insertAt(time.Now(), e) } func (w *Window) insertAt(t time.Time, e interface{}) { w.mu.Lock() defer w.mu.Unlock() if w.forever { w.e = w.reduce(w.e, e) return } w.sweep(t) w.events[w.bucket(t)] = w.reduce(w.events[w.bucket(t)], e) } // Reduce runs the window's reducer over the valid values and returns the // result. func (w *Window) Reduce() interface{} { return w.reducedAt(time.Now()) } func (w *Window) reducedAt(t time.Time) interface{} { w.mu.Lock() defer w.mu.Unlock() if w.forever { return w.e } w.sweep(t) var n interface{} for i := range w.events { n = w.reduce(n, w.events[i]) } return n } blazer-0.4.4/x/window/window_test.go000066400000000000000000000054431330654575600174750ustar00rootroot00000000000000// Copyright 2018, Google // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package window import ( "testing" "time" ) type epair struct { e interface{} t time.Time } func adder(i, j interface{}) interface{} { a, ok := i.(int) if !ok { a = 0 } b, ok := j.(int) if !ok { b = 0 } return a + b } func TestWindows(t *testing.T) { table := []struct { size, dur time.Duration incs []epair look time.Time reduce Reducer want interface{} }{ { size: time.Minute, dur: time.Second, incs: []epair{ // year, month, day, hour, min, sec, nano {t: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 1, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 2, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 3, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 4, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 5, 0, time.UTC), e: 1}, }, look: time.Date(2000, 1, 1, 0, 1, 0, 0, time.UTC), want: 5, reduce: adder, }, { incs: []epair{ // year, month, day, hour, min, sec, nano {t: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 1, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 2, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 3, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 4, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 5, 0, time.UTC), e: 1}, }, want: 6, reduce: adder, }, { // what happens if time goes backwards? 
size: time.Minute, dur: time.Second, incs: []epair{ {t: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 1, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 2, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 3, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 4, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), e: 1}, }, look: time.Date(2000, 1, 1, 0, 0, 30, 0, time.UTC), want: 1, reduce: adder, }, } for _, e := range table { w := New(e.size, e.dur, e.reduce) for _, inc := range e.incs { w.insertAt(inc.t, inc.e) } ct := w.reducedAt(e.look) if ct != e.want { t.Errorf("reducedAt(%v) got %v, want %v", e.look, ct, e.want) } } }
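// An additional illustrative sketch, not part of the original suite: a window
// created with size 0 is the "forever" window, which never expires events no
// matter how far apart the insert and read times are.
func TestForeverWindowSketch(t *testing.T) {
	w := New(0, time.Second, adder)
	w.insertAt(time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), 1)
	w.insertAt(time.Date(2010, 1, 1, 0, 0, 0, 0, time.UTC), 1)
	if got := w.reducedAt(time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)); got != 2 {
		t.Errorf("forever window: got %v, want 2", got)
	}
}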