blazer-0.6.1/.gitignore

bin/bonfire/bonfire

# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# IntelliJ
.idea

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof

blazer-0.6.1/.travis.yml

language: go

go:
- tip

branches:
  only:
  - master

before_script: go run internal/bin/cleanup/cleanup.go

script:
- go test -v ./base ./b2 ./x/...
- go vet -v ./base ./b2 ./x/...

blazer-0.6.1/AUTHORS

# This is the list of Blazer authors for copyright purposes.
#
# This does not necessarily list everyone who has contributed code, since in
# some cases, their employer may be the copyright holder. To see the full list
# of contributors, see the revision history in source control.
#
# Tag yourself.

Google LLC

blazer-0.6.1/CHANGELOG.md

# Changelog

Going forward from v0.6.0 (the first new version after the move to
https://github.com/Backblaze/blazer), all notable changes to this project will
be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased]

- Nothing at present

## [0.6.1] - 2023-10-16

### Added

- `go.mod` file, license report

### Fixed

- Resolve import errors

### Changed

- Reference license report from README

## [0.6.0] - 2023-09-26

Tagged initial version at https://github.com/Backblaze/blazer

[unreleased]: https://github.com/Backblaze/blazer/compare/v0.6.1...HEAD
[0.6.1]: https://github.com/Backblaze/blazer/compare/v0.6.0...v0.6.1
[0.6.0]: https://github.com/Backblaze/blazer/compare/v0.5.3...v0.6.0

blazer-0.6.1/CONTRIBUTING.md

Want to contribute? Great! First, read this page.

### Code reviews

All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose.

blazer-0.6.1/LICENSE

Copyright 2016, the Blazer authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

blazer-0.6.1/README.md

Blazer
====

[![GoDoc](https://godoc.org/github.com/Backblaze/blazer/b2?status.svg)](https://godoc.org/github.com/Backblaze/blazer/b2)

Blazer is a Golang client library for Backblaze B2 Cloud Object Storage.
```go
import "github.com/Backblaze/blazer/b2"
```

Blazer targets the Backblaze B2 Native API. Unless you specifically need to
access Backblaze B2 via its Native API, you should use the
[MinIO Go Client SDK](https://github.com/minio/minio-go) with Backblaze B2's
S3 Compatible API.

_Many thanks to Toby Burress ([kurin](https://github.com/kurin)) for creating
and maintaining Blazer for its first six years._

## Examples

### Getting started

```go
import (
	"context"
	"log"
	"os"

	"github.com/Backblaze/blazer/b2"
)

id := os.Getenv("B2_APPLICATION_KEY_ID")
key := os.Getenv("B2_APPLICATION_KEY")

ctx := context.Background()

// b2_authorize_account
b2, err := b2.NewClient(ctx, id, key)
if err != nil {
	log.Fatalln(err)
}

buckets, err := b2.ListBuckets(ctx)
if err != nil {
	log.Fatalln(err)
}
```

### Copy a file into B2

```go
func copyFile(ctx context.Context, bucket *b2.Bucket, src, dst string) error {
	f, err := os.Open(src)
	if err != nil {
		return err
	}
	defer f.Close()

	obj := bucket.Object(dst)
	w := obj.NewWriter(ctx)
	if _, err := io.Copy(w, f); err != nil {
		w.Close()
		return err
	}
	return w.Close()
}
```

If the file is less than 100MB, Blazer will simply buffer the file and use the
`b2_upload_file` API to send the file to Backblaze. If the file is greater
than 100MB, Blazer will use B2's large file support to upload the file in
100MB chunks.

### Copy a file into B2, with multiple concurrent uploads

Uploading a large file with multiple HTTP connections is simple:

```go
func copyFile(ctx context.Context, bucket *b2.Bucket, writers int, src, dst string) error {
	f, err := os.Open(src)
	if err != nil {
		return err
	}
	defer f.Close()

	w := bucket.Object(dst).NewWriter(ctx)
	w.ConcurrentUploads = writers
	if _, err := io.Copy(w, f); err != nil {
		w.Close()
		return err
	}
	return w.Close()
}
```

This will automatically split the file into `writers` concurrent 100MB chunk
uploads. Note that 100MB is the smallest chunk size that B2 supports.

### Download a file from B2

Downloading is as simple as uploading:

```go
func downloadFile(ctx context.Context, bucket *b2.Bucket, downloads int, src, dst string) error {
	r := bucket.Object(src).NewReader(ctx)
	defer r.Close()

	f, err := os.Create(dst)
	if err != nil {
		return err
	}
	r.ConcurrentDownloads = downloads
	if _, err := io.Copy(f, r); err != nil {
		f.Close()
		return err
	}
	return f.Close()
}
```

### List all objects in a bucket

```go
func printObjects(ctx context.Context, bucket *b2.Bucket) error {
	iterator := bucket.List(ctx)
	for iterator.Next() {
		fmt.Println(iterator.Object())
	}
	return iterator.Err()
}
```

### Grant temporary auth to a file

Say you have a number of files in a private bucket, and you want to allow
other people to download some of them. You can do this by issuing a temporary
authorization token for the prefix of the files you want to share.

```go
token, err := bucket.AuthToken(ctx, "photos", time.Hour)
```

If successful, `token` is an authorization token valid for one hour, which can
be passed in HTTP GET requests.

The hostname to use when downloading files via HTTP is account-specific and
can be found via the BaseURL method:

```go
base := bucket.BaseURL()
```
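Putting the token and base URL together yields a shareable link. Below is a
minimal sketch, assuming the standard B2 download path
(`<base>/file/<bucket>/<object>`, the same layout this package's `Object.URL`
and `Object.AuthURL` methods produce) and that `fmt`, `net/url`, and `time`
are imported; `photos/kitten.jpg` is a hypothetical object name:

```go
token, err := bucket.AuthToken(ctx, "photos", time.Hour)
if err != nil {
	return err
}
// Downloads are served from <base>/file/<bucket>/<object>; the token is
// passed as the Authorization query parameter.
share := fmt.Sprintf("%s/file/%s/%s?Authorization=%s",
	bucket.BaseURL(), bucket.Name(), "photos/kitten.jpg", url.QueryEscape(token))
fmt.Println(share)
```

For a single object, `Object.AuthURL` constructs such a URL for you.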
### Licenses

The b2 package currently does not consume any third-party packages; it depends
entirely on the Go standard library and on sources provided within the
`blazer` repository itself. A report of the licenses in use can be found at
`./b2/licenses.csv`, which was generated with
https://github.com/google/go-licenses .

Note that this may change over time and the license report could become stale,
so please verify it yourself if this is a concern.

blazer-0.6.1/b2/b2.go

// Copyright 2016, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package b2 provides a high-level interface to Backblaze's B2 cloud storage
// service.
//
// It is specifically designed to abstract away the Backblaze API details by
// providing familiar Go interfaces, specifically an io.Writer for object
// storage, and an io.Reader for object download. Handling of transient
// errors, including network and authentication timeouts, is transparent.
//
// Methods that perform network requests accept a context.Context argument.
// Callers should use the context's cancellation abilities to end requests
// early, or to provide timeout or deadline guarantees.
//
// This package is in development and may make API changes.
package b2

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"regexp"
	"strconv"
	"sync"
	"time"
)

// Client is a Backblaze B2 client.
type Client struct {
	backend beRootInterface

	slock    sync.Mutex
	sWriters map[string]*Writer
	sReaders map[string]*Reader
	sMethods []methodCounter
	opts     clientOptions
}

// NewClient creates and returns a new Client with valid B2 service account
// tokens.
func NewClient(ctx context.Context, account, key string, opts ...ClientOption) (*Client, error) {
	c := &Client{
		backend: &beRoot{
			b2i: &b2Root{},
		},
		sMethods: []methodCounter{
			newMethodCounter(time.Minute, time.Second),
			newMethodCounter(time.Minute*5, time.Second),
			newMethodCounter(time.Hour, time.Minute),
			newMethodCounter(0, 0), // forever
		},
	}
	opts = append(opts, client(c))
	for _, f := range opts {
		f(&c.opts)
	}
	if err := c.backend.authorizeAccount(ctx, account, key, c.opts); err != nil {
		return nil, err
	}
	return c, nil
}

type clientOptions struct {
	client          *Client
	transport       http.RoundTripper
	failSomeUploads bool
	expireTokens    bool
	capExceeded     bool
	apiBase         string
	userAgents      []string
	writerOpts      []WriterOption
}

// A ClientOption allows callers to adjust various per-client settings.
type ClientOption func(*clientOptions)

// UserAgent sets the User-Agent HTTP header. The default header is
// "blazer/<version>"; the value set here will be prepended to that. This can
// be set multiple times.
//
// A user agent is generally of the form "<product>/<version> (<comments>)".
func UserAgent(agent string) ClientOption {
	return func(o *clientOptions) {
		o.userAgents = append(o.userAgents, agent)
	}
}

// APIBase returns a ClientOption specifying the URL root of API requests.
func APIBase(url string) ClientOption {
	return func(o *clientOptions) {
		o.apiBase = url
	}
}

// Transport sets the underlying HTTP transport mechanism. If unset,
// http.DefaultTransport is used.
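//
// For example, a caller can wrap the default transport to observe each
// request. A minimal, hypothetical sketch (loggingTransport is not part of
// this package):
//
//	type loggingTransport struct{ rt http.RoundTripper }
//
//	func (t loggingTransport) RoundTrip(r *http.Request) (*http.Response, error) {
//		log.Printf("b2: %s %s", r.Method, r.URL)
//		return t.rt.RoundTrip(r)
//	}
//
//	client, err := b2.NewClient(ctx, id, key,
//		b2.Transport(loggingTransport{rt: http.DefaultTransport}))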
func Transport(rt http.RoundTripper) ClientOption {
	return func(c *clientOptions) {
		c.transport = rt
	}
}

// FailSomeUploads requests intermittent upload failures from the B2 service.
// This is mostly useful for testing.
func FailSomeUploads() ClientOption {
	return func(c *clientOptions) {
		c.failSomeUploads = true
	}
}

// ExpireSomeAuthTokens requests intermittent authentication failures from the
// B2 service.
func ExpireSomeAuthTokens() ClientOption {
	return func(c *clientOptions) {
		c.expireTokens = true
	}
}

// ForceCapExceeded requests a cap limit from the B2 service. This causes all
// uploads to be treated as if they would exceed the configured B2 capacity.
func ForceCapExceeded() ClientOption {
	return func(c *clientOptions) {
		c.capExceeded = true
	}
}

func client(cl *Client) ClientOption {
	return func(c *clientOptions) {
		c.client = cl
	}
}

type clientTransport struct {
	client *Client
	rt     http.RoundTripper
}

func (ct *clientTransport) RoundTrip(r *http.Request) (*http.Response, error) {
	m := r.Header.Get("X-Blazer-Method")
	t := ct.rt
	if t == nil {
		t = http.DefaultTransport
	}
	b := time.Now()
	resp, err := t.RoundTrip(r)
	e := time.Now()
	if err != nil {
		return resp, err
	}
	if m != "" && ct.client != nil {
		ct.client.slock.Lock()
		m := method{
			name:     m,
			duration: e.Sub(b),
			status:   resp.StatusCode,
		}
		for _, counter := range ct.client.sMethods {
			counter.record(m)
		}
		ct.client.slock.Unlock()
	}
	return resp, nil
}

// Bucket is a reference to a B2 bucket.
type Bucket struct {
	b beBucketInterface
	r beRootInterface

	c       *Client
	urlPool *urlPool
}

type BucketType string

const (
	UnknownType BucketType = ""
	Private                = "allPrivate"
	Public                 = "allPublic"
	Snapshot               = "snapshot"
)

// BucketAttrs holds a bucket's metadata attributes.
type BucketAttrs struct {
	// Type lists or sets the new bucket type. If Type is UnknownType during a
	// bucket.Update, the type is not changed.
	Type BucketType

	// Info records user data, limited to ten keys. If nil during a
	// bucket.Update, the existing bucket info is not modified. A bucket's
	// metadata can be removed by updating with an empty map.
	Info map[string]string

	// Reports or sets bucket lifecycle rules. If nil during a bucket.Update,
	// the rules are not modified. A bucket's rules can be removed by updating
	// with an empty slice.
	LifecycleRules []LifecycleRule
}

// A LifecycleRule describes an object's life cycle, namely how many days after
// uploading an object should be hidden, and after how many days hidden an
// object should be deleted. Multiple rules may not apply to the same file or
// set of files. Be careful when using this feature; it can (is designed to)
// delete your data.
type LifecycleRule struct {
	// Prefix specifies all the files in the bucket to which this rule applies.
	Prefix string

	// DaysNewUntilHidden specifies the number of days after which a file
	// will automatically be hidden. 0 means "do not automatically hide new
	// files".
	DaysNewUntilHidden int

	// DaysHiddenUntilDeleted specifies the number of days after which a hidden
	// file is deleted. 0 means "do not automatically delete hidden files".
	DaysHiddenUntilDeleted int
}

type b2err struct {
	err              error
	notFoundErr      bool
	isUpdateConflict bool
}

func (e b2err) Error() string {
	return e.err.Error()
}

// IsNotExist reports whether a given error indicates that an object or bucket
// does not exist.
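//
// A typical usage sketch ("foo" is a hypothetical object name):
//
//	if _, err := bucket.Object("foo").Attrs(ctx); b2.IsNotExist(err) {
//		// The object has not been uploaded, or has been deleted.
//	}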
func IsNotExist(err error) bool { berr, ok := err.(b2err) if !ok { return false } return berr.notFoundErr } const uploadURLPoolSize = 100 type urlPool struct { ch chan beURLInterface } func newURLPool() *urlPool { return &urlPool{ch: make(chan beURLInterface, uploadURLPoolSize)} } func (p *urlPool) get() beURLInterface { select { case ue := <-p.ch: // if the channel has an upload URL available, use that return ue default: // otherwise return nil, a new upload URL needs to be generated return nil } } func (p *urlPool) put(u beURLInterface) { select { case p.ch <- u: // put the URL back if possible default: // if the channel is full, throw it away } } // Bucket returns a bucket if it exists. func (c *Client) Bucket(ctx context.Context, name string) (*Bucket, error) { buckets, err := c.backend.listBuckets(ctx, name) if err != nil { return nil, err } for _, bucket := range buckets { if bucket.name() == name { return &Bucket{ b: bucket, r: c.backend, c: c, urlPool: newURLPool(), }, nil } } return nil, b2err{ err: fmt.Errorf("%s: bucket not found", name), notFoundErr: true, } } // NewBucket returns a bucket. The bucket is created with the given attributes // if it does not already exist. If attrs is nil, it is created as a private // bucket with no info metadata and no lifecycle rules. func (c *Client) NewBucket(ctx context.Context, name string, attrs *BucketAttrs) (*Bucket, error) { buckets, err := c.backend.listBuckets(ctx, name) if err != nil { return nil, err } for _, bucket := range buckets { if bucket.name() == name { return &Bucket{ b: bucket, r: c.backend, c: c, urlPool: newURLPool(), }, nil } } if attrs == nil { attrs = &BucketAttrs{Type: Private} } b, err := c.backend.createBucket(ctx, name, string(attrs.Type), attrs.Info, attrs.LifecycleRules) if err != nil { return nil, err } return &Bucket{ b: b, r: c.backend, c: c, urlPool: newURLPool(), }, err } // ListBuckets returns all the available buckets. func (c *Client) ListBuckets(ctx context.Context) ([]*Bucket, error) { bs, err := c.backend.listBuckets(ctx, "") if err != nil { return nil, err } var buckets []*Bucket for _, b := range bs { buckets = append(buckets, &Bucket{ b: b, r: c.backend, c: c, urlPool: newURLPool(), }) } return buckets, nil } // IsUpdateConflict reports whether a given error is the result of a bucket // update conflict. func IsUpdateConflict(err error) bool { e, ok := err.(b2err) if !ok { return false } return e.isUpdateConflict } // Update modifies the given bucket with new attributes. It is possible that // this method could fail with an update conflict, in which case you should // retrieve the latest bucket attributes with Attrs and try again. func (b *Bucket) Update(ctx context.Context, attrs *BucketAttrs) error { return b.b.updateBucket(ctx, attrs) } // Attrs retrieves and returns the current bucket's attributes. func (b *Bucket) Attrs(ctx context.Context) (*BucketAttrs, error) { bucket, err := b.c.Bucket(ctx, b.Name()) if err != nil { return nil, err } b.b = bucket.b return b.b.attrs(), nil } var bNotExist = regexp.MustCompile("Bucket.*does not exist") // Delete removes a bucket. The bucket must be empty. func (b *Bucket) Delete(ctx context.Context) error { err := b.b.deleteBucket(ctx) if err == nil { return err } // So, the B2 documentation disagrees with the implementation here, and the // error code is not really helpful. If the bucket doesn't exist, the error is // 400, not 404, and the string is "Bucket does not exist". However, the // documentation says it will be "Bucket id does not exist". 
In case // they update the implementation to match the documentation, we're just going // to regexp over the error message and hope it's okay. if bNotExist.MatchString(err.Error()) { return b2err{ err: err, notFoundErr: true, } } return err } // BaseURL returns the base URL to use for all files uploaded to this bucket. func (b *Bucket) BaseURL() string { return b.b.baseURL() } // Name returns the bucket's name. func (b *Bucket) Name() string { return b.b.name() } // Object represents a B2 object. type Object struct { attrs *Attrs name string f beFileInterface b *Bucket } // Attrs holds an object's metadata. type Attrs struct { Name string // Not used on upload. Size int64 // Not used on upload. ContentType string // Used on upload, default is "application/octet-stream". Status ObjectState // Not used on upload. UploadTimestamp time.Time // Not used on upload. SHA1 string // Can be "none" for large files. If set on upload, will be used for large files. LastModified time.Time // If present, and there are fewer than 10 keys in the Info field, this is saved on upload. Info map[string]string // Save arbitrary metadata on upload, but limited to 10 keys. } // Name returns an object's name func (o *Object) Name() string { return o.name } // ID returns an object's id func (o *Object) ID() string { return o.f.id() } // Attrs returns an object's attributes. func (o *Object) Attrs(ctx context.Context) (*Attrs, error) { if err := o.ensure(ctx); err != nil { return nil, err } fi, err := o.f.getFileInfo(ctx) if err != nil { return nil, err } name, sha, size, ct, info, st, stamp := fi.stats() var state ObjectState switch st { case "upload": state = Uploaded case "start": state = Started case "hide": state = Hider case "folder": state = Folder } var mtime time.Time if v, ok := info["src_last_modified_millis"]; ok { ms, err := strconv.ParseInt(v, 10, 64) if err != nil { return nil, err } mtime = time.Unix(ms/1e3, (ms%1e3)*1e6) delete(info, "src_last_modified_millis") } if v, ok := info["large_file_sha1"]; ok { sha = v } return &Attrs{ Name: name, Size: size, ContentType: ct, UploadTimestamp: stamp, SHA1: sha, Info: info, Status: state, LastModified: mtime, }, nil } // ObjectState represents the various states an object can be in. type ObjectState int const ( Unknown ObjectState = iota // Started represents a large upload that has been started but not finished // or canceled. Started // Uploaded represents an object that has finished uploading and is complete. Uploaded // Hider represents an object that exists only to hide another object. It // cannot in itself be downloaded and, in particular, is not a hidden object. Hider // Folder is a special state given to non-objects that are returned during a // List call with a ListDelimiter option. Folder ) // Object returns a reference to the named object in the bucket. Hidden // objects cannot be referenced in this manner; they can only be found by // finding the appropriate reference in ListObjects. func (b *Bucket) Object(name string) *Object { return &Object{ name: name, b: b, } } // URL returns the full URL to the given object. func (o *Object) URL() string { return fmt.Sprintf("%s/file/%s/%s", o.b.BaseURL(), o.b.Name(), o.name) } // NewWriter returns a new writer for the given object. Objects that are // overwritten are not deleted, but are "hidden". // // Callers must close the writer when finished and check the error status. 
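//
// A minimal usage sketch, mirroring the README example (f is any io.Reader):
//
//	w := o.NewWriter(ctx)
//	if _, err := io.Copy(w, f); err != nil {
//		w.Close()
//		return err
//	}
//	return w.Close()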
func (o *Object) NewWriter(ctx context.Context, opts ...WriterOption) *Writer { ctx, cancel := context.WithCancel(ctx) w := &Writer{ o: o, name: o.name, ctx: ctx, cancel: cancel, } for _, f := range o.b.c.opts.writerOpts { f(w) } for _, f := range opts { f(w) } return w } // NewRangeReader returns a reader for the given object, reading up to length // bytes. If length is negative, the rest of the object is read. func (o *Object) NewRangeReader(ctx context.Context, offset, length int64) *Reader { ctx, cancel := context.WithCancel(ctx) return &Reader{ ctx: ctx, cancel: cancel, o: o, name: o.name, chunks: make(map[int]*rchunk), length: length, offset: offset, } } // NewReader returns a reader for the given object. func (o *Object) NewReader(ctx context.Context) *Reader { return o.NewRangeReader(ctx, 0, -1) } func (o *Object) ensure(ctx context.Context) error { if o.f == nil { f, err := o.b.getObject(ctx, o.name) if err != nil { return err } o.f = f.f } return nil } // Delete removes the given object. func (o *Object) Delete(ctx context.Context) error { if err := o.ensure(ctx); err != nil { return err } return o.f.deleteFileVersion(ctx) } // Hide hides the object from name-based listing. func (o *Object) Hide(ctx context.Context) error { if err := o.ensure(ctx); err != nil { return err } _, err := o.b.b.hideFile(ctx, o.name) return err } // Reveal unhides (if hidden) the named object. If there are multiple objects // of a given name, it will reveal the most recent. func (b *Bucket) Reveal(ctx context.Context, name string) error { iter := b.List(ctx, ListPrefix(name), ListHidden()) for iter.Next() { obj := iter.Object() if obj.Name() == name { if obj.f.status() == "hide" { return obj.Delete(ctx) } return nil } if obj.Name() > name { break } } return b2err{err: fmt.Errorf("%s: not found", name), notFoundErr: true} } // I don't want to import all of ioutil for this. type discard struct{} func (discard) Write(p []byte) (int, error) { return len(p), nil } func (b *Bucket) getObject(ctx context.Context, name string) (*Object, error) { fr, err := b.b.downloadFileByName(ctx, name, 0, 0, true) if err != nil { return nil, err } io.Copy(discard{}, fr) fr.Close() return &Object{ name: name, f: b.b.file(fr.id(), name), b: b, }, nil } // AuthToken returns an authorization token that can be used to access objects // in a private bucket. Only objects that begin with prefix can be accessed. // The token expires after the given duration. func (b *Bucket) AuthToken(ctx context.Context, prefix string, valid time.Duration) (string, error) { return b.b.getDownloadAuthorization(ctx, prefix, valid, "") } // AuthURL returns a URL for the given object with embedded token and, // possibly, b2ContentDisposition arguments. Leave b2cd blank for no content // disposition. func (o *Object) AuthURL(ctx context.Context, valid time.Duration, b2cd string) (*url.URL, error) { token, err := o.b.b.getDownloadAuthorization(ctx, o.name, valid, b2cd) if err != nil { return nil, err } urlString := fmt.Sprintf("%s?Authorization=%s", o.URL(), url.QueryEscape(token)) if b2cd != "" { urlString = fmt.Sprintf("%s&b2ContentDisposition=%s", urlString, url.QueryEscape(b2cd)) } u, err := url.Parse(urlString) if err != nil { return nil, err } return u, nil } blazer-0.6.1/b2/b2_test.go000066400000000000000000000505431451327606200152060ustar00rootroot00000000000000// Copyright 2016, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package b2 import ( "bytes" "context" "crypto/sha1" "fmt" "io" "io/ioutil" "net/http" "sort" "strings" "sync" "testing" "time" ) const ( bucketName = "b2-tests" smallFileName = "Teeny Tiny" largeFileName = "BigBytes" ) var gmux = &sync.Mutex{} type testError struct { retry bool backoff time.Duration reauth bool reupload bool } func (t testError) Error() string { return fmt.Sprintf("retry %v; backoff %v; reauth %v; reupload %v", t.retry, t.backoff, t.reauth, t.reupload) } type errCont struct { errMap map[string]map[int]error opMap map[string]int } func (e *errCont) getError(name string) error { if e.errMap == nil { return nil } if e.opMap == nil { e.opMap = make(map[string]int) } i := e.opMap[name] e.opMap[name]++ return e.errMap[name][i] } type testRoot struct { errs *errCont auths int bucketMap map[string]map[string]string } func (t *testRoot) authorizeAccount(context.Context, string, string, clientOptions) error { t.auths++ return nil } func (t *testRoot) backoff(err error) time.Duration { e, ok := err.(testError) if !ok { return 0 } return e.backoff } func (t *testRoot) reauth(err error) bool { e, ok := err.(testError) if !ok { return false } return e.reauth } func (t *testRoot) reupload(err error) bool { e, ok := err.(testError) if !ok { return false } return e.reupload } func (t *testRoot) transient(err error) bool { e, ok := err.(testError) if !ok { return false } return e.retry || e.reupload || e.backoff > 0 } func (t *testRoot) createKey(context.Context, string, []string, time.Duration, string, string) (b2KeyInterface, error) { return nil, nil } func (t *testRoot) listKeys(context.Context, int, string) ([]b2KeyInterface, string, error) { return nil, "", nil } func (t *testRoot) createBucket(_ context.Context, name, _ string, _ map[string]string, _ []LifecycleRule) (b2BucketInterface, error) { if err := t.errs.getError("createBucket"); err != nil { return nil, err } if _, ok := t.bucketMap[name]; ok { return nil, fmt.Errorf("%s: bucket exists", name) } m := make(map[string]string) t.bucketMap[name] = m return &testBucket{ n: name, errs: t.errs, files: m, }, nil } func (t *testRoot) listBuckets(context.Context, string) ([]b2BucketInterface, error) { var b []b2BucketInterface for k, v := range t.bucketMap { b = append(b, &testBucket{ n: k, errs: t.errs, files: v, }) } return b, nil } type testBucket struct { n string errs *errCont files map[string]string } func (t *testBucket) name() string { return t.n } func (t *testBucket) btype() string { return "allPrivate" } func (t *testBucket) attrs() *BucketAttrs { return nil } func (t *testBucket) deleteBucket(context.Context) error { return nil } func (t *testBucket) updateBucket(context.Context, *BucketAttrs) error { return nil } func (t *testBucket) id() string { return "" } func (t *testBucket) getUploadURL(context.Context) (b2URLInterface, error) { if err := t.errs.getError("getUploadURL"); err != nil { return nil, err } return &testURL{ files: t.files, }, nil } func (t *testBucket) startLargeFile(_ context.Context, name, _ string, _ map[string]string) (b2LargeFileInterface, error) { return &testLargeFile{ name: name, parts: 
make(map[int][]byte), files: t.files, errs: t.errs, }, nil } func (t *testBucket) listFileNames(ctx context.Context, count int, cont, pfx, del string) ([]b2FileInterface, string, error) { var f []string gmux.Lock() defer gmux.Unlock() for name := range t.files { f = append(f, name) } sort.Strings(f) idx := sort.SearchStrings(f, cont) var b []b2FileInterface var next string for i := idx; i < len(f) && i-idx < count; i++ { b = append(b, &testFile{ n: f[i], s: int64(len(t.files[f[i]])), files: t.files, }) if i+1 < len(f) { next = f[i+1] } if i+1 == len(f) { next = "" } } return b, next, nil } func (t *testBucket) listFileVersions(ctx context.Context, count int, a, b, c, d string) ([]b2FileInterface, string, string, error) { x, y, z := t.listFileNames(ctx, count, a, c, d) return x, y, "", z } func (t *testBucket) listUnfinishedLargeFiles(ctx context.Context, count int, cont string) ([]b2FileInterface, string, error) { return nil, "", fmt.Errorf("testBucket.listUnfinishedLargeFiles(ctx, %d, %q): not implemented", count, cont) } func (t *testBucket) downloadFileByName(_ context.Context, name string, offset, size int64, _ bool) (b2FileReaderInterface, error) { gmux.Lock() defer gmux.Unlock() f := t.files[name] end := int(offset + size) if end >= len(f) { end = len(f) } if int(offset) >= len(f) { return nil, errNoMoreContent } return &testFileReader{ b: ioutil.NopCloser(bytes.NewBufferString(f[offset:end])), s: end - int(offset), n: name, }, nil } func (t *testBucket) hideFile(context.Context, string) (b2FileInterface, error) { return nil, nil } func (t *testBucket) getDownloadAuthorization(context.Context, string, time.Duration, string) (string, error) { return "", nil } func (t *testBucket) baseURL() string { return "" } func (t *testBucket) file(id, name string) b2FileInterface { return nil } type testURL struct { files map[string]string } func (t *testURL) reload(context.Context) error { return nil } func (t *testURL) uploadFile(_ context.Context, r io.Reader, _ int, name, _, _ string, _ map[string]string) (b2FileInterface, error) { buf := &bytes.Buffer{} if _, err := io.Copy(buf, r); err != nil { return nil, err } gmux.Lock() defer gmux.Unlock() t.files[name] = buf.String() return &testFile{ n: name, s: int64(len(t.files[name])), files: t.files, }, nil } type testLargeFile struct { name string parts map[int][]byte files map[string]string errs *errCont } func (t *testLargeFile) finishLargeFile(context.Context) (b2FileInterface, error) { var total []byte gmux.Lock() defer gmux.Unlock() for i := 1; i <= len(t.parts); i++ { total = append(total, t.parts[i]...) 
} t.files[t.name] = string(total) return &testFile{ n: t.name, s: int64(len(total)), files: t.files, }, nil } func (t *testLargeFile) getUploadPartURL(context.Context) (b2FileChunkInterface, error) { gmux.Lock() defer gmux.Unlock() return &testFileChunk{ parts: t.parts, errs: t.errs, }, nil } func (t *testLargeFile) cancel(ctx context.Context) error { return ctx.Err() } type testFileChunk struct { parts map[int][]byte errs *errCont } func (t *testFileChunk) reload(context.Context) error { return nil } func (t *testFileChunk) uploadPart(_ context.Context, r io.Reader, _ string, _, index int) (int, error) { if err := t.errs.getError("uploadPart"); err != nil { return 0, err } buf := &bytes.Buffer{} i, err := io.Copy(buf, r) if err != nil { return int(i), err } gmux.Lock() defer gmux.Unlock() t.parts[index] = buf.Bytes() return int(i), nil } type testFile struct { n string s int64 t time.Time a string files map[string]string } func (t *testFile) id() string { return t.n } func (t *testFile) name() string { return t.n } func (t *testFile) size() int64 { return t.s } func (t *testFile) timestamp() time.Time { return t.t } func (t *testFile) status() string { return t.a } func (t *testFile) compileParts(int64, map[int]string) b2LargeFileInterface { panic("not implemented") } func (t *testFile) getFileInfo(context.Context) (b2FileInfoInterface, error) { return nil, nil } func (t *testFile) listParts(context.Context, int, int) ([]b2FilePartInterface, int, error) { return nil, 0, nil } func (t *testFile) deleteFileVersion(context.Context) error { gmux.Lock() defer gmux.Unlock() delete(t.files, t.n) return nil } type testFileReader struct { b io.ReadCloser s int n string } func (t *testFileReader) Read(p []byte) (int, error) { return t.b.Read(p) } func (t *testFileReader) Close() error { return nil } func (t *testFileReader) stats() (int, string, string, map[string]string) { return t.s, "", "", nil } func (t *testFileReader) id() string { return t.n } type zReader struct{} var pattern = []byte{0x02, 0x80, 0xff, 0x1a, 0xcc, 0x63, 0x22} func (zReader) Read(p []byte) (int, error) { for i := 0; i+len(pattern) < len(p); i += len(pattern) { copy(p[i:], pattern) } return len(p), nil } type zReadSeeker struct { size int64 pos int64 } func (rs *zReadSeeker) Read(p []byte) (int, error) { for i := rs.pos; ; i++ { j := int(i - rs.pos) if j >= len(p) || i >= rs.size { var rtn error if i >= rs.size { rtn = io.EOF } rs.pos = i return j, rtn } f := int(i) % len(pattern) p[j] = pattern[f] } } func (rs *zReadSeeker) Seek(offset int64, whence int) (int64, error) { switch whence { case io.SeekStart: rs.pos = offset case io.SeekEnd: rs.pos = rs.size + offset } return rs.pos, nil } func TestReaderFrom(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() table := []struct { size, pos int64 }{ { size: 10, }, } for _, e := range table { client := &Client{ backend: &beRoot{ b2i: &testRoot{ bucketMap: make(map[string]map[string]string), errs: &errCont{}, }, }, } bucket, err := client.NewBucket(ctx, bucketName, &BucketAttrs{Type: Private}) if err != nil { t.Fatal(err) } defer func() { if err := bucket.Delete(ctx); err != nil { t.Error(err) } }() r := &zReadSeeker{pos: e.pos, size: e.size} w := bucket.Object("writer").NewWriter(ctx) n, err := w.ReadFrom(r) if err != nil { t.Errorf("ReadFrom(): %v", err) } if n != e.size { t.Errorf("ReadFrom(): got %d bytes, wanted %d bytes", n, e.size) } } } func TestReauth(t *testing.T) { ctx := context.Background() ctx, cancel := 
context.WithTimeout(ctx, 10*time.Second) defer cancel() root := &testRoot{ bucketMap: make(map[string]map[string]string), errs: &errCont{ errMap: map[string]map[int]error{ "createBucket": {0: testError{reauth: true}}, }, }, } client := &Client{ backend: &beRoot{ b2i: root, }, } auths := root.auths if _, err := client.NewBucket(ctx, "fun", &BucketAttrs{Type: Private}); err != nil { t.Errorf("bucket should not err, got %v", err) } if root.auths != auths+1 { t.Errorf("client should have re-authenticated; did not") } } func TestBackoff(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() var calls []time.Duration ch := make(chan time.Time) close(ch) after = func(d time.Duration) <-chan time.Time { calls = append(calls, d) return ch } table := []struct { root *testRoot want int }{ { root: &testRoot{ bucketMap: make(map[string]map[string]string), errs: &errCont{ errMap: map[string]map[int]error{ "createBucket": { 0: testError{backoff: time.Second}, 1: testError{backoff: 2 * time.Second}, }, }, }, }, want: 2, }, { root: &testRoot{ bucketMap: make(map[string]map[string]string), errs: &errCont{ errMap: map[string]map[int]error{ "getUploadURL": { 0: testError{retry: true}, }, }, }, }, want: 1, }, } var total int for _, ent := range table { client := &Client{ backend: &beRoot{ b2i: ent.root, }, } b, err := client.NewBucket(ctx, "fun", &BucketAttrs{Type: Private}) if err != nil { t.Fatal(err) } o := b.Object("foo") w := o.NewWriter(ctx) if _, err := io.Copy(w, bytes.NewBufferString("foo")); err != nil { t.Fatal(err) } if err := w.Close(); err != nil { t.Fatal(err) } total += ent.want } if len(calls) != total { t.Errorf("got %d calls, wanted %d", len(calls), total) } } func TestBackoffWithoutRetryAfter(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() var calls []time.Duration ch := make(chan time.Time) close(ch) after = func(d time.Duration) <-chan time.Time { calls = append(calls, d) return ch } root := &testRoot{ bucketMap: make(map[string]map[string]string), errs: &errCont{ errMap: map[string]map[int]error{ "createBucket": { 0: testError{retry: true}, 1: testError{retry: true}, }, }, }, } client := &Client{ backend: &beRoot{ b2i: root, }, } if _, err := client.NewBucket(ctx, "fun", &BucketAttrs{Type: Private}); err != nil { t.Errorf("bucket should not err, got %v", err) } if len(calls) != 2 { t.Errorf("wrong number of backoff calls; got %d, want 2", len(calls)) } } type badTransport struct{} func (badTransport) RoundTrip(r *http.Request) (*http.Response, error) { return &http.Response{ Status: "700 What", StatusCode: 700, Body: ioutil.NopCloser(bytes.NewBufferString("{}")), Request: r, }, nil } func TestCustomTransport(t *testing.T) { ctx := context.Background() // Sorta fragile but... 
_, err := NewClient(ctx, "abcd", "efgh", Transport(badTransport{})) if err == nil { t.Error("NewClient returned successfully, expected an error") } if !strings.Contains(err.Error(), "700") { t.Errorf("Expected nonsense error code 700, got %v", err) } } func TestReaderDoubleClose(t *testing.T) { ctx := context.Background() client := &Client{ backend: &beRoot{ b2i: &testRoot{ bucketMap: make(map[string]map[string]string), errs: &errCont{}, }, }, } bucket, err := client.NewBucket(ctx, "bucket", &BucketAttrs{Type: Private}) if err != nil { t.Fatal(err) } o, _, err := writeFile(ctx, bucket, "file", 10, 10) if err != nil { t.Fatal(err) } r := o.NewReader(ctx) // Read to EOF, and then read some more. if _, err := io.Copy(ioutil.Discard, r); err != nil { t.Fatal(err) } if _, err := io.Copy(ioutil.Discard, r); err != nil { t.Fatal(err) } } func TestReadWrite(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() client := &Client{ backend: &beRoot{ b2i: &testRoot{ bucketMap: make(map[string]map[string]string), errs: &errCont{}, }, }, } bucket, err := client.NewBucket(ctx, bucketName, &BucketAttrs{Type: Private}) if err != nil { t.Fatal(err) } defer func() { if err := bucket.Delete(ctx); err != nil { t.Error(err) } }() sobj, wsha, err := writeFile(ctx, bucket, smallFileName, 1e6+42, 1e8) if err != nil { t.Fatal(err) } defer func() { if err := sobj.Delete(ctx); err != nil { t.Error(err) } }() if err := readFile(ctx, sobj, wsha, 1e5, 10); err != nil { t.Error(err) } lobj, wshaL, err := writeFile(ctx, bucket, largeFileName, 1e6-1e5, 1e4) if err != nil { t.Fatal(err) } defer func() { if err := lobj.Delete(ctx); err != nil { t.Error(err) } }() if err := readFile(ctx, lobj, wshaL, 1e7, 10); err != nil { t.Error(err) } } func TestLargeFileCancellation(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() client := &Client{ backend: &beRoot{ b2i: &testRoot{ bucketMap: make(map[string]map[string]string), errs: &errCont{}, }, }, } b, err := client.NewBucket(ctx, bucketName, nil) if err != nil { t.Fatal(err) } var called bool w := b.Object("foo").NewWriter(ctx, WithCancelOnError(func() context.Context { return context.Background() }, func(err error) { called = true if err != nil { t.Errorf("expected no error, got %v", err) } })) w.ChunkSize = 10 for i := 0; i < 10; i++ { r := io.LimitReader(zReader{}, 20) if _, err := io.Copy(w, r); err != nil && err != context.Canceled { t.Errorf("Copy: %v", err) } cancel() } if err := w.Close(); err != context.Canceled { t.Errorf("expected cancelled context; got %v", err) } if !called { t.Errorf("error callback not called") } } func TestReadRangeReturnsRight(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() client := &Client{ backend: &beRoot{ b2i: &testRoot{ bucketMap: make(map[string]map[string]string), errs: &errCont{}, }, }, } bucket, err := client.NewBucket(ctx, bucketName, &BucketAttrs{Type: Private}) if err != nil { t.Fatal(err) } defer func() { if err := bucket.Delete(ctx); err != nil { t.Error(err) } }() obj, _, err := writeFile(ctx, bucket, "file", 1e6+42, 1e8) if err != nil { t.Fatal(err) } r := obj.NewRangeReader(ctx, 200, 1400) r.ChunkSize = 1000 i, err := io.Copy(ioutil.Discard, r) if err != nil { t.Error(err) } if i != 1400 { t.Errorf("NewRangeReader(_, 200, 1400): want 1400, got %d", i) } } func TestWriterReturnsError(t *testing.T) { ctx := context.Background() ctx, cancel := 
context.WithTimeout(ctx, 10*time.Second) defer cancel() client := &Client{ backend: &beRoot{ b2i: &testRoot{ bucketMap: make(map[string]map[string]string), errs: &errCont{ errMap: map[string]map[int]error{ "uploadPart": { 0: testError{}, 1: testError{}, 2: testError{}, 3: testError{}, 4: testError{}, 5: testError{}, 6: testError{}, }, }, }, }, }, } bucket, err := client.NewBucket(ctx, bucketName, &BucketAttrs{Type: Private}) if err != nil { t.Fatal(err) } w := bucket.Object("test").NewWriter(ctx) r := io.LimitReader(zReader{}, 1e7) w.ChunkSize = 1e4 w.ConcurrentUploads = 4 if _, err := io.Copy(w, r); err == nil { t.Fatalf("io.Copy: should have returned an error") } } func TestFileBuffer(t *testing.T) { r := io.LimitReader(zReader{}, 1e8) w, err := newFileBuffer("") if err != nil { t.Fatal(err) } defer w.Close() if _, err := io.Copy(w, r); err != nil { t.Fatal(err) } bReader, err := w.Reader() if err != nil { t.Fatal(err) } hsh := sha1.New() if _, err := io.Copy(hsh, bReader); err != nil { t.Fatal(err) } hshText := fmt.Sprintf("%x", hsh.Sum(nil)) if hshText != w.Hash() { t.Errorf("hashes are not equal: bufferWriter is %q, read buffer is %q", w.Hash(), hshText) } } func TestNonBuffer(t *testing.T) { table := []struct { str string off int64 len int64 want string }{ { str: "a string", off: 0, len: 3, want: "a s", }, { str: "a string", off: 3, len: 1, want: "t", }, { str: "a string", off: 3, len: 5, want: "tring", }, } for _, e := range table { nb := newNonBuffer(strings.NewReader(e.str), e.off, e.len) want := fmt.Sprintf("%s%x", e.want, sha1.Sum([]byte(e.str[int(e.off):int(e.off+e.len)]))) r, err := nb.Reader() if err != nil { t.Error(err) continue } got, err := ioutil.ReadAll(r) if err != nil { t.Errorf("ioutil.ReadAll(%#v): %v", e, err) continue } if want != string(got) { t.Errorf("ioutil.ReadAll(%#v): got %q, want %q", e, string(got), want) } } } func writeFile(ctx context.Context, bucket *Bucket, name string, size int64, csize int) (*Object, string, error) { r := io.LimitReader(zReader{}, size) o := bucket.Object(name) f := o.NewWriter(ctx) h := sha1.New() w := io.MultiWriter(f, h) f.ConcurrentUploads = 5 f.ChunkSize = csize n, err := io.Copy(w, r) if err != nil { return nil, "", err } if n != size { return nil, "", fmt.Errorf("io.Copy(): wrote %d bytes; wanted %d bytes", n, size) } if err := f.Close(); err != nil { return nil, "", err } return o, fmt.Sprintf("%x", h.Sum(nil)), nil } func readFile(ctx context.Context, obj *Object, sha string, chunk, concur int) error { r := obj.NewReader(ctx) r.ChunkSize = chunk r.ConcurrentDownloads = concur h := sha1.New() if _, err := io.Copy(h, r); err != nil { return err } if err := r.Close(); err != nil { return err } rsha := fmt.Sprintf("%x", h.Sum(nil)) if sha != rsha { return fmt.Errorf("bad hash: got %s, want %s", rsha, sha) } return nil } blazer-0.6.1/b2/backend.go000066400000000000000000000451401451327606200152300ustar00rootroot00000000000000// Copyright 2016, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package b2 import ( "context" "io" "math/rand" "time" ) // This file wraps the baseline interfaces with backoff and retry semantics. type beRootInterface interface { backoff(error) time.Duration reauth(error) bool transient(error) bool reupload(error) bool authorizeAccount(context.Context, string, string, clientOptions) error reauthorizeAccount(context.Context) error createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (beBucketInterface, error) listBuckets(context.Context, string) ([]beBucketInterface, error) createKey(context.Context, string, []string, time.Duration, string, string) (beKeyInterface, error) listKeys(context.Context, int, string) ([]beKeyInterface, string, error) } type beRoot struct { account, key string b2i b2RootInterface options clientOptions } type beBucketInterface interface { name() string btype() BucketType attrs() *BucketAttrs id() string updateBucket(context.Context, *BucketAttrs) error deleteBucket(context.Context) error getUploadURL(context.Context) (beURLInterface, error) startLargeFile(ctx context.Context, name, contentType string, info map[string]string) (beLargeFileInterface, error) listFileNames(context.Context, int, string, string, string) ([]beFileInterface, string, error) listFileVersions(context.Context, int, string, string, string, string) ([]beFileInterface, string, string, error) listUnfinishedLargeFiles(context.Context, int, string) ([]beFileInterface, string, error) downloadFileByName(context.Context, string, int64, int64, bool) (beFileReaderInterface, error) hideFile(context.Context, string) (beFileInterface, error) getDownloadAuthorization(context.Context, string, time.Duration, string) (string, error) baseURL() string file(string, string) beFileInterface } type beBucket struct { b2bucket b2BucketInterface ri beRootInterface } type beURLInterface interface { uploadFile(context.Context, readResetter, int, string, string, string, map[string]string) (beFileInterface, error) } type beURL struct { b2url b2URLInterface ri beRootInterface } type beFileInterface interface { name() string id() string size() int64 timestamp() time.Time status() string deleteFileVersion(context.Context) error getFileInfo(context.Context) (beFileInfoInterface, error) listParts(context.Context, int, int) ([]beFilePartInterface, int, error) compileParts(int64, map[int]string) beLargeFileInterface } type beFile struct { b2file b2FileInterface url beURLInterface ri beRootInterface } type beLargeFileInterface interface { finishLargeFile(context.Context) (beFileInterface, error) getUploadPartURL(context.Context) (beFileChunkInterface, error) cancel(context.Context) error } type beLargeFile struct { b2largeFile b2LargeFileInterface ri beRootInterface } type beFileChunkInterface interface { reload(context.Context) error uploadPart(context.Context, readResetter, string, int, int) (int, error) } type beFileChunk struct { b2fileChunk b2FileChunkInterface ri beRootInterface } type beFileReaderInterface interface { io.ReadCloser stats() (int, string, string, map[string]string) id() string } type beFileReader struct { b2fileReader b2FileReaderInterface ri beRootInterface } type beFileInfoInterface interface { stats() (string, string, int64, string, map[string]string, string, time.Time) } type beFilePartInterface interface { number() int sha1() string size() int64 } type beFilePart struct { b2filePart b2FilePartInterface ri beRootInterface } type beFileInfo struct { name string sha string size int64 ct string info map[string]string status 
string stamp time.Time } type beKeyInterface interface { del(context.Context) error caps() []string name() string expires() time.Time secret() string id() string } type beKey struct { b2i beRootInterface k b2KeyInterface } func (r *beRoot) backoff(err error) time.Duration { return r.b2i.backoff(err) } func (r *beRoot) reauth(err error) bool { return r.b2i.reauth(err) } func (r *beRoot) reupload(err error) bool { return r.b2i.reupload(err) } func (r *beRoot) transient(err error) bool { return r.b2i.transient(err) } func (r *beRoot) authorizeAccount(ctx context.Context, account, key string, c clientOptions) error { f := func() error { if err := r.b2i.authorizeAccount(ctx, account, key, c); err != nil { return err } r.account = account r.key = key r.options = c return nil } return withBackoff(ctx, r, f) } func (r *beRoot) reauthorizeAccount(ctx context.Context) error { return r.authorizeAccount(ctx, r.account, r.key, r.options) } func (r *beRoot) createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (beBucketInterface, error) { var bi beBucketInterface f := func() error { g := func() error { bucket, err := r.b2i.createBucket(ctx, name, btype, info, rules) if err != nil { return err } bi = &beBucket{ b2bucket: bucket, ri: r, } return nil } return withReauth(ctx, r, g) } if err := withBackoff(ctx, r, f); err != nil { return nil, err } return bi, nil } func (r *beRoot) listBuckets(ctx context.Context, name string) ([]beBucketInterface, error) { var buckets []beBucketInterface f := func() error { g := func() error { bs, err := r.b2i.listBuckets(ctx, name) if err != nil { return err } for _, b := range bs { buckets = append(buckets, &beBucket{ b2bucket: b, ri: r, }) } return nil } return withReauth(ctx, r, g) } if err := withBackoff(ctx, r, f); err != nil { return nil, err } return buckets, nil } func (r *beRoot) createKey(ctx context.Context, name string, caps []string, valid time.Duration, bucketID string, prefix string) (beKeyInterface, error) { var k *beKey f := func() error { g := func() error { got, err := r.b2i.createKey(ctx, name, caps, valid, bucketID, prefix) if err != nil { return err } k = &beKey{ b2i: r, k: got, } return nil } return withReauth(ctx, r, g) } if err := withBackoff(ctx, r, f); err != nil { return nil, err } return k, nil } func (r *beRoot) listKeys(ctx context.Context, max int, next string) ([]beKeyInterface, string, error) { var keys []beKeyInterface var cur string f := func() error { g := func() error { got, n, err := r.b2i.listKeys(ctx, max, next) if err != nil { return err } cur = n for _, g := range got { keys = append(keys, &beKey{ b2i: r, k: g, }) } return nil } return withReauth(ctx, r, g) } if err := withBackoff(ctx, r, f); err != nil { return nil, "", err } return keys, cur, nil } func (b *beBucket) name() string { return b.b2bucket.name() } func (b *beBucket) btype() BucketType { return BucketType(b.b2bucket.btype()) } func (b *beBucket) attrs() *BucketAttrs { return b.b2bucket.attrs() } func (b *beBucket) id() string { return b.b2bucket.id() } func (b *beBucket) updateBucket(ctx context.Context, attrs *BucketAttrs) error { f := func() error { g := func() error { return b.b2bucket.updateBucket(ctx, attrs) } return withReauth(ctx, b.ri, g) } return withBackoff(ctx, b.ri, f) } func (b *beBucket) deleteBucket(ctx context.Context) error { f := func() error { g := func() error { return b.b2bucket.deleteBucket(ctx) } return withReauth(ctx, b.ri, g) } return withBackoff(ctx, b.ri, f) } func (b *beBucket) getUploadURL(ctx 
context.Context) (beURLInterface, error) { var url beURLInterface f := func() error { g := func() error { u, err := b.b2bucket.getUploadURL(ctx) if err != nil { return err } url = &beURL{ b2url: u, ri: b.ri, } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, err } return url, nil } func (b *beBucket) startLargeFile(ctx context.Context, name, ct string, info map[string]string) (beLargeFileInterface, error) { var file beLargeFileInterface f := func() error { g := func() error { f, err := b.b2bucket.startLargeFile(ctx, name, ct, info) if err != nil { return err } file = &beLargeFile{ b2largeFile: f, ri: b.ri, } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, err } return file, nil } func (b *beBucket) listFileNames(ctx context.Context, count int, continuation, prefix, delimiter string) ([]beFileInterface, string, error) { var cont string var files []beFileInterface f := func() error { g := func() error { fs, c, err := b.b2bucket.listFileNames(ctx, count, continuation, prefix, delimiter) if err != nil { return err } cont = c for _, f := range fs { files = append(files, &beFile{ b2file: f, ri: b.ri, }) } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, "", err } return files, cont, nil } func (b *beBucket) listFileVersions(ctx context.Context, count int, nextName, nextID, prefix, delimiter string) ([]beFileInterface, string, string, error) { var name, id string var files []beFileInterface f := func() error { g := func() error { fs, n, d, err := b.b2bucket.listFileVersions(ctx, count, nextName, nextID, prefix, delimiter) if err != nil { return err } name = n id = d for _, f := range fs { files = append(files, &beFile{ b2file: f, ri: b.ri, }) } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, "", "", err } return files, name, id, nil } func (b *beBucket) listUnfinishedLargeFiles(ctx context.Context, count int, continuation string) ([]beFileInterface, string, error) { var cont string var files []beFileInterface f := func() error { g := func() error { fs, c, err := b.b2bucket.listUnfinishedLargeFiles(ctx, count, continuation) if err != nil { return err } cont = c for _, f := range fs { files = append(files, &beFile{ b2file: f, ri: b.ri, }) } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, "", err } return files, cont, nil } func (b *beBucket) downloadFileByName(ctx context.Context, name string, offset, size int64, header bool) (beFileReaderInterface, error) { var reader beFileReaderInterface f := func() error { g := func() error { fr, err := b.b2bucket.downloadFileByName(ctx, name, offset, size, header) if err != nil { return err } reader = &beFileReader{ b2fileReader: fr, ri: b.ri, } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, err } return reader, nil } func (b *beBucket) hideFile(ctx context.Context, name string) (beFileInterface, error) { var file beFileInterface f := func() error { g := func() error { f, err := b.b2bucket.hideFile(ctx, name) if err != nil { return err } file = &beFile{ b2file: f, ri: b.ri, } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, err } return file, nil } func (b *beBucket) getDownloadAuthorization(ctx context.Context, p string, v time.Duration, s 
string) (string, error) { var tok string f := func() error { g := func() error { t, err := b.b2bucket.getDownloadAuthorization(ctx, p, v, s) if err != nil { return err } tok = t return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return "", err } return tok, nil } func (b *beBucket) baseURL() string { return b.b2bucket.baseURL() } func (b *beBucket) file(id, name string) beFileInterface { return &beFile{ b2file: b.b2bucket.file(id, name), ri: b.ri, } } func (b *beURL) uploadFile(ctx context.Context, r readResetter, size int, name, ct, sha1 string, info map[string]string) (beFileInterface, error) { var file beFileInterface f := func() error { if err := r.Reset(); err != nil { return err } f, err := b.b2url.uploadFile(ctx, r, size, name, ct, sha1, info) if err != nil { return err } file = &beFile{ b2file: f, url: b, ri: b.ri, } return nil } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, err } return file, nil } func (b *beFile) deleteFileVersion(ctx context.Context) error { f := func() error { g := func() error { return b.b2file.deleteFileVersion(ctx) } return withReauth(ctx, b.ri, g) } return withBackoff(ctx, b.ri, f) } func (b *beFile) size() int64 { return b.b2file.size() } func (b *beFile) name() string { return b.b2file.name() } func (b *beFile) id() string { return b.b2file.id() } func (b *beFile) timestamp() time.Time { return b.b2file.timestamp() } func (b *beFile) status() string { return b.b2file.status() } func (b *beFile) getFileInfo(ctx context.Context) (beFileInfoInterface, error) { var fileInfo beFileInfoInterface f := func() error { g := func() error { fi, err := b.b2file.getFileInfo(ctx) if err != nil { return err } name, sha, size, ct, info, status, stamp := fi.stats() fileInfo = &beFileInfo{ name: name, sha: sha, size: size, ct: ct, info: info, status: status, stamp: stamp, } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, err } return fileInfo, nil } func (b *beFile) listParts(ctx context.Context, next, count int) ([]beFilePartInterface, int, error) { var fpi []beFilePartInterface var rnxt int f := func() error { g := func() error { ps, n, err := b.b2file.listParts(ctx, next, count) if err != nil { return err } rnxt = n for _, p := range ps { fpi = append(fpi, &beFilePart{ b2filePart: p, ri: b.ri, }) } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, 0, err } return fpi, rnxt, nil } func (b *beFile) compileParts(size int64, seen map[int]string) beLargeFileInterface { return &beLargeFile{ b2largeFile: b.b2file.compileParts(size, seen), ri: b.ri, } } func (b *beLargeFile) getUploadPartURL(ctx context.Context) (beFileChunkInterface, error) { var chunk beFileChunkInterface f := func() error { g := func() error { fc, err := b.b2largeFile.getUploadPartURL(ctx) if err != nil { return err } chunk = &beFileChunk{ b2fileChunk: fc, ri: b.ri, } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, err } return chunk, nil } func (b *beLargeFile) finishLargeFile(ctx context.Context) (beFileInterface, error) { var file beFileInterface f := func() error { g := func() error { f, err := b.b2largeFile.finishLargeFile(ctx) if err != nil { return err } file = &beFile{ b2file: f, ri: b.ri, } return nil } return withReauth(ctx, b.ri, g) } if err := withBackoff(ctx, b.ri, f); err != nil { return nil, err } return file, nil } func (b *beLargeFile) cancel(ctx 
context.Context) error {
	f := func() error {
		g := func() error {
			return b.b2largeFile.cancel(ctx)
		}
		return withReauth(ctx, b.ri, g)
	}
	return withBackoff(ctx, b.ri, f)
}

func (b *beFileChunk) reload(ctx context.Context) error {
	f := func() error {
		g := func() error {
			return b.b2fileChunk.reload(ctx)
		}
		return withReauth(ctx, b.ri, g)
	}
	return withBackoff(ctx, b.ri, f)
}

func (b *beFileChunk) uploadPart(ctx context.Context, r readResetter, sha1 string, size, index int) (int, error) {
	// no re-auth; pass the error back up to the caller so they can get a new
	// upload URI and token
	// TODO: we should probably handle that here instead
	var i int
	f := func() error {
		if err := r.Reset(); err != nil {
			return err
		}
		j, err := b.b2fileChunk.uploadPart(ctx, r, sha1, size, index)
		if err != nil {
			return err
		}
		i = j
		return nil
	}
	if err := withBackoff(ctx, b.ri, f); err != nil {
		return 0, err
	}
	return i, nil
}

func (b *beFileReader) Read(p []byte) (int, error) {
	return b.b2fileReader.Read(p)
}

func (b *beFileReader) Close() error {
	return b.b2fileReader.Close()
}

func (b *beFileReader) stats() (int, string, string, map[string]string) {
	return b.b2fileReader.stats()
}

func (b *beFileReader) id() string { return b.b2fileReader.id() }

func (b *beFileInfo) stats() (string, string, int64, string, map[string]string, string, time.Time) {
	return b.name, b.sha, b.size, b.ct, b.info, b.status, b.stamp
}

func (b *beFilePart) number() int  { return b.b2filePart.number() }
func (b *beFilePart) sha1() string { return b.b2filePart.sha1() }
func (b *beFilePart) size() int64  { return b.b2filePart.size() }

func (b *beKey) del(ctx context.Context) error { return b.k.del(ctx) }
func (b *beKey) caps() []string                { return b.k.caps() }
func (b *beKey) name() string                  { return b.k.name() }
func (b *beKey) expires() time.Time            { return b.k.expires() }
func (b *beKey) secret() string                { return b.k.secret() }
func (b *beKey) id() string                    { return b.k.id() }

func jitter(d time.Duration) time.Duration {
	f := float64(d)
	f /= 50
	f += f * (rand.Float64() - 0.5)
	return time.Duration(f)
}

func getBackoff(d time.Duration) time.Duration {
	if d > 30*time.Second {
		return 30*time.Second + jitter(d)
	}
	return d*2 + jitter(d*2)
}

var after = time.After

func withBackoff(ctx context.Context, ri beRootInterface, f func() error) error {
	backoff := 500 * time.Millisecond
	for {
		err := f()
		if !ri.transient(err) {
			return err
		}
		bo := ri.backoff(err)
		if bo > 0 {
			backoff = bo
		} else {
			backoff = getBackoff(backoff)
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-after(backoff):
		}
	}
}

func withReauth(ctx context.Context, ri beRootInterface, f func() error) error {
	err := f()
	if ri.reauth(err) {
		if err := ri.reauthorizeAccount(ctx); err != nil {
			return err
		}
		err = f()
	}
	return err
}
blazer-0.6.1/b2/baseline.go000066400000000000000000000335421451327606200154260ustar00rootroot00000000000000// Copyright 2016, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
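
// Every be* wrapper in backend.go follows the same shape: an inner closure
// makes one base-layer call and is retried once through withReauth when the
// auth token has expired, and the whole thing is retried through withBackoff
// on transient errors. A minimal sketch of that composition (someB2Call and
// ri are hypothetical stand-ins for a base-layer method and a
// beRootInterface, for illustration only):
//
//	var out string
//	f := func() error {
//		g := func() error {
//			r, err := someB2Call(ctx)
//			if err != nil {
//				return err
//			}
//			out = r
//			return nil
//		}
//		return withReauth(ctx, ri, g) // re-authorize once on auth errors, then retry
//	}
//	err := withBackoff(ctx, ri, f) // retry transient errors with capped, jittered backoff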
package b2 import ( "context" "io" "net/http" "time" "github.com/Backblaze/blazer/base" ) // This file wraps the base package in a thin layer, for testing. It should be // the only file in b2 that imports base. type b2RootInterface interface { authorizeAccount(context.Context, string, string, clientOptions) error transient(error) bool backoff(error) time.Duration reauth(error) bool reupload(error) bool createBucket(context.Context, string, string, map[string]string, []LifecycleRule) (b2BucketInterface, error) listBuckets(context.Context, string) ([]b2BucketInterface, error) createKey(context.Context, string, []string, time.Duration, string, string) (b2KeyInterface, error) listKeys(context.Context, int, string) ([]b2KeyInterface, string, error) } type b2BucketInterface interface { name() string btype() string attrs() *BucketAttrs id() string updateBucket(context.Context, *BucketAttrs) error deleteBucket(context.Context) error getUploadURL(context.Context) (b2URLInterface, error) startLargeFile(ctx context.Context, name, contentType string, info map[string]string) (b2LargeFileInterface, error) listFileNames(context.Context, int, string, string, string) ([]b2FileInterface, string, error) listFileVersions(context.Context, int, string, string, string, string) ([]b2FileInterface, string, string, error) listUnfinishedLargeFiles(context.Context, int, string) ([]b2FileInterface, string, error) downloadFileByName(context.Context, string, int64, int64, bool) (b2FileReaderInterface, error) hideFile(context.Context, string) (b2FileInterface, error) getDownloadAuthorization(context.Context, string, time.Duration, string) (string, error) baseURL() string file(string, string) b2FileInterface } type b2URLInterface interface { reload(context.Context) error uploadFile(context.Context, io.Reader, int, string, string, string, map[string]string) (b2FileInterface, error) } type b2FileInterface interface { name() string id() string size() int64 timestamp() time.Time status() string deleteFileVersion(context.Context) error getFileInfo(context.Context) (b2FileInfoInterface, error) listParts(context.Context, int, int) ([]b2FilePartInterface, int, error) compileParts(int64, map[int]string) b2LargeFileInterface } type b2LargeFileInterface interface { finishLargeFile(context.Context) (b2FileInterface, error) getUploadPartURL(context.Context) (b2FileChunkInterface, error) cancel(context.Context) error } type b2FileChunkInterface interface { reload(context.Context) error uploadPart(context.Context, io.Reader, string, int, int) (int, error) } type b2FileReaderInterface interface { io.ReadCloser stats() (int, string, string, map[string]string) id() string } type b2FileInfoInterface interface { stats() (string, string, int64, string, map[string]string, string, time.Time) // bleck } type b2FilePartInterface interface { number() int sha1() string size() int64 } type b2KeyInterface interface { del(context.Context) error caps() []string name() string expires() time.Time secret() string id() string } type b2Root struct { b *base.B2 } type b2Bucket struct { b *base.Bucket } type b2URL struct { b *base.URL } type b2File struct { b *base.File } type b2LargeFile struct { b *base.LargeFile } type b2FileChunk struct { b *base.FileChunk } type b2FileReader struct { b *base.FileReader } type b2FileInfo struct { b *base.FileInfo } type b2FilePart struct { b *base.FilePart } type b2Key struct { b *base.Key } func (b *b2Root) authorizeAccount(ctx context.Context, account, key string, c clientOptions) error { var aopts []base.AuthOption ct 
:= &clientTransport{client: c.client} if c.transport != nil { ct.rt = c.transport } aopts = append(aopts, base.Transport(ct)) if c.failSomeUploads { aopts = append(aopts, base.FailSomeUploads()) } if c.expireTokens { aopts = append(aopts, base.ExpireSomeAuthTokens()) } if c.capExceeded { aopts = append(aopts, base.ForceCapExceeded()) } if c.apiBase != "" { aopts = append(aopts, base.SetAPIBase(c.apiBase)) } for _, agent := range c.userAgents { aopts = append(aopts, base.UserAgent(agent)) } nb, err := base.AuthorizeAccount(ctx, account, key, aopts...) if err != nil { return err } if b.b == nil { b.b = nb return nil } b.b.Update(nb) return nil } func (*b2Root) backoff(err error) time.Duration { if base.Action(err) != base.Retry { return 0 } return base.Backoff(err) } func (*b2Root) reauth(err error) bool { return base.Action(err) == base.ReAuthenticate } func (*b2Root) reupload(err error) bool { return base.Action(err) == base.AttemptNewUpload } func (*b2Root) transient(err error) bool { return base.Action(err) == base.Retry } func (b *b2Root) createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (b2BucketInterface, error) { var baseRules []base.LifecycleRule for _, rule := range rules { baseRules = append(baseRules, base.LifecycleRule{ DaysNewUntilHidden: rule.DaysNewUntilHidden, DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, Prefix: rule.Prefix, }) } bucket, err := b.b.CreateBucket(ctx, name, btype, info, baseRules) if err != nil { return nil, err } return &b2Bucket{bucket}, nil } func (b *b2Root) listBuckets(ctx context.Context, name string) ([]b2BucketInterface, error) { buckets, err := b.b.ListBuckets(ctx, name) if err != nil { return nil, err } var rtn []b2BucketInterface for _, bucket := range buckets { rtn = append(rtn, &b2Bucket{bucket}) } return rtn, err } func (b *b2Bucket) updateBucket(ctx context.Context, attrs *BucketAttrs) error { if attrs == nil { return nil } if attrs.Type != UnknownType { b.b.Type = string(attrs.Type) } if attrs.Info != nil { b.b.Info = attrs.Info } if attrs.LifecycleRules != nil { rules := []base.LifecycleRule{} for _, rule := range attrs.LifecycleRules { rules = append(rules, base.LifecycleRule{ DaysNewUntilHidden: rule.DaysNewUntilHidden, DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, Prefix: rule.Prefix, }) } b.b.LifecycleRules = rules } newBucket, err := b.b.Update(ctx) if err == nil { b.b = newBucket } code, _ := base.Code(err) if code == 409 { return b2err{ err: err, isUpdateConflict: true, } } return err } func (b *b2Root) createKey(ctx context.Context, name string, caps []string, valid time.Duration, bucketID string, prefix string) (b2KeyInterface, error) { k, err := b.b.CreateKey(ctx, name, caps, valid, bucketID, prefix) if err != nil { return nil, err } return &b2Key{k}, nil } func (b *b2Root) listKeys(ctx context.Context, max int, next string) ([]b2KeyInterface, string, error) { keys, next, err := b.b.ListKeys(ctx, max, next) if err != nil { return nil, "", err } var k []b2KeyInterface for _, key := range keys { k = append(k, &b2Key{key}) } return k, next, nil } func (b *b2Bucket) deleteBucket(ctx context.Context) error { return b.b.DeleteBucket(ctx) } func (b *b2Bucket) name() string { return b.b.Name } func (b *b2Bucket) btype() string { return b.b.Type } func (b *b2Bucket) attrs() *BucketAttrs { var rules []LifecycleRule for _, rule := range b.b.LifecycleRules { rules = append(rules, LifecycleRule{ DaysNewUntilHidden: rule.DaysNewUntilHidden, DaysHiddenUntilDeleted: 
rule.DaysHiddenUntilDeleted, Prefix: rule.Prefix, }) } return &BucketAttrs{ LifecycleRules: rules, Info: b.b.Info, Type: BucketType(b.b.Type), } } func (b *b2Bucket) id() string { return b.b.ID } func (b *b2Bucket) getUploadURL(ctx context.Context) (b2URLInterface, error) { url, err := b.b.GetUploadURL(ctx) if err != nil { return nil, err } return &b2URL{url}, nil } func (b *b2Bucket) startLargeFile(ctx context.Context, name, ct string, info map[string]string) (b2LargeFileInterface, error) { lf, err := b.b.StartLargeFile(ctx, name, ct, info) if err != nil { return nil, err } return &b2LargeFile{lf}, nil } func (b *b2Bucket) listFileNames(ctx context.Context, count int, continuation, prefix, delimiter string) ([]b2FileInterface, string, error) { fs, c, err := b.b.ListFileNames(ctx, count, continuation, prefix, delimiter) if err != nil { return nil, "", err } var files []b2FileInterface for _, f := range fs { files = append(files, &b2File{f}) } return files, c, nil } func (b *b2Bucket) listFileVersions(ctx context.Context, count int, nextName, nextID, prefix, delimiter string) ([]b2FileInterface, string, string, error) { fs, name, id, err := b.b.ListFileVersions(ctx, count, nextName, nextID, prefix, delimiter) if err != nil { return nil, "", "", err } var files []b2FileInterface for _, f := range fs { files = append(files, &b2File{f}) } return files, name, id, nil } func (b *b2Bucket) listUnfinishedLargeFiles(ctx context.Context, count int, continuation string) ([]b2FileInterface, string, error) { fs, cont, err := b.b.ListUnfinishedLargeFiles(ctx, count, continuation) if err != nil { return nil, "", err } var files []b2FileInterface for _, f := range fs { files = append(files, &b2File{f}) } return files, cont, nil } func (b *b2Bucket) downloadFileByName(ctx context.Context, name string, offset, size int64, header bool) (b2FileReaderInterface, error) { fr, err := b.b.DownloadFileByName(ctx, name, offset, size, header) if err != nil { code, _ := base.Code(err) switch code { case http.StatusRequestedRangeNotSatisfiable: return nil, errNoMoreContent case http.StatusNotFound: return nil, b2err{err: err, notFoundErr: true} } return nil, err } return &b2FileReader{fr}, nil } func (b *b2Bucket) hideFile(ctx context.Context, name string) (b2FileInterface, error) { f, err := b.b.HideFile(ctx, name) if err != nil { return nil, err } return &b2File{f}, nil } func (b *b2Bucket) getDownloadAuthorization(ctx context.Context, p string, v time.Duration, s string) (string, error) { return b.b.GetDownloadAuthorization(ctx, p, v, s) } func (b *b2Bucket) baseURL() string { return b.b.BaseURL() } func (b *b2Bucket) file(id, name string) b2FileInterface { return &b2File{b.b.File(id, name)} } func (b *b2URL) uploadFile(ctx context.Context, r io.Reader, size int, name, contentType, sha1 string, info map[string]string) (b2FileInterface, error) { file, err := b.b.UploadFile(ctx, r, size, name, contentType, sha1, info) if err != nil { return nil, err } return &b2File{file}, nil } func (b *b2URL) reload(ctx context.Context) error { return b.b.Reload(ctx) } func (b *b2File) deleteFileVersion(ctx context.Context) error { return b.b.DeleteFileVersion(ctx) } func (b *b2File) name() string { return b.b.Name } func (b *b2File) id() string { return b.b.ID } func (b *b2File) size() int64 { return b.b.Size } func (b *b2File) timestamp() time.Time { return b.b.Timestamp } func (b *b2File) status() string { return b.b.Status } func (b *b2File) getFileInfo(ctx context.Context) (b2FileInfoInterface, error) { if b.b.Info != nil { 
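// Info was already populated (e.g., by a listing call); reuse it rather than
// making another network round trip.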
return &b2FileInfo{b.b.Info}, nil } fi, err := b.b.GetFileInfo(ctx) if err != nil { return nil, err } return &b2FileInfo{fi}, nil } func (b *b2File) listParts(ctx context.Context, next, count int) ([]b2FilePartInterface, int, error) { parts, n, err := b.b.ListParts(ctx, next, count) if err != nil { return nil, 0, err } var rtn []b2FilePartInterface for _, part := range parts { rtn = append(rtn, &b2FilePart{part}) } return rtn, n, nil } func (b *b2File) compileParts(size int64, seen map[int]string) b2LargeFileInterface { return &b2LargeFile{b.b.CompileParts(size, seen)} } func (b *b2LargeFile) finishLargeFile(ctx context.Context) (b2FileInterface, error) { f, err := b.b.FinishLargeFile(ctx) if err != nil { return nil, err } return &b2File{f}, nil } func (b *b2LargeFile) getUploadPartURL(ctx context.Context) (b2FileChunkInterface, error) { c, err := b.b.GetUploadPartURL(ctx) if err != nil { return nil, err } return &b2FileChunk{c}, nil } func (b *b2LargeFile) cancel(ctx context.Context) error { return b.b.CancelLargeFile(ctx) } func (b *b2FileChunk) reload(ctx context.Context) error { return b.b.Reload(ctx) } func (b *b2FileChunk) uploadPart(ctx context.Context, r io.Reader, sha1 string, size, index int) (int, error) { return b.b.UploadPart(ctx, r, sha1, size, index) } func (b *b2FileReader) Read(p []byte) (int, error) { return b.b.Read(p) } func (b *b2FileReader) Close() error { return b.b.Close() } func (b *b2FileReader) stats() (int, string, string, map[string]string) { return b.b.ContentLength, b.b.ContentType, b.b.SHA1, b.b.Info } func (b *b2FileReader) id() string { return b.b.ID } func (b *b2FileInfo) stats() (string, string, int64, string, map[string]string, string, time.Time) { return b.b.Name, b.b.SHA1, b.b.Size, b.b.ContentType, b.b.Info, b.b.Status, b.b.Timestamp } func (b *b2FilePart) number() int { return b.b.Number } func (b *b2FilePart) sha1() string { return b.b.SHA1 } func (b *b2FilePart) size() int64 { return b.b.Size } func (b *b2Key) del(ctx context.Context) error { return b.b.Delete(ctx) } func (b *b2Key) caps() []string { return b.b.Capabilities } func (b *b2Key) name() string { return b.b.Name } func (b *b2Key) expires() time.Time { return b.b.Expires } func (b *b2Key) secret() string { return b.b.Secret } func (b *b2Key) id() string { return b.b.ID } blazer-0.6.1/b2/buffer.go000066400000000000000000000106531451327606200151130ustar00rootroot00000000000000// Copyright 2017, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
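
// This file provides three interchangeable writeBuffer implementations:
// memoryBuffer (a pooled in-memory buffer), fileBuffer (spills the chunk to a
// temp file for low-memory operation), and nonBuffer (streams directly from
// an io.ReaderAt, appending the SHA1 as trailing hex digits). A minimal
// sketch of selecting one, assuming the caller mirrors the Writer's
// UseFileBuffer option (pickBuffer is a hypothetical helper, not part of
// this package):
//
//	func pickBuffer(useFile bool, tmpDir string) (writeBuffer, error) {
//		if useFile {
//			return newFileBuffer(tmpDir) // temp file; removed again on Close
//		}
//		return newMemoryBuffer(), nil // pooled bytes.Buffer plus running SHA1
//	}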
package b2 import ( "bytes" "crypto/sha1" "errors" "fmt" "hash" "io" "io/ioutil" "os" "strings" "sync" ) type readResetter interface { Read([]byte) (int, error) Reset() error } type resetter struct { rs io.ReadSeeker } func (r resetter) Read(p []byte) (int, error) { return r.rs.Read(p) } func (r resetter) Reset() error { _, err := r.rs.Seek(0, 0); return err } func newResetter(p []byte) readResetter { return resetter{rs: bytes.NewReader(p)} } type writeBuffer interface { io.Writer Len() int Reader() (readResetter, error) Hash() string // sha1 or whatever it is Close() error } // nonBuffer doesn't buffer anything, but passes values directly from the // source readseeker. Many nonBuffers can point at different parts of the same // underlying source, and be accessed by multiple goroutines simultaneously. func newNonBuffer(rs io.ReaderAt, offset, size int64) writeBuffer { return &nonBuffer{ r: io.NewSectionReader(rs, offset, size), size: int(size), hsh: sha1.New(), } } type nonBuffer struct { r *io.SectionReader size int hsh hash.Hash isEOF bool buf *strings.Reader } func (nb *nonBuffer) Len() int { return nb.size + 40 } func (nb *nonBuffer) Hash() string { return "hex_digits_at_end" } func (nb *nonBuffer) Close() error { return nil } func (nb *nonBuffer) Reader() (readResetter, error) { return nb, nil } func (nb *nonBuffer) Write([]byte) (int, error) { return 0, errors.New("writes not supported") } func (nb *nonBuffer) Read(p []byte) (int, error) { if nb.isEOF { return nb.buf.Read(p) } n, err := io.TeeReader(nb.r, nb.hsh).Read(p) if err == io.EOF { err = nil nb.isEOF = true nb.buf = strings.NewReader(fmt.Sprintf("%x", nb.hsh.Sum(nil))) } return n, err } func (nb *nonBuffer) Reset() error { nb.hsh.Reset() nb.isEOF = false _, err := nb.r.Seek(0, 0) return err } type memoryBuffer struct { buf *bytes.Buffer hsh hash.Hash w io.Writer mux sync.Mutex } var bufpool *sync.Pool func init() { bufpool = &sync.Pool{} bufpool.New = func() interface{} { return &bytes.Buffer{} } } func newMemoryBuffer() *memoryBuffer { mb := &memoryBuffer{ hsh: sha1.New(), } mb.buf = bufpool.Get().(*bytes.Buffer) mb.w = io.MultiWriter(mb.hsh, mb.buf) return mb } func (mb *memoryBuffer) Write(p []byte) (int, error) { return mb.w.Write(p) } func (mb *memoryBuffer) Len() int { return mb.buf.Len() } func (mb *memoryBuffer) Reader() (readResetter, error) { return newResetter(mb.buf.Bytes()), nil } func (mb *memoryBuffer) Hash() string { return fmt.Sprintf("%x", mb.hsh.Sum(nil)) } func (mb *memoryBuffer) Close() error { mb.mux.Lock() defer mb.mux.Unlock() if mb.buf == nil { return nil } mb.buf.Truncate(0) bufpool.Put(mb.buf) mb.buf = nil return nil } type fileBuffer struct { f *os.File hsh hash.Hash w io.Writer s int } func newFileBuffer(loc string) (*fileBuffer, error) { f, err := ioutil.TempFile(loc, "blazer") if err != nil { return nil, err } fb := &fileBuffer{ f: f, hsh: sha1.New(), } fb.w = io.MultiWriter(fb.f, fb.hsh) return fb, nil } func (fb *fileBuffer) Write(p []byte) (int, error) { n, err := fb.w.Write(p) fb.s += n return n, err } func (fb *fileBuffer) Len() int { return fb.s } func (fb *fileBuffer) Hash() string { return fmt.Sprintf("%x", fb.hsh.Sum(nil)) } func (fb *fileBuffer) Reader() (readResetter, error) { if _, err := fb.f.Seek(0, 0); err != nil { return nil, err } return &fr{f: fb.f}, nil } func (fb *fileBuffer) Close() error { fb.f.Close() return os.Remove(fb.f.Name()) } // wraps *os.File so that the http package doesn't see it as an io.Closer type fr struct { f *os.File } func (r *fr) Read(p []byte) (int, 
error) { return r.f.Read(p) } func (r *fr) Reset() error { _, err := r.f.Seek(0, 0); return err } blazer-0.6.1/b2/integration_test.go000066400000000000000000000722111451327606200172220ustar00rootroot00000000000000// Copyright 2016, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package b2 import ( "bytes" "context" "crypto/rand" "crypto/sha1" "encoding/hex" "fmt" "io" "io/ioutil" "net/http" "os" "reflect" "strings" "sync/atomic" "testing" "time" "github.com/Backblaze/blazer/internal/blog" "github.com/Backblaze/blazer/x/transport" ) const ( apiID = "B2_ACCOUNT_ID" apiKey = "B2_SECRET_KEY" errVar = "B2_TRANSIENT_ERRORS" ) func TestReadWriteLive(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() sobj, wsha, err := writeFile(ctx, bucket, smallFileName, 1e6-42, 1e8) if err != nil { t.Fatal(err) } lobj, wshaL, err := writeFile(ctx, bucket, largeFileName, 5e6+5e4, 5e6) if err != nil { t.Fatal(err) } if err := readFile(ctx, lobj, wshaL, 1e6, 10); err != nil { t.Error(err) } if err := readFile(ctx, sobj, wsha, 1e5, 10); err != nil { t.Error(err) } iter := bucket.List(ctx, ListHidden()) for iter.Next() { if err := iter.Object().Delete(ctx); err != nil { t.Error(err) } } if err := iter.Err(); err != nil { t.Error(err) } } func TestReaderFromLive(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() table := []struct { size, pos int64 csize, writers int }{ { // that it works at all size: 10, }, { // large uploads size: 15e6 + 10, csize: 5e6, writers: 2, }, { // an excess of writers size: 50e6, csize: 5e6, writers: 12, }, { // with offset, seeks back to start after turning it into a ReaderAt size: 250, pos: 50, }, } for i, e := range table { rs := &zReadSeeker{pos: e.pos, size: e.size} o := bucket.Object(fmt.Sprintf("writer.%d", i)) w := o.NewWriter(ctx) w.ChunkSize = e.csize w.ConcurrentUploads = e.writers n, err := w.ReadFrom(rs) if err != nil { t.Errorf("ReadFrom(): %v", err) } if n != e.size { t.Errorf("ReadFrom(): got %d bytes, wanted %d bytes", n, e.size) } if err := w.Close(); err != nil { t.Errorf("w.Close(): %v", err) continue } r := o.NewReader(ctx) h := sha1.New() rn, err := io.Copy(h, r) if err != nil { t.Errorf("Read from B2: %v", err) } if rn != n { t.Errorf("Read from B2: got %d bytes, want %d bytes", rn, n) } if err, ok := r.Verify(); ok && err != nil { t.Errorf("Read from B2: %v", err) } if err := r.Close(); err != nil { t.Errorf("r.Close(): %v", err) } hex := fmt.Sprintf("%x", h.Sum(nil)) attrs, err := o.Attrs(ctx) if err != nil { t.Errorf("Attrs(): %v", err) continue } if attrs.SHA1 == "none" { continue } if hex != attrs.SHA1 { t.Errorf("SHA1: got %q, want %q", hex, attrs.SHA1) } } } func TestHideShowLive(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := 
startLiveTest(ctx, t) defer done() // write a file obj, _, err := writeFile(ctx, bucket, smallFileName, 1e6+42, 1e8) if err != nil { t.Fatal(err) } got, err := countObjects(bucket.List(ctx)) if err != nil { t.Error(err) } if got != 1 { t.Fatalf("got %d objects, wanted 1", got) } // When the hide marker and the object it's hiding were created within the // same second, they can be sorted in the wrong order, causing the object to // fail to be hidden. time.Sleep(1500 * time.Millisecond) // hide the file if err := obj.Hide(ctx); err != nil { t.Fatal(err) } got, err = countObjects(bucket.List(ctx)) if err != nil { t.Error(err) } if got != 0 { t.Fatalf("got %d objects, wanted 0", got) } // unhide the file if err := bucket.Reveal(ctx, smallFileName); err != nil { t.Fatal(err) } // count see the object again got, err = countObjects(bucket.List(ctx)) if err != nil { t.Error(err) } if got != 1 { t.Fatalf("got %d objects, wanted 1", got) } } type cancelReader struct { r io.Reader n, l int c func() } func (c *cancelReader) Read(p []byte) (int, error) { n, err := c.r.Read(p) c.n += n if c.n >= c.l { c.c() } return n, err } func TestResumeWriter(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) bucket, _ := startLiveTest(ctx, t) w := bucket.Object("foo").NewWriter(ctx) w.ChunkSize = 5e6 r := &cancelReader{ r: io.LimitReader(zReader{}, 15e6), l: 6e6, c: cancel, } if _, err := io.Copy(w, r); err != context.Canceled { t.Fatalf("io.Copy: wanted canceled context, got: %v", err) } ctx2 := context.Background() ctx2, cancel2 := context.WithTimeout(ctx2, 10*time.Minute) defer cancel2() bucket2, done := startLiveTest(ctx2, t) defer done() w2 := bucket2.Object("foo").NewWriter(ctx2) w2.ChunkSize = 5e6 r2 := io.LimitReader(zReader{}, 15e6) h1 := sha1.New() tr := io.TeeReader(r2, h1) w2.Resume = true w2.ConcurrentUploads = 2 if _, err := io.Copy(w2, tr); err != nil { t.Fatal(err) } if err := w2.Close(); err != nil { t.Fatal(err) } begSHA := fmt.Sprintf("%x", h1.Sum(nil)) objR := bucket2.Object("foo").NewReader(ctx2) objR.ConcurrentDownloads = 3 h2 := sha1.New() if _, err := io.Copy(h2, objR); err != nil { t.Fatal(err) } if err := objR.Close(); err != nil { t.Error(err) } endSHA := fmt.Sprintf("%x", h2.Sum(nil)) if endSHA != begSHA { t.Errorf("got conflicting hashes: got %q, want %q", endSHA, begSHA) } } func TestResumeWriterWithoutExtantFile(t *testing.T) { ctx := context.Background() bucket, done := startLiveTest(ctx, t) defer done() r := io.LimitReader(zReader{}, 15e6) w := bucket.Object("foo").NewWriter(ctx) w.ChunkSize = 5e6 w.Resume = true if _, err := io.Copy(w, r); err != nil { t.Fatalf("io.Copy: %v", err) } } func TestAttrs(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() attrlist := []*Attrs{ &Attrs{ ContentType: "jpeg/stream", Info: map[string]string{ "one": "a", "two": "b", }, }, &Attrs{ ContentType: "application/MAGICFACE", LastModified: time.Unix(1464370149, 142000000), Info: map[string]string{}, // can't be nil }, &Attrs{ ContentType: "arbitrarystring", Info: map[string]string{ "spaces": "string with spaces", "unicode": "日本語", "special": "&/!@_.~", }, }, } table := []struct { name string size int64 }{ { name: "small", size: 1e3, }, { name: "large", size: 5e6 + 4, }, } for _, e := range table { for _, attrs := range attrlist { o := bucket.Object(e.name) w := o.NewWriter(ctx, WithAttrsOption(attrs)) w.ChunkSize = 5e6 if _, err := 
io.Copy(w, io.LimitReader(zReader{}, e.size)); err != nil { t.Error(err) continue } if err := w.Close(); err != nil { t.Error(err) continue } gotAttrs, err := bucket.Object(e.name).Attrs(ctx) if err != nil { t.Error(err) continue } if gotAttrs.ContentType != attrs.ContentType { t.Errorf("bad content-type for %s: got %q, want %q", e.name, gotAttrs.ContentType, attrs.ContentType) } if !reflect.DeepEqual(gotAttrs.Info, attrs.Info) { t.Errorf("bad info for %s: got %#v, want %#v", e.name, gotAttrs.Info, attrs.Info) } if !gotAttrs.LastModified.Equal(attrs.LastModified) { t.Errorf("bad lastmodified time for %s: got %v, want %v", e.name, gotAttrs.LastModified, attrs.LastModified) } if err := o.Delete(ctx); err != nil { t.Errorf("Object(%q).Delete: %v", e.name, err) } } } } func TestFileBufferLive(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() r := io.LimitReader(zReader{}, 1e6) w := bucket.Object("small").NewWriter(ctx) w.UseFileBuffer = true if _, err := io.CopyN(w, r, 1); err != nil { t.Fatalf("CopyN: %v", err) } wb, ok := w.w.(*fileBuffer) if !ok { t.Fatalf("writer isn't using file buffer: %T", w.w) } smallTmpName := wb.f.Name() if _, err := io.Copy(w, r); err != nil { t.Errorf("creating small file: %v", err) } if err := w.Close(); err != nil { t.Errorf("w.Close(): %v", err) } if _, err := os.Stat(smallTmpName); !os.IsNotExist(err) { t.Errorf("tmp file exists (%s) or other error: %v", smallTmpName, err) } } func TestAuthTokLive(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() foo := "foo/bar" baz := "baz/bar" fw := bucket.Object(foo).NewWriter(ctx) io.Copy(fw, io.LimitReader(zReader{}, 1e5)) if err := fw.Close(); err != nil { t.Fatal(err) } bw := bucket.Object(baz).NewWriter(ctx) io.Copy(bw, io.LimitReader(zReader{}, 1e5)) if err := bw.Close(); err != nil { t.Fatal(err) } tok, err := bucket.AuthToken(ctx, "foo", time.Hour) if err != nil { t.Fatal(err) } furl := fmt.Sprintf("%s?Authorization=%s", bucket.Object(foo).URL(), tok) frsp, err := http.Get(furl) if err != nil { t.Fatal(err) } if frsp.StatusCode != 200 { t.Fatalf("%s: got %s, want 200", furl, frsp.Status) } burl := fmt.Sprintf("%s?Authorization=%s", bucket.Object(baz).URL(), tok) brsp, err := http.Get(burl) if err != nil { t.Fatal(err) } if brsp.StatusCode != 401 { t.Fatalf("%s: got %s, want 401", burl, brsp.Status) } } func TestObjAuthTokLive(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() table := []struct { obj string d time.Duration b2cd string }{ { obj: "foo/bar", d: time.Minute, }, { obj: "foo2/thing.pdf", d: time.Minute, b2cd: "attachment", }, { obj: "foo2/thing.pdf", d: time.Minute, b2cd: `attachment; filename="what.png"`, }, } for _, e := range table { fw := bucket.Object(e.obj).NewWriter(ctx) io.Copy(fw, io.LimitReader(zReader{}, 1e5)) if err := fw.Close(); err != nil { t.Fatal(err) } url, err := bucket.Object(e.obj).AuthURL(ctx, e.d, e.b2cd) if err != nil { t.Fatal(err) } blog.V(2).Infof("downloading %s", url.String()) frsp, err := http.Get(url.String()) if err != nil { t.Fatal(err) } if frsp.StatusCode != 200 { t.Fatalf("%s: got %s, want 200", url.String(), frsp.Status) } } } func TestRangeReaderLive(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 
time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() buf := &bytes.Buffer{} io.Copy(buf, io.LimitReader(zReader{}, 3e6)) rs := bytes.NewReader(buf.Bytes()) w := bucket.Object("foobar").NewWriter(ctx) if _, err := io.Copy(w, rs); err != nil { t.Fatal(err) } if err := w.Close(); err != nil { t.Fatal(err) } table := []struct { offset, length int64 size int64 // expected actual size }{ { offset: 1e6 - 50, length: 1e6 + 50, size: 1e6 + 50, }, { offset: 0, length: -1, size: 3e6, }, { offset: 2e6, length: -1, size: 1e6, }, { offset: 2e6, length: 2e6, size: 1e6, }, { offset: 0, length: 4e6, size: 3e6, }, } for _, e := range table { if _, err := rs.Seek(e.offset, 0); err != nil { t.Error(err) continue } hw := sha1.New() var lr io.Reader lr = rs if e.length >= 0 { lr = io.LimitReader(rs, e.length) } if _, err := io.Copy(hw, lr); err != nil { t.Error(err) continue } r := bucket.Object("foobar").NewRangeReader(ctx, e.offset, e.length) defer r.Close() hr := sha1.New() read, err := io.Copy(hr, r) if err != nil { t.Error(err) continue } if read != e.size { t.Errorf("NewRangeReader(_, %d, %d): read %d bytes, wanted %d bytes", e.offset, e.length, read, e.size) } got := fmt.Sprintf("%x", hr.Sum(nil)) want := fmt.Sprintf("%x", hw.Sum(nil)) if got != want { t.Errorf("NewRangeReader(_, %d, %d): got %q, want %q", e.offset, e.length, got, want) } if err, ok := r.Verify(); ok && err != nil { t.Errorf("NewRangeReader(_, %d, %d): %v", e.offset, e.length, err) } } } func TestListObjectsWithPrefix(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() foo := "foo/bar" baz := "baz/bar" fw := bucket.Object(foo).NewWriter(ctx) io.Copy(fw, io.LimitReader(zReader{}, 1e5)) if err := fw.Close(); err != nil { t.Fatal(err) } bw := bucket.Object(baz).NewWriter(ctx) io.Copy(bw, io.LimitReader(zReader{}, 1e5)) if err := bw.Close(); err != nil { t.Fatal(err) } table := []struct { opts []ListOption }{ { opts: []ListOption{ ListPrefix("baz/"), }, }, { opts: []ListOption{ ListPrefix("baz/"), ListHidden(), }, }, } for _, entry := range table { iter := bucket.List(ctx, entry.opts...) 
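// collect the names visible under this option set; only objects under "baz/" should appear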
var res []string for iter.Next() { o := iter.Object() attrs, err := o.Attrs(ctx) if err != nil { t.Errorf("(%v).Attrs: %v", o, err) continue } res = append(res, attrs.Name) } if iter.Err() != nil { t.Errorf("iter.Err(): %v", iter.Err()) } want := []string{"baz/bar"} if !reflect.DeepEqual(res, want) { t.Errorf("got %v, want %v", res, want) } } } func compare(a, b *BucketAttrs) bool { if a == nil { a = &BucketAttrs{} } if b == nil { b = &BucketAttrs{} } if a.Type != b.Type && !((a.Type == "" && b.Type == Private) || (a.Type == Private && b.Type == "")) { return false } if !reflect.DeepEqual(a.Info, b.Info) && (len(a.Info) > 0 || len(b.Info) > 0) { return false } return reflect.DeepEqual(a.LifecycleRules, b.LifecycleRules) } func TestNewBucket(t *testing.T) { id := os.Getenv(apiID) key := os.Getenv(apiKey) if id == "" || key == "" { t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests") } ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 2*time.Minute) defer cancel() client, err := NewClient(ctx, id, key) if err != nil { t.Fatal(err) } table := []struct { name string attrs *BucketAttrs }{ { name: "no-attrs", }, { name: "only-rules", attrs: &BucketAttrs{ LifecycleRules: []LifecycleRule{ { Prefix: "whee/", DaysHiddenUntilDeleted: 30, }, { Prefix: "whoa/", DaysNewUntilHidden: 1, }, }, }, }, { name: "only-info", attrs: &BucketAttrs{ Info: map[string]string{ "this": "that", "other": "thing", }, }, }, } for _, ent := range table { bucket, err := client.NewBucket(ctx, id+"-"+ent.name, ent.attrs) if err != nil { t.Errorf("%s: NewBucket(%v): %v", ent.name, ent.attrs, err) continue } defer bucket.Delete(ctx) if err := bucket.Update(ctx, nil); err != nil { t.Errorf("%s: Update(ctx, nil): %v", ent.name, err) continue } attrs, err := bucket.Attrs(ctx) if err != nil { t.Errorf("%s: Attrs(ctx): %v", ent.name, err) continue } if !compare(attrs, ent.attrs) { t.Errorf("%s: attrs disagree: got %v, want %v", ent.name, attrs, ent.attrs) } } } func TestDuelingBuckets(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() bucket2, done2 := startLiveTest(ctx, t) defer done2() attrs, err := bucket.Attrs(ctx) if err != nil { t.Fatal(err) } attrs2, err := bucket2.Attrs(ctx) if err != nil { t.Fatal(err) } attrs.Info["food"] = "yum" if err := bucket.Update(ctx, attrs); err != nil { t.Fatal(err) } attrs2.Info["nails"] = "not" if err := bucket2.Update(ctx, attrs2); !IsUpdateConflict(err) { t.Fatalf("bucket.Update should have failed with IsUpdateConflict; instead failed with %v", err) } attrs2, err = bucket2.Attrs(ctx) if err != nil { t.Fatal(err) } attrs2.Info["nails"] = "not" if err := bucket2.Update(ctx, nil); err != nil { t.Fatal(err) } if err := bucket2.Update(ctx, attrs2); err != nil { t.Fatal(err) } } func TestNotExist(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() if _, err := bucket.Object("not there").Attrs(ctx); !IsNotExist(err) { t.Errorf("IsNotExist() on nonexistent object returned false (%v)", err) } } func TestWriteEmpty(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() _, _, err := writeFile(ctx, bucket, smallFileName, 0, 1e8) if err != nil { t.Fatal(err) } } func TestAttrsNoRoundtrip(t *testing.T) { ctx := context.Background() 
ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() _, _, err := writeFile(ctx, bucket, smallFileName, 1e6+42, 1e8) if err != nil { t.Fatal(err) } iter := bucket.List(ctx) iter.Next() obj := iter.Object() var trips int for range bucket.c.Status().table()["1m"] { trips++ } attrs, err := obj.Attrs(ctx) if err != nil { t.Fatal(err) } if attrs.Name != smallFileName { t.Errorf("got the wrong object: got %q, want %q", attrs.Name, smallFileName) } var newTrips int for range bucket.c.Status().table()["1m"] { newTrips++ } if trips != newTrips { t.Errorf("Attrs() should not have caused any net traffic, but it did: old %d, new %d", trips, newTrips) } } /*func TestAttrsFewRoundtrips(t *testing.T) { rt := &rtCounter{rt: defaultTransport} defaultTransport = rt defer func() { defaultTransport = rt.rt }() ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() _, _, err := writeFile(ctx, bucket, smallFileName, 42, 1e8) if err != nil { t.Fatal(err) } o := bucket.Object(smallFileName) trips := rt.trips attrs, err := o.Attrs(ctx) if err != nil { t.Fatal(err) } if attrs.Name != smallFileName { t.Errorf("got the wrong object: got %q, want %q", attrs.Name, smallFileName) } if trips != rt.trips { t.Errorf("Attrs(): too many round trips, got %d, want 1", rt.trips-trips) } }*/ func TestSmallUploadsFewRoundtrips(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() for i := 0; i < 10; i++ { _, _, err := writeFile(ctx, bucket, fmt.Sprintf("%s.%d", smallFileName, i), 42, 1e8) if err != nil { t.Fatal(err) } } si := bucket.c.Status() getURL := si.RPCs[0].CountByMethod()["b2_get_upload_url"] uploadFile := si.RPCs[0].CountByMethod()["b2_upload_file"] if getURL >= uploadFile { t.Errorf("too many calls to b2_get_upload_url") } } func TestDeleteWithoutName(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() _, _, err := writeFile(ctx, bucket, smallFileName, 1e6+42, 1e8) if err != nil { t.Fatal(err) } if err := bucket.Object(smallFileName).Delete(ctx); err != nil { t.Fatal(err) } } func TestZeroByteObject(t *testing.T) { ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() bucket, done := startLiveTest(ctx, t) defer done() _, _, err := writeFile(ctx, bucket, smallFileName, 0, 0) if err != nil { t.Fatal(err) } if err := bucket.Object(smallFileName).Delete(ctx); err != nil { t.Fatal(err) } } func TestListUnfinishedLargeFiles(t *testing.T) { ctx := context.Background() bucket, done := startLiveTest(ctx, t) defer done() w := bucket.Object(largeFileName).NewWriter(ctx) w.ChunkSize = 1e5 if _, err := io.Copy(w, io.LimitReader(zReader{}, 1e6)); err != nil { t.Fatal(err) } iter := bucket.List(ctx, ListUnfinished()) if !iter.Next() { t.Errorf("ListUnfinishedLargeFiles: got none, want 1 (error %v)", iter.Err()) } } func TestReauthPreservesOptions(t *testing.T) { ctx := context.Background() bucket, done := startLiveTest(ctx, t) defer done() first := bucket.r.(*beRoot).options if err := bucket.r.reauthorizeAccount(ctx); err != nil { t.Fatalf("reauthorizeAccount: %v", err) } second := bucket.r.(*beRoot).options if !reflect.DeepEqual(first, second) { // Test that they are literally the same set of options, 
which is an // implementation detail but is fine for now. t.Errorf("options mismatch: got %v, want %v", second, first) } } func TestVerifyReader(t *testing.T) { ctx := context.Background() bucket, done := startLiveTest(ctx, t) defer done() table := []struct { name string fakeSHA string size int64 off, len int64 valid bool }{ { name: "first", size: 100, off: 0, len: -1, valid: true, }, { name: "second", size: 100, off: 0, len: 100, valid: true, }, { name: "third", size: 100, off: 0, len: 99, valid: false, }, { name: "fourth", size: 5e6 + 100, off: 0, len: -1, valid: false, }, { name: "fifth", size: 5e6 + 100, off: 0, len: -1, fakeSHA: "fbc815f2d6518858dec83ccb46263875fc894d88", valid: true, }, } for _, e := range table { o := bucket.Object(e.name) var opts []WriterOption if e.fakeSHA != "" { opts = append(opts, WithAttrsOption(&Attrs{SHA1: e.fakeSHA})) } w := o.NewWriter(ctx, opts...) w.ChunkSize = 5e6 if _, err := io.Copy(w, io.LimitReader(zReader{}, e.size)); err != nil { t.Error(err) continue } if err := w.Close(); err != nil { t.Error(err) continue } r := o.NewRangeReader(ctx, e.off, e.len) if _, err := io.Copy(ioutil.Discard, r); err != nil { t.Error(err) } err, ok := r.Verify() if ok != e.valid { t.Errorf("%s: bad validity: got %v, want %v", e.name, ok, e.valid) } if e.valid && err != nil { t.Errorf("%s does not verify: %v", e.name, err) } } } func TestListBucketsWithKey(t *testing.T) { ctx := context.Background() bucket, done := startLiveTest(ctx, t) defer done() key, err := bucket.CreateKey(ctx, "testKey", Capabilities("listBuckets")) if err != nil { t.Fatal(err) } client, err := NewClient(ctx, key.ID(), key.Secret()) if err != nil { t.Fatal(err) } if _, err := client.Bucket(ctx, bucket.Name()); err != nil { t.Fatal(err) } } func TestListBucketContentsWithKey(t *testing.T) { ctx := context.Background() bucket, done := startLiveTest(ctx, t) defer done() for _, path := range []string{"foo/bar", "foo/baz", "foo", "bar", "baz"} { if _, _, err := writeFile(ctx, bucket, path, 1, 1e8); err != nil { t.Fatal(err) } } key, err := bucket.CreateKey(ctx, "testKey", Capabilities("listBuckets", "listFiles"), Prefix("foo/")) if err != nil { t.Fatal(err) } client, err := NewClient(ctx, key.ID(), key.Secret()) if err != nil { t.Fatal(err) } obucket, err := client.Bucket(ctx, bucket.Name()) if err != nil { t.Fatal(err) } iter := obucket.List(ctx) var got []string for iter.Next() { got = append(got, iter.Object().Name()) } if iter.Err() != nil { t.Fatal(iter.Err()) } want := []string{"foo/bar", "foo/baz"} if !reflect.DeepEqual(got, want) { t.Errorf("error listing objects with restricted key: got %v, want %v", got, want) } iter2 := obucket.List(ctx, ListHidden()) for iter2.Next() { } if iter2.Err() != nil { t.Error(iter2.Err()) } } func TestCreateDeleteKey(t *testing.T) { ctx := context.Background() bucket, done := startLiveTest(ctx, t) defer done() table := []struct { d time.Duration e time.Time bucket bool cap []string pfx string }{ { cap: []string{"deleteKeys"}, }, { d: time.Minute, cap: []string{"deleteKeys"}, pfx: "prefox", }, { e: time.Now().Add(time.Minute), // cap: []string{"writeFiles", "listFiles"}, bucket: true, }, { d: time.Minute, cap: []string{"writeFiles", "listFiles"}, pfx: "prefox", bucket: true, }, } for _, e := range table { var opts []KeyOption opts = append(opts, Capabilities(e.cap...)) if e.d != 0 { opts = append(opts, Lifetime(e.d)) } if !e.e.IsZero() { opts = append(opts, Deadline(e.e)) } var key *Key if e.bucket { opts = append(opts, Prefix(e.pfx)) bkey, err := 
bucket.CreateKey(ctx, "whee", opts...) if err != nil { t.Errorf("Bucket.CreateKey(%v, %v): %v", bucket.Name(), e, err) continue } key = bkey } else { gkey, err := bucket.c.CreateKey(ctx, "whee", opts...) if err != nil { t.Errorf("Client.CreateKey(%v): %v", e, err) continue } key = gkey } if err := key.Delete(ctx); err != nil { t.Errorf("key.Delete(): %v", err) } } } func TestListKeys(t *testing.T) { ctx := context.Background() bucket, done := startLiveTest(ctx, t) defer done() n := 20 for i := 0; i < n; i++ { key, err := bucket.CreateKey(ctx, fmt.Sprintf("%d-list-key-test", i), Capabilities("listBuckets")) if err != nil { t.Fatalf("CreateKey(%d): %v", i, err) } defer key.Delete(ctx) } var got []string var cur string for { ks, c, err := bucket.c.ListKeys(ctx, 10, cur) if err != nil && err != io.EOF { t.Fatalf("ListKeys(): %v", err) } for _, k := range ks { if strings.HasSuffix(k.Name(), "list-key-test") { got = append(got, k.Name()) } } cur = c if err == io.EOF { break } } if len(got) != n { t.Errorf("ListKeys(): got %d, want %d: %v", len(got), n, got) } } func TestEmptyObject(t *testing.T) { ctx := context.Background() bucket, done := startLiveTest(ctx, t) defer done() obj := bucket.Object("empty") w := obj.NewWriter(ctx) if _, err := w.Write([]byte{}); err != nil { t.Fatalf("Write: %v", err) } if err := w.Close(); err != nil { t.Fatalf("Close: %v", err) } attrs, err := obj.Attrs(ctx) if err != nil { t.Fatalf("Attrs: %v", err) } if attrs.Size != 0 { t.Fatalf("Unexpected object size: got %d, want 0", attrs.Size) } } type object struct { o *Object err error } func countObjects(iter *ObjectIterator) (int, error) { var got int for iter.Next() { got++ } return got, iter.Err() } var defaultTransport = http.DefaultTransport type eofTripper struct { rt http.RoundTripper t *testing.T } func (et eofTripper) RoundTrip(req *http.Request) (*http.Response, error) { resp, err := et.rt.RoundTrip(req) if err != nil { return nil, err } resp.Body = &eofReadCloser{rc: resp.Body, t: et.t} return resp, nil } type eofReadCloser struct { rc io.ReadCloser eof bool t *testing.T } func (eof *eofReadCloser) Read(p []byte) (int, error) { n, err := eof.rc.Read(p) if err == io.EOF { eof.eof = true } return n, err } func (eof *eofReadCloser) Close() error { if !eof.eof { eof.t.Error("http body closed with bytes unread") } return eof.rc.Close() } // Checks that close is called. 
type ccTripper struct {
	t     *testing.T
	rt    http.RoundTripper
	trips int64
}

func (cc *ccTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	resp, err := cc.rt.RoundTrip(req)
	if err != nil {
		return nil, err
	}
	atomic.AddInt64(&cc.trips, 1)
	resp.Body = &ccRC{ReadCloser: resp.Body, c: &cc.trips}
	return resp, err
}

func (cc *ccTripper) done() {
	if cc.trips != 0 {
		cc.t.Errorf("failed to close %d HTTP bodies", cc.trips)
	}
}

type ccRC struct {
	io.ReadCloser
	c *int64
}

func (cc *ccRC) Close() error {
	atomic.AddInt64(cc.c, -1)
	return cc.ReadCloser.Close()
}

var uniq string

func init() {
	b := make([]byte, 4)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	uniq = hex.EncodeToString(b)
}

func startLiveTest(ctx context.Context, t *testing.T) (*Bucket, func()) {
	id := os.Getenv(apiID)
	key := os.Getenv(apiKey)
	if id == "" || key == "" {
		t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests")
		return nil, nil
	}
	ccport := &ccTripper{rt: defaultTransport, t: t}
	tport := eofTripper{rt: ccport, t: t}
	errport := transport.WithFailures(tport, transport.FailureRate(.25), transport.MatchPathSubstring("/b2_get_upload_url"), transport.Response(503))
	client, err := NewClient(ctx, id, key, FailSomeUploads(), ExpireSomeAuthTokens(), Transport(errport), UserAgent("b2-test"), UserAgent("integration-test"))
	if err != nil {
		t.Fatal(err)
		return nil, nil
	}
	bucket, err := client.NewBucket(ctx, fmt.Sprintf("%s-%s-%s", id, bucketName, uniq), nil)
	if err != nil {
		t.Fatal(err)
		return nil, nil
	}
	f := func() {
		defer ccport.done()
		iter := bucket.List(ctx, ListHidden())
		for iter.Next() {
			if err := iter.Object().Delete(ctx); err != nil {
				t.Error(err)
			}
		}
		if err := iter.Err(); err != nil && !IsNotExist(err) {
			t.Errorf("%#v", err)
		}
		if err := bucket.Delete(ctx); err != nil && !IsNotExist(err) {
			t.Error(err)
		}
	}
	return bucket, f
}
blazer-0.6.1/b2/iterator.go000066400000000000000000000200351451327606200154660ustar00rootroot00000000000000// Copyright 2018, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package b2

import (
	"context"
	"io"
	"sync"
)

// List returns an iterator for selecting objects in a bucket. The default
// behavior, with no options, is to list all currently un-hidden objects.
func (b *Bucket) List(ctx context.Context, opts ...ListOption) *ObjectIterator {
	o := &ObjectIterator{
		bucket: b,
		ctx:    ctx,
	}
	for _, opt := range opts {
		opt(&o.opts)
	}
	return o
}

// ObjectIterator abstracts away the tricky bits of iterating over a bucket's
// contents.
//
// It is intended to be called in a loop:
//
//	for iter.Next() {
//		obj := iter.Object()
//		// act on obj
//	}
//	if err := iter.Err(); err != nil {
//		// handle err
//	}
type ObjectIterator struct {
	bucket *Bucket
	ctx    context.Context
	final  bool
	err    error
	idx    int
	c      *cursor
	opts   objectIteratorOptions
	objs   []*Object
	init   sync.Once
	l      lister
	count  int
}

type lister func(context.Context, int, *cursor) ([]*Object, *cursor, error)

func (o *ObjectIterator) page(ctx context.Context) error {
	if o.opts.locker != nil {
		o.opts.locker.Lock()
		defer o.opts.locker.Unlock()
	}
	objs, c, err := o.l(ctx, o.count, o.c)
	if err != nil && err != io.EOF {
		if bNotExist.MatchString(err.Error()) {
			return b2err{
				err:         err,
				notFoundErr: true,
			}
		}
		return err
	}
	o.c = c
	o.objs = objs
	o.idx = 0
	if err == io.EOF {
		o.final = true
	}
	return nil
}

// Next advances the iterator to the next object. It should be called before
// any calls to Object(). If Next returns true, then the next call to Object()
// will be valid. Once Next returns false, it is important to check the return
// value of Err().
func (o *ObjectIterator) Next() bool {
	o.init.Do(func() {
		o.count = o.opts.pageSize
		if o.count < 0 || o.count > 1000 {
			o.count = 1000
		}
		switch {
		case o.opts.unfinished:
			o.l = o.bucket.listUnfinishedLargeFiles
			if o.count > 100 {
				o.count = 100
			}
		case o.opts.hidden:
			o.l = o.bucket.listObjects
		default:
			o.l = o.bucket.listCurrentObjects
		}
		o.c = &cursor{
			prefix:    o.opts.prefix,
			delimiter: o.opts.delimiter,
		}
	})
	if o.err != nil {
		return false
	}
	if o.ctx.Err() != nil {
		o.err = o.ctx.Err()
		return false
	}
	if o.idx >= len(o.objs) {
		if o.final {
			o.err = io.EOF
			return false
		}
		if err := o.page(o.ctx); err != nil {
			o.err = err
			return false
		}
		return o.Next()
	}
	o.idx++
	return true
}

// Object returns the current object.
func (o *ObjectIterator) Object() *Object {
	return o.objs[o.idx-1]
}

// Err returns the current error or nil. If Next() returns false and Err() is
// nil, then all objects have been seen.
func (o *ObjectIterator) Err() error {
	if o.err == io.EOF {
		return nil
	}
	return o.err
}

type objectIteratorOptions struct {
	hidden     bool
	unfinished bool
	prefix     string
	delimiter  string
	pageSize   int
	locker     sync.Locker
}

// A ListOption alters the default behavior of List.
type ListOption func(*objectIteratorOptions)

// ListHidden will include hidden objects in the output.
func ListHidden() ListOption {
	return func(o *objectIteratorOptions) {
		o.hidden = true
	}
}

// ListUnfinished will list unfinished large file operations instead of
// existing objects.
func ListUnfinished() ListOption {
	return func(o *objectIteratorOptions) {
		o.unfinished = true
	}
}

// ListPrefix will restrict the output to objects whose names begin with
// prefix.
func ListPrefix(pfx string) ListOption {
	return func(o *objectIteratorOptions) {
		o.prefix = pfx
	}
}

// ListDelimiter denotes the path separator. If set, object listings will be
// truncated at this character.
//
// For example, if the bucket contains objects foo/bar, foo/baz, and foo,
// then a delimiter of "/" will cause the listing to return "foo" and "foo/".
// Otherwise, the listing would have returned all object names.
//
// Note that objects returned that end in the delimiter may not be actual
// objects, e.g. you cannot read from (or write to, or delete) an object
// "foo/", both because no actual object exists and because B2 disallows object
// names that end with "/". If you want to ensure that all objects returned
// are actual objects, leave this unset.
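//
// For example, assuming ctx is a context.Context and bucket is a *Bucket
// holding the objects above, listing only the top level might look like this
// (a sketch, not part of the package API):
//
//	iter := bucket.List(ctx, ListDelimiter("/"))
//	for iter.Next() {
//		fmt.Println(iter.Object().Name()) // prints "foo", then "foo/"
//	}
//	if err := iter.Err(); err != nil {
//		// handle err
//	}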
func ListDelimiter(delimiter string) ListOption { return func(o *objectIteratorOptions) { o.delimiter = delimiter } } // ListPageSize configures the iterator to request the given number of objects // per network round-trip. The default (and maximum) is 1000 objects, except // for unfinished large files, which is 100. func ListPageSize(count int) ListOption { return func(o *objectIteratorOptions) { o.pageSize = count } } // ListLocker passes the iterator a lock which will be held during network // round-trips. func ListLocker(l sync.Locker) ListOption { return func(o *objectIteratorOptions) { o.locker = l } } type cursor struct { // Prefix limits the listed objects to those that begin with this string. prefix string // Delimiter denotes the path separator. If set, object listings will be // truncated at this character. // // For example, if the bucket contains objects foo/bar, foo/baz, and foo, // then a delimiter of "/" will cause the listing to return "foo" and "foo/". // Otherwise, the listing would have returned all object names. // // Note that objects returned that end in the delimiter may not be actual // objects, e.g. you cannot read from (or write to, or delete) an object "foo/", // both because no actual object exists and because B2 disallows object names // that end with "/". If you want to ensure that all objects returned by // ListObjects and ListCurrentObjects are actual objects, leave this unset. delimiter string name string id string } func (b *Bucket) listObjects(ctx context.Context, count int, c *cursor) ([]*Object, *cursor, error) { if c == nil { c = &cursor{} } fs, name, id, err := b.b.listFileVersions(ctx, count, c.name, c.id, c.prefix, c.delimiter) if err != nil { return nil, nil, err } var next *cursor if name != "" && id != "" { next = &cursor{ prefix: c.prefix, delimiter: c.delimiter, name: name, id: id, } } var objects []*Object for _, f := range fs { objects = append(objects, &Object{ name: f.name(), f: f, b: b, }) } var rtnErr error if len(objects) == 0 || next == nil { rtnErr = io.EOF } return objects, next, rtnErr } func (b *Bucket) listCurrentObjects(ctx context.Context, count int, c *cursor) ([]*Object, *cursor, error) { if c == nil { c = &cursor{} } fs, name, err := b.b.listFileNames(ctx, count, c.name, c.prefix, c.delimiter) if err != nil { return nil, nil, err } var next *cursor if name != "" { next = &cursor{ prefix: c.prefix, delimiter: c.delimiter, name: name, } } var objects []*Object for _, f := range fs { objects = append(objects, &Object{ name: f.name(), f: f, b: b, }) } var rtnErr error if len(objects) == 0 || next == nil { rtnErr = io.EOF } return objects, next, rtnErr } func (b *Bucket) listUnfinishedLargeFiles(ctx context.Context, count int, c *cursor) ([]*Object, *cursor, error) { if c == nil { c = &cursor{} } fs, name, err := b.b.listUnfinishedLargeFiles(ctx, count, c.name) if err != nil { return nil, nil, err } var next *cursor if name != "" { next = &cursor{ name: name, } } var objects []*Object for _, f := range fs { objects = append(objects, &Object{ name: f.name(), f: f, b: b, }) } var rtnErr error if len(objects) == 0 || next == nil { rtnErr = io.EOF } return objects, next, rtnErr } blazer-0.6.1/b2/key.go000066400000000000000000000105421451327606200144270ustar00rootroot00000000000000// Copyright 2018, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package b2

import (
	"context"
	"errors"
	"io"
	"time"
)

// Key is a B2 application key. A Key grants limited access on a global or
// per-bucket basis.
type Key struct {
	c *Client
	k beKeyInterface
}

// Capabilities returns the list of capabilities granted by this application
// key.
func (k *Key) Capabilities() []string { return k.k.caps() }

// Name returns the user-supplied name of this application key. Key names are
// useless.
func (k *Key) Name() string { return k.k.name() }

// Expires returns the expiration date of this application key.
func (k *Key) Expires() time.Time { return k.k.expires() }

// Delete removes the key from B2.
func (k *Key) Delete(ctx context.Context) error { return k.k.del(ctx) }

// Secret returns the value that should be passed into NewClient(). It is only
// available on newly created keys; it is not available from ListKeys
// operations.
func (k *Key) Secret() string { return k.k.secret() }

// ID returns the application key ID. This, plus the secret, is necessary to
// authenticate to B2.
func (k *Key) ID() string { return k.k.id() }

type keyOptions struct {
	caps     []string
	prefix   string
	lifetime time.Duration
}

// KeyOption specifies desired properties for application keys.
type KeyOption func(*keyOptions)

// Lifetime requests a key with the given lifetime.
func Lifetime(d time.Duration) KeyOption {
	return func(k *keyOptions) {
		k.lifetime = d
	}
}

// Deadline requests a key that expires after the given date.
func Deadline(t time.Time) KeyOption {
	d := t.Sub(time.Now())
	return Lifetime(d)
}

// Capabilities requests a key with the given capabilities.
func Capabilities(caps ...string) KeyOption {
	return func(k *keyOptions) {
		k.caps = append(k.caps, caps...)
	}
}

// Prefix limits the requested application key to be valid only for objects
// that begin with prefix. This can only be used when requesting an
// application key within a specific bucket.
func Prefix(prefix string) KeyOption {
	return func(k *keyOptions) {
		k.prefix = prefix
	}
}

// CreateKey creates a global application key that is valid for all buckets in
// this project. The key's secret will only be accessible on the object
// returned from this call.
func (c *Client) CreateKey(ctx context.Context, name string, opts ...KeyOption) (*Key, error) {
	var ko keyOptions
	for _, o := range opts {
		o(&ko)
	}
	if ko.prefix != "" {
		return nil, errors.New("Prefix is not a valid option for global application keys")
	}
	ki, err := c.backend.createKey(ctx, name, ko.caps, ko.lifetime, "", "")
	if err != nil {
		return nil, err
	}
	return &Key{
		c: c,
		k: ki,
	}, nil
}

// ListKeys lists all the keys associated with this project. It takes the
// maximum number of keys it should return in a call, as well as a cursor
// (which should be empty for the initial call). It will return up to count
// keys, as well as the cursor for the next invocation.
//
// ListKeys returns io.EOF when there are no more keys, although it may do so
// concurrently with the final set of keys.
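//
// A sketch of draining every key, mirroring how the integration tests page
// through results (client is assumed to be an authenticated *Client):
//
//	var cur string
//	for {
//		keys, next, err := client.ListKeys(ctx, 100, cur)
//		if err != nil && err != io.EOF {
//			// handle err
//		}
//		for _, k := range keys {
//			fmt.Println(k.Name())
//		}
//		if err == io.EOF {
//			break
//		}
//		cur = next
//	}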
func (c *Client) ListKeys(ctx context.Context, count int, cursor string) ([]*Key, string, error) { ks, next, err := c.backend.listKeys(ctx, count, cursor) if err != nil { return nil, "", err } if len(ks) == 0 { return nil, "", io.EOF } var keys []*Key for _, k := range ks { keys = append(keys, &Key{ c: c, k: k, }) } var rerr error if next == "" { rerr = io.EOF } return keys, next, rerr } // CreateKey creates a scoped application key that is valid only for this bucket. func (b *Bucket) CreateKey(ctx context.Context, name string, opts ...KeyOption) (*Key, error) { var ko keyOptions for _, o := range opts { o(&ko) } ki, err := b.r.createKey(ctx, name, ko.caps, ko.lifetime, b.b.id(), ko.prefix) if err != nil { return nil, err } return &Key{ c: b.c, k: ki, }, nil } blazer-0.6.1/b2/licenses.csv000066400000000000000000000001351451327606200156270ustar00rootroot00000000000000github.com/Backblaze/blazer,https://github.com/Backblaze/blazer/blob/HEAD/LICENSE,Apache-2.0 blazer-0.6.1/b2/monitor.go000066400000000000000000000135021451327606200153250ustar00rootroot00000000000000// Copyright 2017, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package b2 import ( "fmt" "html/template" "math" "net/http" "sort" "time" "github.com/Backblaze/blazer/internal/b2assets" "github.com/Backblaze/blazer/x/window" ) // StatusInfo reports information about a client. type StatusInfo struct { // Writers contains the status of all current uploads with progress. Writers map[string]*WriterStatus // Readers contains the status of all current downloads with progress. Readers map[string]*ReaderStatus // RPCs contains information about recently made RPC calls over the last // minute, five minutes, hour, and for all time. RPCs map[time.Duration]MethodList } // MethodList is an accumulation of RPC calls that have been made over a given // period of time. type MethodList []method // CountByMethod returns the total RPC calls made per method. func (ml MethodList) CountByMethod() map[string]int { r := make(map[string]int) for i := range ml { r[ml[i].name]++ } return r } type method struct { name string duration time.Duration status int } type methodCounter struct { d time.Duration w *window.Window } func (mc methodCounter) record(m method) { mc.w.Insert([]method{m}) } func (mc methodCounter) retrieve() MethodList { ms := mc.w.Reduce() return MethodList(ms.([]method)) } func newMethodCounter(d, res time.Duration) methodCounter { r := func(i, j interface{}) interface{} { a, ok := i.([]method) if !ok { a = nil } b, ok := j.([]method) if !ok { b = nil } for _, m := range b { a = append(a, m) } return a } return methodCounter{ d: d, w: window.New(d, res, r), } } // WriterStatus reports the status for each writer. type WriterStatus struct { // Progress is a slice of completion ratios. The index of a ratio is its // chunk id less one. Progress []float64 } // ReaderStatus reports the status for each reader. type ReaderStatus struct { // Progress is a slice of completion ratios. The index of a ratio is its // chunk id less one. 
Progress []float64 } // Status returns information about the current state of the client. func (c *Client) Status() *StatusInfo { c.slock.Lock() defer c.slock.Unlock() si := &StatusInfo{ Writers: make(map[string]*WriterStatus), Readers: make(map[string]*ReaderStatus), RPCs: make(map[time.Duration]MethodList), } for name, w := range c.sWriters { si.Writers[name] = w.status() } for name, r := range c.sReaders { si.Readers[name] = r.status() } for _, c := range c.sMethods { si.RPCs[c.d] = c.retrieve() } return si } func (si *StatusInfo) table() map[string]map[string]int { r := make(map[string]map[string]int) for d, c := range si.RPCs { for _, m := range c { if _, ok := r[m.name]; !ok { r[m.name] = make(map[string]int) } dur := "all time" if d > 0 { dur = d.String() } r[m.name][dur]++ } } return r } func (c *Client) addWriter(w *Writer) { c.slock.Lock() defer c.slock.Unlock() if c.sWriters == nil { c.sWriters = make(map[string]*Writer) } c.sWriters[fmt.Sprintf("%s/%s", w.o.b.Name(), w.name)] = w } func (c *Client) removeWriter(w *Writer) { c.slock.Lock() defer c.slock.Unlock() if c.sWriters == nil { return } delete(c.sWriters, fmt.Sprintf("%s/%s", w.o.b.Name(), w.name)) } func (c *Client) addReader(r *Reader) { c.slock.Lock() defer c.slock.Unlock() if c.sReaders == nil { c.sReaders = make(map[string]*Reader) } c.sReaders[fmt.Sprintf("%s/%s", r.o.b.Name(), r.name)] = r } func (c *Client) removeReader(r *Reader) { c.slock.Lock() defer c.slock.Unlock() if c.sReaders == nil { return } delete(c.sReaders, fmt.Sprintf("%s/%s", r.o.b.Name(), r.name)) } var ( funcMap = template.FuncMap{ "inc": func(i int) int { return i + 1 }, "lookUp": func(m map[string]int, s string) int { return m[s] }, "pRange": func(i int) string { f := float64(i) min := int(math.Pow(2, f)) - 1 max := min + int(math.Pow(2, f)) return fmt.Sprintf("%v - %v", time.Duration(min)*time.Millisecond, time.Duration(max)*time.Millisecond) }, "methods": func(si *StatusInfo) []string { methods := make(map[string]bool) for _, ms := range si.RPCs { for _, m := range ms { methods[m.name] = true } } var names []string for name := range methods { names = append(names, name) } sort.Strings(names) return names }, "durations": func(si *StatusInfo) []string { var ds []time.Duration for d := range si.RPCs { ds = append(ds, d) } sort.Slice(ds, func(i, j int) bool { return ds[i] < ds[j] }) var r []string for _, d := range ds { dur := "all time" if d > 0 { dur = d.String() } r = append(r, dur) } return r }, "table": func(si *StatusInfo) map[string]map[string]int { return si.table() }, } statusTemplate = template.Must(template.New("status").Funcs(funcMap).Parse(string(b2assets.MustAsset("data/status.html")))) ) // ServeHTTP serves diagnostic information about the current state of the // client; essentially everything available from Client.Status() // // ServeHTTP satisfies the http.Handler interface. This means that a Client // can be passed directly to a path via http.Handle (or on a custom ServeMux or // a custom http.Server). func (c *Client) ServeHTTP(rw http.ResponseWriter, req *http.Request) { info := c.Status() statusTemplate.Execute(rw, info) } blazer-0.6.1/b2/reader.go000066400000000000000000000171101451327606200150770ustar00rootroot00000000000000// Copyright 2016, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package b2 import ( "bytes" "context" "crypto/sha1" "errors" "fmt" "hash" "io" "sync" "time" "github.com/Backblaze/blazer/internal/blog" ) var errNoMoreContent = errors.New("416: out of content") // Reader reads files from B2. type Reader struct { // ConcurrentDownloads is the number of simultaneous downloads to pull from // B2. Values greater than one will cause B2 to make multiple HTTP requests // for a given file, increasing available bandwidth at the cost of buffering // the downloads in memory. ConcurrentDownloads int // ChunkSize is the size to fetch per ConcurrentDownload. The default is // 10MB. ChunkSize int ctx context.Context cancel context.CancelFunc // cancels ctx o *Object name string offset int64 // the start of the file length int64 // the length to read, or -1 csize int // chunk size read int // amount read chwid int // chunks written chrid int // chunks read chbuf chan *rchunk init sync.Once chunks map[int]*rchunk vrfy hash.Hash readOffEnd bool sha1 string rmux sync.Mutex // guards rcond rcond *sync.Cond emux sync.RWMutex // guards err, believe it or not err error smux sync.Mutex smap map[int]*meteredReader } type rchunk struct { bytes.Buffer final bool } // Close frees resources associated with the download. func (r *Reader) Close() error { r.cancel() r.o.b.c.removeReader(r) return nil } func (r *Reader) setErr(err error) { r.emux.Lock() defer r.emux.Unlock() if r.err == nil { r.err = err r.cancel() } } func (r *Reader) setErrNoCancel(err error) { r.emux.Lock() defer r.emux.Unlock() if r.err == nil { r.err = err } } func (r *Reader) getErr() error { r.emux.RLock() defer r.emux.RUnlock() return r.err } func (r *Reader) thread() { go func() { for { var buf *rchunk select { case b, ok := <-r.chbuf: if !ok { return } buf = b case <-r.ctx.Done(): return } r.rmux.Lock() chunkID := r.chwid r.chwid++ r.rmux.Unlock() offset := int64(chunkID*r.csize) + r.offset size := int64(r.csize) if r.length > 0 { if size > r.length { buf.final = true size = r.length } r.length -= size } var b backoff redo: fr, err := r.o.b.b.downloadFileByName(r.ctx, r.name, offset, size, false) if err == errNoMoreContent { // this read generated a 416 so we are entirely past the end of the object r.readOffEnd = true buf.final = true r.rmux.Lock() r.chunks[chunkID] = buf r.rmux.Unlock() r.rcond.Broadcast() return } if err != nil { r.setErr(err) r.rcond.Broadcast() return } rsize, _, sha1, _ := fr.stats() if len(sha1) == 40 && r.sha1 != sha1 { r.sha1 = sha1 } mr := &meteredReader{r: noopResetter{fr}, size: int(rsize)} r.smux.Lock() r.smap[chunkID] = mr r.smux.Unlock() i, err := copyContext(r.ctx, buf, mr) fr.Close() r.smux.Lock() r.smap[chunkID] = nil r.smux.Unlock() if i < int64(rsize) || err == io.ErrUnexpectedEOF { // Probably the network connection was closed early. Retry. 
blog.V(1).Infof("b2 reader %d: got %dB of %dB; retrying after %v", chunkID, i, rsize, b) if err := b.wait(r.ctx); err != nil { r.setErr(err) r.rcond.Broadcast() return } buf.Reset() goto redo } if err != nil { r.setErr(err) r.rcond.Broadcast() return } r.rmux.Lock() r.chunks[chunkID] = buf r.rmux.Unlock() r.rcond.Broadcast() } }() } func (r *Reader) curChunk() (*rchunk, error) { ch := make(chan *rchunk) go func() { r.rmux.Lock() defer r.rmux.Unlock() for r.chunks[r.chrid] == nil && r.getErr() == nil && r.ctx.Err() == nil { r.rcond.Wait() } select { case ch <- r.chunks[r.chrid]: case <-r.ctx.Done(): return } }() select { case buf := <-ch: return buf, r.getErr() case <-r.ctx.Done(): if r.getErr() != nil { return nil, r.getErr() } return nil, r.ctx.Err() } } func (r *Reader) initFunc() { r.smux.Lock() r.smap = make(map[int]*meteredReader) r.smux.Unlock() r.o.b.c.addReader(r) r.rcond = sync.NewCond(&r.rmux) cr := r.ConcurrentDownloads if cr < 1 { cr = 1 } if r.ChunkSize < 1 { r.ChunkSize = 1e7 } r.csize = r.ChunkSize r.chbuf = make(chan *rchunk, cr) for i := 0; i < cr; i++ { r.thread() r.chbuf <- &rchunk{} } r.vrfy = sha1.New() } func (r *Reader) Read(p []byte) (int, error) { if err := r.getErr(); err != nil { return 0, err } r.init.Do(r.initFunc) chunk, err := r.curChunk() if err != nil { r.setErrNoCancel(err) return 0, err } n, err := chunk.Read(p) r.vrfy.Write(p[:n]) // Hash.Write never returns an error. r.read += n if err == io.EOF { if chunk.final { close(r.chbuf) r.setErrNoCancel(err) return n, err } r.chrid++ chunk.Reset() r.chbuf <- chunk err = nil } r.setErrNoCancel(err) return n, err } func (r *Reader) status() *ReaderStatus { r.smux.Lock() defer r.smux.Unlock() rs := &ReaderStatus{ Progress: make([]float64, len(r.smap)), } for i := 1; i <= len(r.smap); i++ { rs.Progress[i-1] = r.smap[i].done() } return rs } // Verify checks the SHA1 hash on download and compares it to the SHA1 hash // submitted on upload. If the two differ, this returns an error. If the // correct hash could not be calculated (if, for example, the entire object was // not read, or if the object was uploaded as a "large file" and thus the SHA1 // hash was not sent), this returns (nil, false). func (r *Reader) Verify() (error, bool) { got := fmt.Sprintf("%x", r.vrfy.Sum(nil)) if r.sha1 == got { return nil, true } // TODO: if the exact length of the file is requested AND the checksum is // bad, this will return (nil, false) instead of (an error, true). This is // because there's no good way that I can tell to determine that we've hit // the end of the file without reading off the end. Consider reading N+1 // bytes at the very end to close this hole. 
if r.offset > 0 || !r.readOffEnd || len(r.sha1) != 40 { return nil, false } return fmt.Errorf("bad hash: got %v, want %v", got, r.sha1), true } // strip a writer of any non-Write methods type onlyWriter struct{ w io.Writer } func (ow onlyWriter) Write(p []byte) (int, error) { return ow.w.Write(p) } func copyContext(ctx context.Context, w io.Writer, r io.Reader) (int64, error) { var n int64 var err error done := make(chan struct{}) go func() { if _, ok := w.(*Writer); ok { w = onlyWriter{w} } n, err = io.Copy(w, r) close(done) }() select { case <-done: return n, err case <-ctx.Done(): return 0, ctx.Err() } } type noopResetter struct { io.Reader } func (noopResetter) Reset() error { return nil } type backoff time.Duration func (b *backoff) wait(ctx context.Context) error { if *b == 0 { *b = backoff(time.Millisecond) } select { case <-time.After(time.Duration(*b)): if time.Duration(*b) < time.Second*10 { *b <<= 1 } return nil case <-ctx.Done(): return ctx.Err() } } func (b backoff) String() string { return time.Duration(b).String() } blazer-0.6.1/b2/readerat.go000066400000000000000000000023161451327606200154260ustar00rootroot00000000000000// Copyright 2017, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package b2 import ( "io" "sync" ) type readerAt struct { rs io.ReadSeeker mu sync.Mutex } func (r *readerAt) ReadAt(p []byte, off int64) (int, error) { r.mu.Lock() defer r.mu.Unlock() // ReadAt is supposed to preserve the offset. cur, err := r.rs.Seek(0, io.SeekCurrent) if err != nil { return 0, err } defer r.rs.Seek(cur, io.SeekStart) if _, err := r.rs.Seek(off, io.SeekStart); err != nil { return 0, err } return io.ReadFull(r.rs, p) } // enReaderAt wraps a ReadSeeker in a mutex to provide a ReaderAt. (How is // this not in the io package?) func enReaderAt(rs io.ReadSeeker) io.ReaderAt { return &readerAt{rs: rs} } blazer-0.6.1/b2/writer.go000066400000000000000000000333531451327606200151600ustar00rootroot00000000000000// Copyright 2016, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package b2 import ( "context" "errors" "fmt" "io" "sync" "sync/atomic" "time" "github.com/Backblaze/blazer/internal/blog" ) // Writer writes data into Backblaze. It automatically switches to the large // file API if the file exceeds ChunkSize bytes. Due to that and other // Backblaze API details, there is a large buffer. // // Changes to public Writer attributes must be made before the first call to // Write.
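//
// For example (a sketch; bucket, ctx, and src are assumed to be in scope):
//
//	w := bucket.Object("big-object").NewWriter(ctx)
//	w.ConcurrentUploads = 4 // must be set before the first Write
//	if _, err := io.Copy(w, src); err != nil {
//		w.Close()
//		return err
//	}
//	return w.Close()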
type Writer struct { // ConcurrentUploads is the number of different threads sending data // concurrently to Backblaze for large files. This can increase performance // greatly, as each thread will hit a different endpoint. However, there is // a ChunkSize buffer for each thread. Values less than 1 are equivalent to // 1. ConcurrentUploads int // Resume an upload. If true, and the upload is a large file, and a file of // the same name was started but not finished, then assume that we are // resuming that file, and don't upload duplicate chunks. Resume bool // ChunkSize is the size, in bytes, of each individual part, when writing // large files, and also when determining whether to upload a file normally // or when to split it into parts. The default is 100M (1e8). The minimum is // 5M (5e6); values less than this are not an error, but uploads will fail. // The maximum is 5GB (5e9). ChunkSize int // UseFileBuffer controls whether to use an in-memory buffer (the default) or // scratch space on the file system. If this is true, b2 will save chunks in // FileBufferDir. UseFileBuffer bool // FileBufferDir specifies the directory where scratch files are kept. If // blank, os.TempDir() is used. FileBufferDir string contentType string info map[string]string csize int ctx context.Context cancel context.CancelFunc // cancels ctx ctxf func() context.Context errf func(error) ready chan chunk cdone chan struct{} wg sync.WaitGroup start sync.Once once sync.Once done sync.Once file beLargeFileInterface seen map[int]string everStarted bool newBuffer func() (writeBuffer, error) o *Object name string cidx int w writeBuffer emux sync.RWMutex err error smux sync.RWMutex smap map[int]*meteredReader } type chunk struct { id int buf writeBuffer } func (w *Writer) setErr(err error) { if err == nil || err == io.EOF { return } w.emux.Lock() defer w.emux.Unlock() if w.err != nil { return } blog.V(1).Infof("error writing %s: %v", w.name, err) w.err = err w.cancel() if w.ctxf == nil { return } if w.errf == nil { w.errf = func(error) {} } w.errf(w.file.cancel(w.ctxf())) } func (w *Writer) getErr() error { w.emux.RLock() defer w.emux.RUnlock() return w.err } func (w *Writer) registerChunk(id int, r *meteredReader) { w.smux.Lock() w.smap[id] = r w.smux.Unlock() } func (w *Writer) completeChunk(id int) { w.smux.Lock() w.smap[id] = nil w.smux.Unlock() } var gid int32 func sleepCtx(ctx context.Context, d time.Duration) error { select { case <-ctx.Done(): return ctx.Err() case <-time.After(d): return nil } } func (w *Writer) thread() { w.wg.Add(1) go func() { defer w.wg.Done() id := atomic.AddInt32(&gid, 1) fc, err := w.file.getUploadPartURL(w.ctx) if err != nil { w.setErr(err) return } for { var cnk chunk select { case cnk = <-w.ready: case <-w.cdone: return } if sha, ok := w.seen[cnk.id]; ok { if sha != cnk.buf.Hash() { w.setErr(errors.New("resumable upload was requested, but chunks don't match")) return } cnk.buf.Close() w.completeChunk(cnk.id) blog.V(2).Infof("skipping chunk %d", cnk.id) continue } blog.V(2).Infof("thread %d handling chunk %d", id, cnk.id) r, err := cnk.buf.Reader() if err != nil { w.setErr(err) return } mr := &meteredReader{r: r, size: cnk.buf.Len()} w.registerChunk(cnk.id, mr) sleep := time.Millisecond * 15 redo: n, err := fc.uploadPart(w.ctx, mr, cnk.buf.Hash(), cnk.buf.Len(), cnk.id) if n != cnk.buf.Len() || err != nil { if w.o.b.r.reupload(err) { if err := sleepCtx(w.ctx, sleep); err != nil { w.setErr(err) w.completeChunk(cnk.id) cnk.buf.Close() // TODO: log error } sleep *= 2 if sleep > time.Second*15 { sleep =
time.Second * 15 } blog.V(1).Infof("b2 writer: wrote %d of %d: error: %v; retrying", n, cnk.buf.Len(), err) f, err := w.file.getUploadPartURL(w.ctx) if err != nil { w.setErr(err) w.completeChunk(cnk.id) cnk.buf.Close() // TODO: log error return } fc = f goto redo } w.setErr(err) w.completeChunk(cnk.id) cnk.buf.Close() // TODO: log error return } w.completeChunk(cnk.id) cnk.buf.Close() // TODO: log error blog.V(2).Infof("chunk %d handled", cnk.id) } }() } func (w *Writer) init() { w.start.Do(func() { w.everStarted = true w.smux.Lock() w.smap = make(map[int]*meteredReader) w.smux.Unlock() w.o.b.c.addWriter(w) w.csize = w.ChunkSize if w.csize == 0 { w.csize = 1e8 } if w.newBuffer == nil { w.newBuffer = func() (writeBuffer, error) { return newMemoryBuffer(), nil } if w.UseFileBuffer { w.newBuffer = func() (writeBuffer, error) { return newFileBuffer(w.FileBufferDir) } } } v, err := w.newBuffer() if err != nil { w.setErr(err) return } w.w = v }) } // Write satisfies the io.Writer interface. func (w *Writer) Write(p []byte) (int, error) { if len(p) == 0 { return 0, nil } w.init() if err := w.getErr(); err != nil { return 0, err } left := w.csize - w.w.Len() if len(p) < left { return w.w.Write(p) } i, err := w.w.Write(p[:left]) if err != nil { w.setErr(err) return i, err } if err := w.sendChunk(); err != nil { w.setErr(err) return i, w.getErr() } k, err := w.Write(p[left:]) if err != nil { w.setErr(err) } return i + k, err } func (w *Writer) getUploadURL(ctx context.Context) (beURLInterface, error) { u := w.o.b.urlPool.get() if u == nil { return w.o.b.b.getUploadURL(w.ctx) } return u, nil } func (w *Writer) simpleWriteFile() error { ue, err := w.getUploadURL(w.ctx) if err != nil { return err } // This defer needs to be in a func() so that we put whatever the value of ue // is at function exit. 
defer func() { w.o.b.urlPool.put(ue) }() sha1 := w.w.Hash() ctype := w.contentType if ctype == "" { ctype = "application/octet-stream" } r, err := w.w.Reader() if err != nil { return err } mr := &meteredReader{r: r, size: w.w.Len()} w.registerChunk(1, mr) defer w.completeChunk(1) redo: f, err := ue.uploadFile(w.ctx, mr, int(w.w.Len()), w.name, ctype, sha1, w.info) if err != nil { if w.o.b.r.reupload(err) { blog.V(2).Infof("b2 writer: %v; retrying", err) u, err := w.o.b.b.getUploadURL(w.ctx) if err != nil { return err } ue = u goto redo } return err } w.o.f = f return nil } func (w *Writer) getLargeFile() (beLargeFileInterface, error) { if !w.Resume { ctype := w.contentType if ctype == "" { ctype = "application/octet-stream" } return w.o.b.b.startLargeFile(w.ctx, w.name, ctype, w.info) } var got bool iter := w.o.b.List(w.ctx, ListPrefix(w.name), ListUnfinished()) var fi beFileInterface for iter.Next() { obj := iter.Object() if obj.Name() == w.name { got = true fi = obj.f } } if iter.Err() != nil { return nil, iter.Err() } if !got { w.Resume = false return w.getLargeFile() } next := 1 seen := make(map[int]string) var size int64 for { parts, n, err := fi.listParts(w.ctx, next, 100) if err != nil { return nil, err } next = n for _, p := range parts { seen[p.number()] = p.sha1() size += p.size() } if len(parts) == 0 { break } if next == 0 { break } } w.seen = make(map[int]string) // copy the map for id, sha := range seen { w.seen[id] = sha } return fi.compileParts(size, seen), nil } func (w *Writer) sendChunk() error { var err error w.once.Do(func() { lf, e := w.getLargeFile() if e != nil { err = e return } w.file = lf w.ready = make(chan chunk) w.cdone = make(chan struct{}) if w.ConcurrentUploads < 1 { w.ConcurrentUploads = 1 } for i := 0; i < w.ConcurrentUploads; i++ { w.thread() } }) if err != nil { return err } select { case <-w.cdone: return nil case w.ready <- chunk{ id: w.cidx + 1, buf: w.w, }: case <-w.ctx.Done(): return w.ctx.Err() } w.cidx++ v, err := w.newBuffer() if err != nil { return err } w.w = v return nil } // ReadFrom reads all of r into w, returning the first error or no error if r // returns io.EOF. If r is also an io.Seeker, ReadFrom will stream r directly // over the wire instead of buffering it locally. This reduces memory usage. // // Do not issue multiple calls to ReadFrom, or mix ReadFrom and Write. If you // have multiple readers you want to concatenate into the same B2 object, use // an io.MultiReader. // // Note that io.Copy will automatically choose to use ReadFrom. // // ReadFrom currently doesn't handle w.Resume; if w.Resume is true, ReadFrom // will act as if r is not an io.Seeker. func (w *Writer) ReadFrom(r io.Reader) (int64, error) { rs, ok := r.(io.ReadSeeker) if !ok || w.Resume { return copyContext(w.ctx, w, r) } blog.V(2).Info("streaming without buffer") size, err := rs.Seek(0, io.SeekEnd) if err != nil { return 0, err } var ra io.ReaderAt if rat, ok := r.(io.ReaderAt); ok { ra = rat } else { ra = enReaderAt(rs) } var offset int64 var wrote int64 w.newBuffer = func() (writeBuffer, error) { left := size - offset if left <= 0 { // We're done sending real chunks; send empty chunks from now on so that // Close() works. 
w.newBuffer = func() (writeBuffer, error) { return newMemoryBuffer(), nil } w.w = newMemoryBuffer() return nil, io.EOF } csize := int64(w.csize) if left < csize { csize = left } nb := newNonBuffer(ra, offset, csize) wrote += csize // TODO: this is kind of a total lie offset += csize return nb, nil } w.init() if size < int64(w.csize) { // the magic happens on w.Close() return size, nil } for { if err := w.sendChunk(); err != nil { if err != io.EOF { return wrote, err } return wrote, nil } } } // Close satisfies the io.Closer interface. It is critical to check the return // value of Close for all writers. func (w *Writer) Close() error { w.done.Do(func() { if !w.everStarted { w.init() w.setErr(w.simpleWriteFile()) return } defer w.o.b.c.removeWriter(w) defer func() { if err := w.w.Close(); err != nil { // this is non-fatal, but alarming blog.V(1).Infof("close %s: %v", w.name, err) } }() if w.cidx == 0 { w.setErr(w.simpleWriteFile()) return } if w.w.Len() > 0 { if err := w.sendChunk(); err != nil { w.setErr(err) return } } // See https://github.com/Backblaze/blazer/issues/60 for why we use a special // channel for this. close(w.cdone) w.wg.Wait() f, err := w.file.finishLargeFile(w.ctx) if err != nil { w.setErr(err) return } w.o.f = f }) return w.getErr() } func (w *Writer) withAttrs(attrs *Attrs) *Writer { w.contentType = attrs.ContentType w.info = make(map[string]string) for k, v := range attrs.Info { w.info[k] = v } if len(w.info) < 10 && attrs.SHA1 != "" { w.info["large_file_sha1"] = attrs.SHA1 } if len(w.info) < 10 && !attrs.LastModified.IsZero() { w.info["src_last_modified_millis"] = fmt.Sprintf("%d", attrs.LastModified.UnixNano()/1e6) } return w } // A WriterOption sets Writer-specific behavior. type WriterOption func(*Writer) // WithAttrsOption attaches the given Attrs to the writer. func WithAttrsOption(attrs *Attrs) WriterOption { return func(w *Writer) { w.withAttrs(attrs) } } // WithCancelOnError requests the writer, if it has started a large file // upload, to call b2_cancel_large_file on any permanent error. It calls ctxf // to obtain a context with which to cancel the file; this is to allow callers // to set specific timeouts. If errf is non-nil, then it is called with the // (possibly nil) output of b2_cancel_large_file. func WithCancelOnError(ctxf func() context.Context, errf func(error)) WriterOption { return func(w *Writer) { w.ctxf = ctxf w.errf = errf } } // DefaultWriterOptions returns a ClientOption that will apply the given // WriterOptions to every Writer. These options can be overridden by passing // new options to NewWriter.
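//
// A sketch of wiring this up at client construction (the Attrs value is an
// illustrative assumption):
//
//	client, err := b2.NewClient(ctx, id, key,
//		b2.DefaultWriterOptions(b2.WithAttrsOption(&b2.Attrs{ContentType: "application/octet-stream"})))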
func DefaultWriterOptions(opts ...WriterOption) ClientOption { return func(c *clientOptions) { c.writerOpts = opts } } func (w *Writer) status() *WriterStatus { w.smux.RLock() defer w.smux.RUnlock() ws := &WriterStatus{ Progress: make([]float64, len(w.smap)), } for i := 1; i <= len(w.smap); i++ { ws.Progress[i-1] = w.smap[i].done() } return ws } type meteredReader struct { read int64 size int r readResetter mux sync.Mutex } func (mr *meteredReader) Read(p []byte) (int, error) { mr.mux.Lock() defer mr.mux.Unlock() n, err := mr.r.Read(p) mr.read += int64(n) return n, err } func (mr *meteredReader) Reset() error { mr.mux.Lock() defer mr.mux.Unlock() mr.read = 0 return mr.r.Reset() } func (mr *meteredReader) done() float64 { if mr == nil { return 1 } read := float64(atomic.LoadInt64(&mr.read)) return read / float64(mr.size) } blazer-0.6.1/base/000077500000000000000000000000001451327606200137155ustar00rootroot00000000000000blazer-0.6.1/base/base.go000066400000000000000000001065171451327606200151700ustar00rootroot00000000000000// Copyright 2016, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package base provides a very low-level interface on top of the B2 v1 API. // It is not intended to be used directly. // // It currently lacks support for the following APIs: // // b2_download_file_by_id package base import ( "bytes" "context" "crypto/x509" "encoding/base64" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "regexp" "strconv" "strings" "sync" "sync/atomic" "time" "github.com/Backblaze/blazer/internal/b2types" "github.com/Backblaze/blazer/internal/blog" ) const ( APIBase = "https://api.backblazeb2.com" DefaultUserAgent = "blazer/0.6.1" ) type b2err struct { msg string method string retry int code int msgCode string } func (e b2err) Error() string { if e.method == "" { return fmt.Sprintf("b2 error: %s", e.msg) } return fmt.Sprintf("%s: %d: %s", e.method, e.code, e.msg) } // Action checks an error and returns a recommended course of action. func Action(err error) ErrAction { e, ok := err.(b2err) if !ok { return Punt } if e.retry > 0 { return Retry } if e.code >= 500 && e.code < 600 && (e.method == "b2_upload_file" || e.method == "b2_upload_part") { return AttemptNewUpload } switch e.code { case 401: switch e.method { case "b2_authorize_account": return Punt case "b2_upload_file", "b2_upload_part": return AttemptNewUpload } return ReAuthenticate case 400: // See restic/restic#1207 if e.method == "b2_upload_file" && strings.HasPrefix(e.msg, "more than one upload using auth token") { return AttemptNewUpload } return Punt case 408: return AttemptNewUpload case 429, 500, 503: return Retry } return Punt } // ErrAction is an action that a caller can take when any function returns an // error. type ErrAction int // Code returns the error code and message. func Code(err error) (int, string) { e, ok := err.(b2err) if !ok { return 0, "" } return e.code, e.msg } // MsgCode returns the error code, msgCode and message. 
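//
// For example, a hypothetical check for a 403 whose msgCode distinguishes it
// from other authorization failures (the specific code string is an
// assumption for illustration):
//
//	code, msgCode, msg := base.MsgCode(err)
//	if code == 403 && msgCode == "cap_exceeded" {
//		log.Printf("B2 usage cap exceeded: %s", msg)
//	}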
func MsgCode(err error) (int, string, string) { e, ok := err.(b2err) if !ok { return 0, "", "" } return e.code, e.msgCode, e.msg } const ( // ReAuthenticate indicates that the B2 account authentication tokens have // expired, and should be refreshed with a new call to AuthorizeAccount. ReAuthenticate ErrAction = iota // AttemptNewUpload indicates that an upload's authentication token (or URL // endpoint) has expired, and that users should request new ones with a call // to GetUploadURL or GetUploadPartURL. AttemptNewUpload // Retry indicates that the caller should wait an appropriate amount of time, // and then reattempt the RPC. Retry // Punt means that there is no useful action to be taken on this error, and // that it should be displayed to the user. Punt ) func mkErr(resp *http.Response) error { data, err := ioutil.ReadAll(resp.Body) var msgBody string if err != nil { msgBody = fmt.Sprintf("couldn't read message body: %v", err) } logResponse(resp, data) msg := &b2types.ErrorMessage{} if err := json.Unmarshal(data, msg); err != nil { if msgBody != "" { msgBody = fmt.Sprintf("couldn't read message body: %v", err) } } if msgBody == "" { msgBody = msg.Msg } var retryAfter int retry := resp.Header.Get("Retry-After") if retry != "" { r, err := strconv.ParseInt(retry, 10, 64) if err != nil { r = 0 blog.V(1).Infof("couldn't parse retry-after header %q: %v", retry, err) } retryAfter = int(r) } return b2err{ msg: msgBody, retry: retryAfter, code: resp.StatusCode, msgCode: msg.Code, method: resp.Request.Header.Get("X-Blazer-Method"), } } // Backoff returns an appropriate amount of time to wait, given an error, if // any was returned by the server. If the return value is 0, but Action // indicates Retry, the user should implement their own exponential backoff, // beginning with one second. 
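//
// A retry-loop sketch (doRPC is an assumed caller-defined function):
//
//	backoff := time.Second
//	for {
//		err := doRPC(ctx)
//		if base.Action(err) != base.Retry {
//			return err
//		}
//		if d := base.Backoff(err); d > 0 {
//			backoff = d
//		}
//		time.Sleep(backoff)
//		backoff *= 2
//	}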
func Backoff(err error) time.Duration { e, ok := err.(b2err) if !ok { return 0 } return time.Duration(e.retry) * time.Second } func logRequest(req *http.Request, args []byte) { if !blog.V(2) { return } var headers []string for k, v := range req.Header { if k == "Authorization" || k == "X-Blazer-Method" { continue } headers = append(headers, fmt.Sprintf("%s: %s", k, strings.Join(v, ","))) } hstr := strings.Join(headers, ";") method := req.Header.Get("X-Blazer-Method") if args != nil { blog.V(2).Infof(">> %s %v: %v headers: {%s} args: (%s)", method, req.Method, req.URL, hstr, string(args)) return } blog.V(2).Infof(">> %s %v: %v {%s} (no args)", method, req.Method, req.URL, hstr) } var authRegexp = regexp.MustCompile(`"authorizationToken": ".[^"]*"`) func logResponse(resp *http.Response, reply []byte) { if !blog.V(2) { return } var headers []string for k, v := range resp.Header { headers = append(headers, fmt.Sprintf("%s: %s", k, strings.Join(v, ","))) } hstr := strings.Join(headers, "; ") method := resp.Request.Header.Get("X-Blazer-Method") id := resp.Request.Header.Get("X-Blazer-Request-ID") if reply != nil { safe := string(authRegexp.ReplaceAll(reply, []byte(`"authorizationToken": "[redacted]"`))) blog.V(2).Infof("<< %s (%s) %s {%s} (%s)", method, id, resp.Status, hstr, safe) return } blog.V(2).Infof("<< %s (%s) %s {%s} (no reply)", method, id, resp.Status, hstr) } func millitime(t int64) time.Time { return time.Unix(t/1000, t%1000*1e6) } type b2Options struct { transport http.RoundTripper failSomeUploads bool expireTokens bool capExceeded bool apiBase string userAgent string } func (o *b2Options) addHeaders(req *http.Request) { if o.failSomeUploads { req.Header.Add("X-Bz-Test-Mode", "fail_some_uploads") } if o.expireTokens { req.Header.Add("X-Bz-Test-Mode", "expire_some_account_authorization_tokens") } if o.capExceeded { req.Header.Add("X-Bz-Test-Mode", "force_cap_exceeded") } req.Header.Set("User-Agent", o.getUserAgent()) } func (o *b2Options) getAPIBase() string { if o.apiBase != "" { return o.apiBase } return APIBase } func (o *b2Options) getUserAgent() string { if o.userAgent != "" { return fmt.Sprintf("%s %s", o.userAgent, DefaultUserAgent) } return DefaultUserAgent } func (o *b2Options) getTransport() http.RoundTripper { if o.transport == nil { return http.DefaultTransport } return o.transport } // B2 holds account information for Backblaze. type B2 struct { accountID string authToken string apiURI string downloadURI string minPartSize int opts *b2Options bucket string // restricted to this bucket if present pfx string // restricted to objects with this prefix if present } // Update replaces the B2 object with a new one, in-place. 
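//
// A re-authentication sketch (account and key are assumed to be in scope):
//
//	if base.Action(err) == base.ReAuthenticate {
//		nb, aerr := base.AuthorizeAccount(ctx, account, key)
//		if aerr != nil {
//			return aerr
//		}
//		b.Update(nb)
//	}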
func (b *B2) Update(n *B2) { b.accountID = n.accountID b.authToken = n.authToken b.apiURI = n.apiURI b.downloadURI = n.downloadURI b.minPartSize = n.minPartSize b.opts = n.opts } type httpReply struct { resp *http.Response err error } func makeNetRequest(ctx context.Context, req *http.Request, rt http.RoundTripper) (*http.Response, error) { req = req.WithContext(ctx) resp, err := rt.RoundTrip(req) switch err { case nil: return resp, nil case context.Canceled, context.DeadlineExceeded: return nil, err default: method := req.Header.Get("X-Blazer-Method") blog.V(2).Infof(">> %s uri: %v err: %v", method, req.URL, err) switch err.(type) { case x509.UnknownAuthorityError: return nil, err } return nil, b2err{ msg: err.Error(), retry: 1, } } } type requestBody struct { size int64 body io.Reader } func (rb *requestBody) getSize() int64 { if rb == nil { return 0 } return rb.size } func (rb *requestBody) getBody() io.Reader { if rb == nil { return nil } if rb.getSize() == 0 { // https://github.com/kurin/blazer/issues/57 // When body is non-nil, but the request's ContentLength is 0, it is // replaced with -1, which causes the client to send a chunked encoding, // which confuses B2. return http.NoBody } return rb.body } type keepFinalBytes struct { r io.Reader remain int sha [40]byte } func (k *keepFinalBytes) Read(p []byte) (int, error) { n, err := k.r.Read(p) if k.remain-n > 40 { k.remain -= n return n, err } // This was a whole lot harder than it looks. pi := -40 + k.remain if pi < 0 { pi = 0 } pe := n ki := 40 - k.remain if ki < 0 { ki = 0 } ke := n - k.remain + 40 copy(k.sha[ki:ke], p[pi:pe]) k.remain -= n return n, err } var reqID int64 func (o *b2Options) makeRequest(ctx context.Context, method, verb, uri string, b2req, b2resp interface{}, headers map[string]string, body *requestBody) error { var args []byte if b2req != nil { enc, err := json.Marshal(b2req) if err != nil { return err } args = enc body = &requestBody{ body: bytes.NewBuffer(enc), size: int64(len(enc)), } } req, err := http.NewRequest(verb, uri, body.getBody()) if err != nil { return err } req.ContentLength = body.getSize() for k, v := range headers { if strings.HasPrefix(k, "X-Bz-Info") || strings.HasPrefix(k, "X-Bz-File-Name") { v = escape(v) } req.Header.Set(k, v) } req.Header.Set("X-Blazer-Request-ID", fmt.Sprintf("%d", atomic.AddInt64(&reqID, 1))) req.Header.Set("X-Blazer-Method", method) o.addHeaders(req) logRequest(req, args) resp, err := makeNetRequest(ctx, req, o.getTransport()) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != 200 { return mkErr(resp) } var replyArgs []byte if b2resp != nil { rbuf := &bytes.Buffer{} r := io.TeeReader(resp.Body, rbuf) decoder := json.NewDecoder(r) if err := decoder.Decode(b2resp); err != nil { return err } replyArgs = rbuf.Bytes() } else { ra, err := ioutil.ReadAll(resp.Body) if err != nil { blog.V(1).Infof("%s: couldn't read response: %v", method, err) } replyArgs = ra } logResponse(resp, replyArgs) return nil } // AuthorizeAccount wraps b2_authorize_account. 
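//
// For example (credentials pulled from the environment, as the integration
// tests do; a sketch only):
//
//	b2, err := base.AuthorizeAccount(ctx,
//		os.Getenv("B2_ACCOUNT_ID"), os.Getenv("B2_SECRET_KEY"),
//		base.UserAgent("my-tool"))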
func AuthorizeAccount(ctx context.Context, account, key string, opts ...AuthOption) (*B2, error) { auth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", account, key))) b2resp := &b2types.AuthorizeAccountResponse{} headers := map[string]string{ "Authorization": fmt.Sprintf("Basic %s", auth), } b2opts := &b2Options{} for _, f := range opts { f(b2opts) } if err := b2opts.makeRequest(ctx, "b2_authorize_account", "GET", b2opts.getAPIBase()+b2types.V1api+"b2_authorize_account", nil, b2resp, headers, nil); err != nil { return nil, err } return &B2{ accountID: b2resp.AccountID, authToken: b2resp.AuthToken, apiURI: b2resp.URI, downloadURI: b2resp.DownloadURI, minPartSize: b2resp.PartSize, bucket: b2resp.Allowed.Bucket, pfx: b2resp.Allowed.Prefix, opts: b2opts, }, nil } // An AuthOption allows callers to choose per-session settings. type AuthOption func(*b2Options) // UserAgent sets the User-Agent HTTP header. The default header is // "blazer/<version>"; the value set here will be prepended to that. This can // be set multiple times. func UserAgent(agent string) AuthOption { return func(o *b2Options) { if o.userAgent == "" { o.userAgent = agent return } o.userAgent = fmt.Sprintf("%s %s", agent, o.userAgent) } } // Transport returns an AuthOption that sets the underlying HTTP mechanism. func Transport(rt http.RoundTripper) AuthOption { return func(o *b2Options) { o.transport = rt } } // FailSomeUploads requests intermittent upload failures from the B2 service. // This is mostly useful for testing. func FailSomeUploads() AuthOption { return func(o *b2Options) { o.failSomeUploads = true } } // ExpireSomeAuthTokens requests intermittent authentication failures from the // B2 service. func ExpireSomeAuthTokens() AuthOption { return func(o *b2Options) { o.expireTokens = true } } // ForceCapExceeded requests a cap limit from the B2 service. This causes all // uploads to be treated as if they would exceed the configured B2 capacity. func ForceCapExceeded() AuthOption { return func(o *b2Options) { o.capExceeded = true } } // SetAPIBase returns an AuthOption that uses the given URL as the base for API // requests. func SetAPIBase(url string) AuthOption { return func(o *b2Options) { o.apiBase = url } } type LifecycleRule struct { Prefix string DaysNewUntilHidden int DaysHiddenUntilDeleted int } // CreateBucket wraps b2_create_bucket.
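//
// For example (bucket name and rule are illustrative):
//
//	bucket, err := b2.CreateBucket(ctx, "my-bucket", "allPrivate", nil,
//		[]base.LifecycleRule{{Prefix: "logs/", DaysNewUntilHidden: 7}})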
func (b *B2) CreateBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (*Bucket, error) { if btype != "allPublic" { btype = "allPrivate" } var b2rules []b2types.LifecycleRule for _, rule := range rules { b2rules = append(b2rules, b2types.LifecycleRule{ Prefix: rule.Prefix, DaysNewUntilHidden: rule.DaysNewUntilHidden, DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, }) } b2req := &b2types.CreateBucketRequest{ AccountID: b.accountID, Name: name, Type: btype, Info: info, LifecycleRules: b2rules, } b2resp := &b2types.CreateBucketResponse{} headers := map[string]string{ "Authorization": b.authToken, } if err := b.opts.makeRequest(ctx, "b2_create_bucket", "POST", b.apiURI+b2types.V1api+"b2_create_bucket", b2req, b2resp, headers, nil); err != nil { return nil, err } var respRules []LifecycleRule for _, rule := range b2resp.LifecycleRules { respRules = append(respRules, LifecycleRule{ Prefix: rule.Prefix, DaysNewUntilHidden: rule.DaysNewUntilHidden, DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, }) } return &Bucket{ Name: name, Info: b2resp.Info, LifecycleRules: respRules, ID: b2resp.BucketID, rev: b2resp.Revision, b2: b, }, nil } // DeleteBucket wraps b2_delete_bucket. func (b *Bucket) DeleteBucket(ctx context.Context) error { b2req := &b2types.DeleteBucketRequest{ AccountID: b.b2.accountID, BucketID: b.ID, } headers := map[string]string{ "Authorization": b.b2.authToken, } return b.b2.opts.makeRequest(ctx, "b2_delete_bucket", "POST", b.b2.apiURI+b2types.V1api+"b2_delete_bucket", b2req, nil, headers, nil) } // Bucket holds B2 bucket details. type Bucket struct { Name string Type string Info map[string]string LifecycleRules []LifecycleRule ID string rev int b2 *B2 } // Update wraps b2_update_bucket. func (b *Bucket) Update(ctx context.Context) (*Bucket, error) { var rules []b2types.LifecycleRule for _, rule := range b.LifecycleRules { rules = append(rules, b2types.LifecycleRule{ DaysNewUntilHidden: rule.DaysNewUntilHidden, DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, Prefix: rule.Prefix, }) } b2req := &b2types.UpdateBucketRequest{ AccountID: b.b2.accountID, BucketID: b.ID, // Name: b.Name, Type: b.Type, Info: b.Info, LifecycleRules: rules, IfRevisionIs: b.rev, } headers := map[string]string{ "Authorization": b.b2.authToken, } b2resp := &b2types.UpdateBucketResponse{} if err := b.b2.opts.makeRequest(ctx, "b2_update_bucket", "POST", b.b2.apiURI+b2types.V1api+"b2_update_bucket", b2req, b2resp, headers, nil); err != nil { return nil, err } var respRules []LifecycleRule for _, rule := range b2resp.LifecycleRules { respRules = append(respRules, LifecycleRule{ Prefix: rule.Prefix, DaysNewUntilHidden: rule.DaysNewUntilHidden, DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, }) } return &Bucket{ Name: b.Name, Type: b2resp.Type, Info: b2resp.Info, LifecycleRules: respRules, ID: b2resp.BucketID, b2: b.b2, }, nil } // BaseURL returns the base part of the download URLs. func (b *Bucket) BaseURL() string { return b.b2.downloadURI } // ListBuckets wraps b2_list_buckets. If name is non-empty, only that bucket // will be returned if it exists; else nothing will be returned. 
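//
// For example (a sketch):
//
//	buckets, err := b2.ListBuckets(ctx, "")
//	if err != nil {
//		return err
//	}
//	for _, bkt := range buckets {
//		fmt.Println(bkt.Name)
//	}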
func (b *B2) ListBuckets(ctx context.Context, name string) ([]*Bucket, error) { b2req := &b2types.ListBucketsRequest{ AccountID: b.accountID, Bucket: b.bucket, Name: name, } b2resp := &b2types.ListBucketsResponse{} headers := map[string]string{ "Authorization": b.authToken, } if err := b.opts.makeRequest(ctx, "b2_list_buckets", "POST", b.apiURI+b2types.V1api+"b2_list_buckets", b2req, b2resp, headers, nil); err != nil { return nil, err } var buckets []*Bucket for _, bucket := range b2resp.Buckets { var rules []LifecycleRule for _, rule := range bucket.LifecycleRules { rules = append(rules, LifecycleRule{ Prefix: rule.Prefix, DaysNewUntilHidden: rule.DaysNewUntilHidden, DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted, }) } buckets = append(buckets, &Bucket{ Name: bucket.Name, Type: bucket.Type, Info: bucket.Info, LifecycleRules: rules, ID: bucket.BucketID, rev: bucket.Revision, b2: b, }) } return buckets, nil } // URL holds information from the b2_get_upload_url API. type URL struct { uri string token string b2 *B2 bucket *Bucket } // Reload reloads URL in-place, by reissuing a b2_get_upload_url and // overwriting the previous values. func (url *URL) Reload(ctx context.Context) error { n, err := url.bucket.GetUploadURL(ctx) if err != nil { return err } url.uri = n.uri url.token = n.token return nil } // GetUploadURL wraps b2_get_upload_url. func (b *Bucket) GetUploadURL(ctx context.Context) (*URL, error) { b2req := &b2types.GetUploadURLRequest{ BucketID: b.ID, } b2resp := &b2types.GetUploadURLResponse{} headers := map[string]string{ "Authorization": b.b2.authToken, } if err := b.b2.opts.makeRequest(ctx, "b2_get_upload_url", "POST", b.b2.apiURI+b2types.V1api+"b2_get_upload_url", b2req, b2resp, headers, nil); err != nil { return nil, err } return &URL{ uri: b2resp.URI, token: b2resp.Token, b2: b.b2, bucket: b, }, nil } // File represents a B2 file. type File struct { Name string Size int64 Status string Timestamp time.Time Info *FileInfo ID string b2 *B2 } // File returns a bare File struct, but with the appropriate id and b2 // interfaces. func (b *Bucket) File(id, name string) *File { return &File{ID: id, b2: b.b2, Name: name} } // UploadFile wraps b2_upload_file. func (url *URL) UploadFile(ctx context.Context, r io.Reader, size int, name, contentType, sha1 string, info map[string]string) (*File, error) { headers := map[string]string{ "Authorization": url.token, "X-Bz-File-Name": name, "Content-Type": contentType, "Content-Length": fmt.Sprintf("%d", size), "X-Bz-Content-Sha1": sha1, } for k, v := range info { headers[fmt.Sprintf("X-Bz-Info-%s", k)] = v } b2resp := &b2types.UploadFileResponse{} if err := url.b2.opts.makeRequest(ctx, "b2_upload_file", "POST", url.uri, nil, b2resp, headers, &requestBody{body: r, size: int64(size)}); err != nil { return nil, err } return &File{ Name: name, Size: int64(size), Timestamp: millitime(b2resp.Timestamp), Status: b2resp.Action, ID: b2resp.FileID, b2: url.b2, }, nil } // DeleteFileVersion wraps b2_delete_file_version. func (f *File) DeleteFileVersion(ctx context.Context) error { b2req := &b2types.DeleteFileVersionRequest{ Name: f.Name, FileID: f.ID, } headers := map[string]string{ "Authorization": f.b2.authToken, } return f.b2.opts.makeRequest(ctx, "b2_delete_file_version", "POST", f.b2.apiURI+b2types.V1api+"b2_delete_file_version", b2req, nil, headers, nil) } // LargeFile holds information necessary to implement B2 large file support. 
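//
// The call sequence, roughly (a sketch with error handling elided; part
// readers, sizes, and hashes are assumed):
//
//	lf, _ := bucket.StartLargeFile(ctx, "big", "application/octet-stream", nil)
//	fc, _ := lf.GetUploadPartURL(ctx)
//	fc.UploadPart(ctx, part1, sha1hex1, size1, 1) // part numbers start at 1
//	fc.UploadPart(ctx, part2, sha1hex2, size2, 2)
//	f, _ := lf.FinishLargeFile(ctx)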
type LargeFile struct { ID string b2 *B2 mu sync.Mutex size int64 hashes map[int]string } // StartLargeFile wraps b2_start_large_file. func (b *Bucket) StartLargeFile(ctx context.Context, name, contentType string, info map[string]string) (*LargeFile, error) { b2req := &b2types.StartLargeFileRequest{ BucketID: b.ID, Name: name, ContentType: contentType, Info: info, } b2resp := &b2types.StartLargeFileResponse{} headers := map[string]string{ "Authorization": b.b2.authToken, } if err := b.b2.opts.makeRequest(ctx, "b2_start_large_file", "POST", b.b2.apiURI+b2types.V1api+"b2_start_large_file", b2req, b2resp, headers, nil); err != nil { return nil, err } return &LargeFile{ ID: b2resp.ID, b2: b.b2, hashes: make(map[int]string), }, nil } // CancelLargeFile wraps b2_cancel_large_file. func (l *LargeFile) CancelLargeFile(ctx context.Context) error { b2req := &b2types.CancelLargeFileRequest{ ID: l.ID, } headers := map[string]string{ "Authorization": l.b2.authToken, } return l.b2.opts.makeRequest(ctx, "b2_cancel_large_file", "POST", l.b2.apiURI+b2types.V1api+"b2_cancel_large_file", b2req, nil, headers, nil) } // FilePart is a piece of a started, but not finished, large file upload. type FilePart struct { Number int SHA1 string Size int64 } // ListParts wraps b2_list_parts. func (f *File) ListParts(ctx context.Context, next, count int) ([]*FilePart, int, error) { b2req := &b2types.ListPartsRequest{ ID: f.ID, Start: next, Count: count, } b2resp := &b2types.ListPartsResponse{} headers := map[string]string{ "Authorization": f.b2.authToken, } if err := f.b2.opts.makeRequest(ctx, "b2_list_parts", "POST", f.b2.apiURI+b2types.V1api+"b2_list_parts", b2req, b2resp, headers, nil); err != nil { return nil, 0, err } var parts []*FilePart for _, part := range b2resp.Parts { parts = append(parts, &FilePart{ Number: part.Number, SHA1: part.SHA1, Size: part.Size, }) } return parts, b2resp.Next, nil } // CompileParts returns a LargeFile that can accept new data. Seen is a // mapping of completed part numbers to SHA1 strings; size is the total size of // all the completed parts to this point. func (f *File) CompileParts(size int64, seen map[int]string) *LargeFile { s := make(map[int]string) for k, v := range seen { s[k] = v } return &LargeFile{ ID: f.ID, b2: f.b2, size: size, hashes: s, } } // FileChunk holds information necessary for uploading file chunks. type FileChunk struct { url string token string file *LargeFile } type getUploadPartURLRequest struct { ID string `json:"fileId"` } type getUploadPartURLResponse struct { URL string `json:"uploadUrl"` Token string `json:"authorizationToken"` } // GetUploadPartURL wraps b2_get_upload_part_url. func (l *LargeFile) GetUploadPartURL(ctx context.Context) (*FileChunk, error) { b2req := &getUploadPartURLRequest{ ID: l.ID, } b2resp := &getUploadPartURLResponse{} headers := map[string]string{ "Authorization": l.b2.authToken, } if err := l.b2.opts.makeRequest(ctx, "b2_get_upload_part_url", "POST", l.b2.apiURI+b2types.V1api+"b2_get_upload_part_url", b2req, b2resp, headers, nil); err != nil { return nil, err } return &FileChunk{ url: b2resp.URL, token: b2resp.Token, file: l, }, nil } // Reload reloads FileChunk in-place. func (fc *FileChunk) Reload(ctx context.Context) error { n, err := fc.file.GetUploadPartURL(ctx) if err != nil { return err } fc.url = n.url fc.token = n.token return nil } // UploadPart wraps b2_upload_part. 
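//
// Passing the special SHA1 value "hex_digits_at_end" streams the part with
// its 40 hex digits of SHA1 appended to the body; the trailing digits are
// captured and recorded as the part's hash. A sketch (data, shaHex, and
// dataLen are assumptions):
//
//	body := io.MultiReader(data, strings.NewReader(shaHex))
//	n, err := fc.UploadPart(ctx, body, "hex_digits_at_end", dataLen+40, 1)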
func (fc *FileChunk) UploadPart(ctx context.Context, r io.Reader, sha1 string, size, index int) (int, error) { headers := map[string]string{ "Authorization": fc.token, "X-Bz-Part-Number": fmt.Sprintf("%d", index), "Content-Length": fmt.Sprintf("%d", size), "X-Bz-Content-Sha1": sha1, } if sha1 == "hex_digits_at_end" { r = &keepFinalBytes{r: r, remain: size} } if err := fc.file.b2.opts.makeRequest(ctx, "b2_upload_part", "POST", fc.url, nil, nil, headers, &requestBody{body: r, size: int64(size)}); err != nil { return 0, err } fc.file.mu.Lock() if sha1 == "hex_digits_at_end" { sha1 = string(r.(*keepFinalBytes).sha[:]) } fc.file.hashes[index] = sha1 fc.file.size += int64(size) fc.file.mu.Unlock() return size, nil } // FinishLargeFile wraps b2_finish_large_file. func (l *LargeFile) FinishLargeFile(ctx context.Context) (*File, error) { l.mu.Lock() defer l.mu.Unlock() b2req := &b2types.FinishLargeFileRequest{ ID: l.ID, Hashes: make([]string, len(l.hashes)), } b2resp := &b2types.FinishLargeFileResponse{} for k, v := range l.hashes { if len(b2req.Hashes) < k { return nil, fmt.Errorf("b2_finish_large_file: invalid index %d", k) } b2req.Hashes[k-1] = v } headers := map[string]string{ "Authorization": l.b2.authToken, } if err := l.b2.opts.makeRequest(ctx, "b2_finish_large_file", "POST", l.b2.apiURI+b2types.V1api+"b2_finish_large_file", b2req, b2resp, headers, nil); err != nil { return nil, err } return &File{ Name: b2resp.Name, Size: l.size, Timestamp: millitime(b2resp.Timestamp), Status: b2resp.Action, ID: b2resp.FileID, b2: l.b2, }, nil } // ListUnfinishedLargeFiles wraps b2_list_unfinished_large_files. func (b *Bucket) ListUnfinishedLargeFiles(ctx context.Context, count int, continuation string) ([]*File, string, error) { b2req := &b2types.ListUnfinishedLargeFilesRequest{ BucketID: b.ID, Continuation: continuation, Count: count, } b2resp := &b2types.ListUnfinishedLargeFilesResponse{} headers := map[string]string{ "Authorization": b.b2.authToken, } if err := b.b2.opts.makeRequest(ctx, "b2_list_unfinished_large_files", "POST", b.b2.apiURI+b2types.V1api+"b2_list_unfinished_large_files", b2req, b2resp, headers, nil); err != nil { return nil, "", err } cont := b2resp.Continuation var files []*File for _, f := range b2resp.Files { files = append(files, &File{ Name: f.Name, Timestamp: millitime(f.Timestamp), b2: b.b2, ID: f.FileID, Info: &FileInfo{ Name: f.Name, ContentType: f.ContentType, Info: f.Info, Timestamp: millitime(f.Timestamp), }, }) } return files, cont, nil } // ListFileNames wraps b2_list_file_names. 
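//
// A paging sketch (page size is illustrative):
//
//	var cont string
//	for {
//		files, next, err := bucket.ListFileNames(ctx, 1000, cont, "", "")
//		if err != nil {
//			return err
//		}
//		for _, f := range files {
//			fmt.Println(f.Name)
//		}
//		if next == "" {
//			break
//		}
//		cont = next
//	}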
func (b *Bucket) ListFileNames(ctx context.Context, count int, continuation, prefix, delimiter string) ([]*File, string, error) { if prefix == "" { prefix = b.b2.pfx } b2req := &b2types.ListFileNamesRequest{ Count: count, Continuation: continuation, BucketID: b.ID, Prefix: prefix, Delimiter: delimiter, } b2resp := &b2types.ListFileNamesResponse{} headers := map[string]string{ "Authorization": b.b2.authToken, } if err := b.b2.opts.makeRequest(ctx, "b2_list_file_names", "POST", b.b2.apiURI+b2types.V1api+"b2_list_file_names", b2req, b2resp, headers, nil); err != nil { return nil, "", err } cont := b2resp.Continuation var files []*File for _, f := range b2resp.Files { files = append(files, &File{ Name: f.Name, Size: f.Size, Status: f.Action, Timestamp: millitime(f.Timestamp), Info: &FileInfo{ Name: f.Name, SHA1: f.SHA1, MD5: f.MD5, Size: f.Size, ContentType: f.ContentType, Info: f.Info, Status: f.Action, Timestamp: millitime(f.Timestamp), }, ID: f.FileID, b2: b.b2, }) } return files, cont, nil } // ListFileVersions wraps b2_list_file_versions. func (b *Bucket) ListFileVersions(ctx context.Context, count int, startName, startID, prefix, delimiter string) ([]*File, string, string, error) { if prefix == "" { prefix = b.b2.pfx } b2req := &b2types.ListFileVersionsRequest{ BucketID: b.ID, Count: count, StartName: startName, StartID: startID, Prefix: prefix, Delimiter: delimiter, } b2resp := &b2types.ListFileVersionsResponse{} headers := map[string]string{ "Authorization": b.b2.authToken, } if err := b.b2.opts.makeRequest(ctx, "b2_list_file_versions", "POST", b.b2.apiURI+b2types.V1api+"b2_list_file_versions", b2req, b2resp, headers, nil); err != nil { return nil, "", "", err } var files []*File for _, f := range b2resp.Files { files = append(files, &File{ Name: f.Name, Size: f.Size, Status: f.Action, Timestamp: millitime(f.Timestamp), Info: &FileInfo{ Name: f.Name, SHA1: f.SHA1, MD5: f.MD5, Size: f.Size, ContentType: f.ContentType, Info: f.Info, Status: f.Action, Timestamp: millitime(f.Timestamp), }, ID: f.FileID, b2: b.b2, }) } return files, b2resp.NextName, b2resp.NextID, nil } // GetDownloadAuthorization wraps b2_get_download_authorization. func (b *Bucket) GetDownloadAuthorization(ctx context.Context, prefix string, valid time.Duration, contentDisposition string) (string, error) { b2req := &b2types.GetDownloadAuthorizationRequest{ BucketID: b.ID, Prefix: prefix, Valid: int(valid.Seconds()), ContentDisposition: contentDisposition, } b2resp := &b2types.GetDownloadAuthorizationResponse{} headers := map[string]string{ "Authorization": b.b2.authToken, } if err := b.b2.opts.makeRequest(ctx, "b2_get_download_authorization", "POST", b.b2.apiURI+b2types.V1api+"b2_get_download_authorization", b2req, b2resp, headers, nil); err != nil { return "", err } return b2resp.Token, nil } // FileReader is an io.ReadCloser that downloads a file from B2. type FileReader struct { io.ReadCloser ContentLength int ContentType string SHA1 string ID string Info map[string]string } func mkRange(offset, size int64) string { if offset == 0 && size == 0 { return "" } if size == 0 { return fmt.Sprintf("bytes=%d-", offset) } return fmt.Sprintf("bytes=%d-%d", offset, offset+size-1) } // DownloadFileByName wraps b2_download_file_by_name. 
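//
// For example, fetching the first megabyte of an object (dst is an assumed
// io.Writer):
//
//	fr, err := bucket.DownloadFileByName(ctx, "some/object", 0, 1e6, false)
//	if err != nil {
//		return err
//	}
//	defer fr.Close()
//	_, err = io.Copy(dst, fr)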
func (b *Bucket) DownloadFileByName(ctx context.Context, name string, offset, size int64, header bool) (*FileReader, error) { uri := fmt.Sprintf("%s/file/%s/%s", b.b2.downloadURI, b.Name, escape(name)) method := "GET" if header { method = "HEAD" } req, err := http.NewRequest(method, uri, nil) if err != nil { return nil, err } req.Header.Set("Authorization", b.b2.authToken) req.Header.Set("X-Blazer-Request-ID", fmt.Sprintf("%d", atomic.AddInt64(&reqID, 1))) req.Header.Set("X-Blazer-Method", "b2_download_file_by_name") b.b2.opts.addHeaders(req) rng := mkRange(offset, size) if rng != "" { req.Header.Set("Range", rng) } logRequest(req, nil) resp, err := makeNetRequest(ctx, req, b.b2.opts.getTransport()) if err != nil { return nil, err } logResponse(resp, nil) if resp.StatusCode != 200 && resp.StatusCode != 206 { defer resp.Body.Close() return nil, mkErr(resp) } clen, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) if err != nil { resp.Body.Close() return nil, err } info := make(map[string]string) for key := range resp.Header { if !strings.HasPrefix(key, "X-Bz-Info-") { continue } name, err := unescape(strings.TrimPrefix(key, "X-Bz-Info-")) if err != nil { resp.Body.Close() return nil, err } val, err := unescape(resp.Header.Get(key)) if err != nil { resp.Body.Close() return nil, err } info[name] = val } sha1 := resp.Header.Get("X-Bz-Content-Sha1") if sha1 == "none" && info["Large_file_sha1"] != "" { sha1 = info["Large_file_sha1"] } return &FileReader{ ReadCloser: resp.Body, SHA1: sha1, ID: resp.Header.Get("X-Bz-File-Id"), ContentType: resp.Header.Get("Content-Type"), ContentLength: int(clen), Info: info, }, nil } // HideFile wraps b2_hide_file. func (b *Bucket) HideFile(ctx context.Context, name string) (*File, error) { b2req := &b2types.HideFileRequest{ BucketID: b.ID, File: name, } b2resp := &b2types.HideFileResponse{} headers := map[string]string{ "Authorization": b.b2.authToken, } if err := b.b2.opts.makeRequest(ctx, "b2_hide_file", "POST", b.b2.apiURI+b2types.V1api+"b2_hide_file", b2req, b2resp, headers, nil); err != nil { return nil, err } return &File{ Status: b2resp.Action, Name: name, Timestamp: millitime(b2resp.Timestamp), b2: b.b2, ID: b2resp.ID, }, nil } // FileInfo holds information about a specific file. type FileInfo struct { Name string SHA1 string MD5 string Size int64 ContentType string Info map[string]string Status string Timestamp time.Time } // GetFileInfo wraps b2_get_file_info. func (f *File) GetFileInfo(ctx context.Context) (*FileInfo, error) { b2req := &b2types.GetFileInfoRequest{ ID: f.ID, } b2resp := &b2types.GetFileInfoResponse{} headers := map[string]string{ "Authorization": f.b2.authToken, } if err := f.b2.opts.makeRequest(ctx, "b2_get_file_info", "POST", f.b2.apiURI+b2types.V1api+"b2_get_file_info", b2req, b2resp, headers, nil); err != nil { return nil, err } f.Status = b2resp.Action f.Name = b2resp.Name f.Timestamp = millitime(b2resp.Timestamp) f.Info = &FileInfo{ Name: b2resp.Name, SHA1: b2resp.SHA1, MD5: b2resp.MD5, Size: b2resp.Size, ContentType: b2resp.ContentType, Info: b2resp.Info, Status: b2resp.Action, Timestamp: millitime(b2resp.Timestamp), } return f.Info, nil } // Key is a B2 application key. type Key struct { ID string Secret string Name string Capabilities []string Expires time.Time b2 *B2 } // CreateKey wraps b2_create_key. 
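//
// For example, a bucket-scoped key limited to a prefix (the capability list,
// lifetime, and names are illustrative):
//
//	key, err := b2.CreateKey(ctx, "ci-uploader", []string{"writeFiles"},
//		7*24*time.Hour, bucketID, "builds/")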
func (b *B2) CreateKey(ctx context.Context, name string, caps []string, valid time.Duration, bucketID string, prefix string) (*Key, error) { b2req := &b2types.CreateKeyRequest{ AccountID: b.accountID, Capabilities: caps, Name: name, Valid: int(valid.Seconds()), BucketID: bucketID, Prefix: prefix, } b2resp := &b2types.CreateKeyResponse{} headers := map[string]string{ "Authorization": b.authToken, } if err := b.opts.makeRequest(ctx, "b2_create_key", "POST", b.apiURI+b2types.V1api+"b2_create_key", b2req, b2resp, headers, nil); err != nil { return nil, err } return &Key{ Name: b2resp.Name, ID: b2resp.ID, Secret: b2resp.Secret, Capabilities: b2resp.Capabilities, Expires: millitime(b2resp.Expires), b2: b, }, nil } // Delete wraps b2_delete_key. func (k *Key) Delete(ctx context.Context) error { b2req := &b2types.DeleteKeyRequest{ KeyID: k.ID, } headers := map[string]string{ "Authorization": k.b2.authToken, } return k.b2.opts.makeRequest(ctx, "b2_delete_key", "POST", k.b2.apiURI+b2types.V1api+"b2_delete_key", b2req, nil, headers, nil) } // ListKeys wraps b2_list_keys. func (b *B2) ListKeys(ctx context.Context, max int, next string) ([]*Key, string, error) { b2req := &b2types.ListKeysRequest{ AccountID: b.accountID, Max: max, Next: next, } headers := map[string]string{ "Authorization": b.authToken, } b2resp := &b2types.ListKeysResponse{} if err := b.opts.makeRequest(ctx, "b2_list_keys", "POST", b.apiURI+b2types.V1api+"b2_list_keys", b2req, b2resp, headers, nil); err != nil { return nil, "", err } var keys []*Key for _, key := range b2resp.Keys { keys = append(keys, &Key{ Name: key.Name, ID: key.ID, Capabilities: key.Capabilities, Expires: millitime(key.Expires), b2: b, }) } return keys, b2resp.Next, nil } blazer-0.6.1/base/integration_test.go000066400000000000000000000512171451327606200176340ustar00rootroot00000000000000// Copyright 2016, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
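// The tests below run against the live B2 service and skip themselves unless
// credentials are present in the environment. A typical invocation, assuming
// a disposable test account (the placeholders are yours to fill in):
//
//	B2_ACCOUNT_ID=<keyID> B2_SECRET_KEY=<applicationKey> go test -v ./base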
package base import ( "bytes" "crypto/sha1" "crypto/tls" "crypto/x509" "encoding/json" "fmt" "io" "net/http" "os" "reflect" "strings" "testing" "time" "github.com/Backblaze/blazer/x/transport" "context" ) const ( apiID = "B2_ACCOUNT_ID" apiKey = "B2_SECRET_KEY" ) const ( bucketName = "base-tests" smallFileName = "TeenyTiny" largeFileName = "BigBytes" ) type zReader struct{} func (zReader) Read(p []byte) (int, error) { return len(p), nil } func TestStorage(t *testing.T) { id := os.Getenv(apiID) key := os.Getenv(apiKey) if id == "" || key == "" { t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests") } ctx := context.Background() // b2_authorize_account b2, err := AuthorizeAccount(ctx, id, key, UserAgent("blazer-base-test")) if err != nil { t.Fatal(err) } // b2_create_bucket infoKey := "key" infoVal := "val" m := map[string]string{infoKey: infoVal} rules := []LifecycleRule{ { Prefix: "what/", DaysNewUntilHidden: 5, }, } bname := id + "-" + bucketName bucket, err := b2.CreateBucket(ctx, bname, "", m, rules) if err != nil { t.Fatal(err) } if bucket.Info[infoKey] != infoVal { t.Errorf("%s: bucketInfo[%q] got %q, want %q", bucket.Name, infoKey, bucket.Info[infoKey], infoVal) } if len(bucket.LifecycleRules) != 1 { t.Errorf("%s: lifecycle rules: got %d rules, wanted 1", bucket.Name, len(bucket.LifecycleRules)) } defer func() { // b2_delete_bucket if err := bucket.DeleteBucket(ctx); err != nil { t.Error(err) } }() // b2_update_bucket bucket.Info["new"] = "yay" bucket.LifecycleRules = nil // Unset options should be a noop. newBucket, err := bucket.Update(ctx) if err != nil { t.Errorf("%s: update bucket: %v", bucket.Name, err) return } bucket = newBucket if bucket.Info["new"] != "yay" { t.Errorf("%s: info key \"new\": got %s, want \"yay\"", bucket.Name, bucket.Info["new"]) } if len(bucket.LifecycleRules) != 1 { t.Errorf("%s: lifecycle rules: got %d rules, wanted 1", bucket.Name, len(bucket.LifecycleRules)) } // b2_list_buckets buckets, err := b2.ListBuckets(ctx, "") if err != nil { t.Fatal(err) } var found bool for _, bucket := range buckets { if bucket.Name == bname { found = true break } } if !found { t.Errorf("%s: new bucket not found", bname) } buckets, err = b2.ListBuckets(ctx, bname) if err != nil { t.Fatal(err) } if len(buckets) != 1 { t.Errorf("expected exactly 1 bucket, got %d", len(buckets)) } else { if buckets[0].Name != bname { t.Errorf("got %s, want %s", buckets[0].Name, bname) } } // b2_get_upload_url ue, err := bucket.GetUploadURL(ctx) if err != nil { t.Fatal(err) } // b2_upload_file smallFile := io.LimitReader(zReader{}, 1024*50) // 50k hash := sha1.New() buf := &bytes.Buffer{} w := io.MultiWriter(hash, buf) if _, err := io.Copy(w, smallFile); err != nil { t.Error(err) } smallSHA1 := fmt.Sprintf("%x", hash.Sum(nil)) smallInfoMap := map[string]string{ "one": "1", "two": "2", } file, err := ue.UploadFile(ctx, buf, buf.Len(), smallFileName, "application/octet-stream", smallSHA1, smallInfoMap) if err != nil { t.Fatal(err) } defer func() { // b2_delete_file_version if err := file.DeleteFileVersion(ctx); err != nil { t.Error(err) } }() // b2_start_large_file largeInfoMap := map[string]string{ "one_billion": "1e9", "two_trillion": "2eSomething, I guess 2e12", } lf, err := bucket.StartLargeFile(ctx, largeFileName, "application/octet-stream", largeInfoMap) if err != nil { t.Fatal(err) } // b2_get_upload_part_url fc, err := lf.GetUploadPartURL(ctx) if err != nil { t.Fatal(err) } // b2_upload_part largeFile := io.LimitReader(zReader{}, 10e6) // 10M for i := 0; i < 2; i++ { r := io.LimitReader(largeFile, 
5e6) // 5M hash := sha1.New() buf := &bytes.Buffer{} w := io.MultiWriter(hash, buf) if _, err := io.Copy(w, r); err != nil { t.Error(err) } if _, err := fc.UploadPart(ctx, buf, fmt.Sprintf("%x", hash.Sum(nil)), buf.Len(), i+1); err != nil { t.Error(err) } } // b2_finish_large_file lfile, err := lf.FinishLargeFile(ctx) if err != nil { t.Fatal(err) } // b2_get_file_info smallInfo, err := file.GetFileInfo(ctx) if err != nil { t.Fatal(err) } compareFileAndInfo(t, smallInfo, smallFileName, smallSHA1, smallInfoMap) largeInfo, err := lfile.GetFileInfo(ctx) if err != nil { t.Fatal(err) } compareFileAndInfo(t, largeInfo, largeFileName, "none", largeInfoMap) defer func() { if err := lfile.DeleteFileVersion(ctx); err != nil { t.Error(err) } }() clf, err := bucket.StartLargeFile(ctx, largeFileName, "application/octet-stream", nil) if err != nil { t.Fatal(err) } // b2_cancel_large_file if err := clf.CancelLargeFile(ctx); err != nil { t.Fatal(err) } // b2_list_file_names files, _, err := bucket.ListFileNames(ctx, 100, "", "", "") if err != nil { t.Fatal(err) } if len(files) != 2 { t.Errorf("expected 2 files, got %d: %v", len(files), files) } // b2_download_file_by_name fr, err := bucket.DownloadFileByName(ctx, smallFileName, 0, 0, false) if err != nil { t.Fatal(err) } if fr.SHA1 != smallSHA1 { t.Errorf("small file SHAs don't match: got %q, want %q", fr.SHA1, smallSHA1) } lbuf := &bytes.Buffer{} if _, err := io.Copy(lbuf, fr); err != nil { t.Fatal(err) } if lbuf.Len() != fr.ContentLength { t.Errorf("small file retrieved lengths don't match: got %d, want %d", lbuf.Len(), fr.ContentLength) } // b2_hide_file hf, err := bucket.HideFile(ctx, smallFileName) if err != nil { t.Fatal(err) } defer func() { if err := hf.DeleteFileVersion(ctx); err != nil { t.Error(err) } }() // b2_list_file_versions files, _, _, err = bucket.ListFileVersions(ctx, 100, "", "", "", "") if err != nil { t.Fatal(err) } if len(files) != 3 { t.Errorf("expected 3 files, got %d: %v", len(files), files) } // b2_get_download_authorization if _, err := bucket.GetDownloadAuthorization(ctx, "foo/", 24*time.Hour, "attachment"); err != nil { t.Errorf("failed to get download auth token: %v", err) } } func TestUploadAuthAfterConnectionHang(t *testing.T) { id := os.Getenv(apiID) key := os.Getenv(apiKey) if id == "" || key == "" { t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests") } ctx := context.Background() hung := make(chan struct{}) // An http.RoundTripper that dies and hangs after sending ~10k bytes. hang := func() { close(hung) select {} } tport := transport.WithFailures(nil, transport.AfterNBytes(10000, hang)) b2, err := AuthorizeAccount(ctx, id, key, Transport(tport)) if err != nil { t.Fatal(err) } bname := id + "-" + bucketName bucket, err := b2.CreateBucket(ctx, bname, "", nil, nil) if err != nil { t.Fatal(err) } defer func() { if err := bucket.DeleteBucket(ctx); err != nil { t.Error(err) } }() ue, err := bucket.GetUploadURL(ctx) if err != nil { t.Fatal(err) } smallFile := io.LimitReader(zReader{}, 1024*50) // 50k hash := sha1.New() buf := &bytes.Buffer{} w := io.MultiWriter(hash, buf) if _, err := io.Copy(w, smallFile); err != nil { t.Error(err) } smallSHA1 := fmt.Sprintf("%x", hash.Sum(nil)) go func() { ue.UploadFile(ctx, buf, buf.Len(), smallFileName, "application/octet-stream", smallSHA1, nil) }() <-hung // Do the whole thing again with the same upload auth, before the remote end // notices we're gone. 
smallFile = io.LimitReader(zReader{}, 1024*50) // 50k again buf.Reset() if _, err := io.Copy(buf, smallFile); err != nil { t.Error(err) } file, err := ue.UploadFile(ctx, buf, buf.Len(), smallFileName, "application/octet-stream", smallSHA1, nil) if err == nil { t.Error("expected an error, got none") if err := file.DeleteFileVersion(ctx); err != nil { t.Error(err) } } if Action(err) != AttemptNewUpload { t.Errorf("Action(%v): got %v, want AttemptNewUpload", err, Action(err)) } } func TestCancelledContextCancelsHTTPRequest(t *testing.T) { id := os.Getenv(apiID) key := os.Getenv(apiKey) if id == "" || key == "" { t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests") } ctx := context.Background() tport := transport.WithFailures(nil, transport.MatchPathSubstring("b2_upload_file"), transport.FailureRate(1), transport.Stall(2*time.Second)) b2, err := AuthorizeAccount(ctx, id, key, Transport(tport)) if err != nil { t.Fatal(err) } bname := id + "-" + bucketName bucket, err := b2.CreateBucket(ctx, bname, "", nil, nil) if err != nil { t.Fatal(err) } defer func() { if err := bucket.DeleteBucket(ctx); err != nil { t.Error(err) } }() ue, err := bucket.GetUploadURL(ctx) if err != nil { t.Fatal(err) } smallFile := io.LimitReader(zReader{}, 1024*50) // 50k hash := sha1.New() buf := &bytes.Buffer{} w := io.MultiWriter(hash, buf) if _, err := io.Copy(w, smallFile); err != nil { t.Error(err) } smallSHA1 := fmt.Sprintf("%x", hash.Sum(nil)) cctx, cancel := context.WithCancel(ctx) go func() { time.Sleep(1) cancel() }() if _, err := ue.UploadFile(cctx, buf, buf.Len(), smallFileName, "application/octet-stream", smallSHA1, nil); err != context.Canceled { t.Errorf("expected canceled context, but got %v", err) } } func TestDeadlineExceededContextCancelsHTTPRequest(t *testing.T) { id := os.Getenv(apiID) key := os.Getenv(apiKey) if id == "" || key == "" { t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests") } ctx := context.Background() tport := transport.WithFailures(nil, transport.MatchPathSubstring("b2_upload_file"), transport.FailureRate(1), transport.Stall(2*time.Second)) b2, err := AuthorizeAccount(ctx, id, key, Transport(tport)) if err != nil { t.Fatal(err) } bname := id + "-" + bucketName bucket, err := b2.CreateBucket(ctx, bname, "", nil, nil) if err != nil { t.Fatal(err) } defer func() { if err := bucket.DeleteBucket(ctx); err != nil { t.Error(err) } }() ue, err := bucket.GetUploadURL(ctx) if err != nil { t.Fatal(err) } smallFile := io.LimitReader(zReader{}, 1024*50) // 50k hash := sha1.New() buf := &bytes.Buffer{} w := io.MultiWriter(hash, buf) if _, err := io.Copy(w, smallFile); err != nil { t.Error(err) } smallSHA1 := fmt.Sprintf("%x", hash.Sum(nil)) cctx, cancel := context.WithTimeout(ctx, time.Second) defer cancel() if _, err := ue.UploadFile(cctx, buf, buf.Len(), smallFileName, "application/octet-stream", smallSHA1, nil); err != context.DeadlineExceeded { t.Errorf("expected deadline exceeded error, but got %v", err) } } func TestUnknownCA(t *testing.T) { id := os.Getenv(apiID) key := os.Getenv(apiKey) if id == "" || key == "" { t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests") } ctx := context.Background() tport := &http.Transport{ TLSClientConfig: &tls.Config{RootCAs: x509.NewCertPool()}, } _, err := AuthorizeAccount(ctx, id, key, Transport(tport)) if err == nil { t.Error("expected an error, got none") } if Action(err) != Punt { t.Errorf("Action(%v): got %v, want Punt", err, Action(err)) } } func compareFileAndInfo(t *testing.T, info 
*FileInfo, name, sha1 string, imap map[string]string) { if info.Name != name { t.Errorf("got %q, want %q", info.Name, name) } if info.SHA1 != sha1 { t.Errorf("got %q, want %q", info.SHA1, sha1) } if !reflect.DeepEqual(info.Info, imap) { t.Errorf("got %v, want %v", info.Info, imap) } } // from https://www.backblaze.com/b2/docs/string_encoding.html var testCases = `[ {"fullyEncoded": "%20", "minimallyEncoded": "+", "string": " "}, {"fullyEncoded": "%21", "minimallyEncoded": "!", "string": "!"}, {"fullyEncoded": "%22", "minimallyEncoded": "%22", "string": "\""}, {"fullyEncoded": "%23", "minimallyEncoded": "%23", "string": "#"}, {"fullyEncoded": "%24", "minimallyEncoded": "$", "string": "$"}, {"fullyEncoded": "%25", "minimallyEncoded": "%25", "string": "%"}, {"fullyEncoded": "%26", "minimallyEncoded": "%26", "string": "&"}, {"fullyEncoded": "%27", "minimallyEncoded": "'", "string": "'"}, {"fullyEncoded": "%28", "minimallyEncoded": "(", "string": "("}, {"fullyEncoded": "%29", "minimallyEncoded": ")", "string": ")"}, {"fullyEncoded": "%2A", "minimallyEncoded": "*", "string": "*"}, {"fullyEncoded": "%2B", "minimallyEncoded": "%2B", "string": "+"}, {"fullyEncoded": "%2C", "minimallyEncoded": "%2C", "string": ","}, {"fullyEncoded": "%2D", "minimallyEncoded": "-", "string": "-"}, {"fullyEncoded": "%2E", "minimallyEncoded": ".", "string": "."}, {"fullyEncoded": "/", "minimallyEncoded": "/", "string": "/"}, {"fullyEncoded": "%30", "minimallyEncoded": "0", "string": "0"}, {"fullyEncoded": "%31", "minimallyEncoded": "1", "string": "1"}, {"fullyEncoded": "%32", "minimallyEncoded": "2", "string": "2"}, {"fullyEncoded": "%33", "minimallyEncoded": "3", "string": "3"}, {"fullyEncoded": "%34", "minimallyEncoded": "4", "string": "4"}, {"fullyEncoded": "%35", "minimallyEncoded": "5", "string": "5"}, {"fullyEncoded": "%36", "minimallyEncoded": "6", "string": "6"}, {"fullyEncoded": "%37", "minimallyEncoded": "7", "string": "7"}, {"fullyEncoded": "%38", "minimallyEncoded": "8", "string": "8"}, {"fullyEncoded": "%39", "minimallyEncoded": "9", "string": "9"}, {"fullyEncoded": "%3A", "minimallyEncoded": ":", "string": ":"}, {"fullyEncoded": "%3B", "minimallyEncoded": ";", "string": ";"}, {"fullyEncoded": "%3C", "minimallyEncoded": "%3C", "string": "<"}, {"fullyEncoded": "%3D", "minimallyEncoded": "=", "string": "="}, {"fullyEncoded": "%3E", "minimallyEncoded": "%3E", "string": ">"}, {"fullyEncoded": "%3F", "minimallyEncoded": "%3F", "string": "?"}, {"fullyEncoded": "%40", "minimallyEncoded": "@", "string": "@"}, {"fullyEncoded": "%41", "minimallyEncoded": "A", "string": "A"}, {"fullyEncoded": "%42", "minimallyEncoded": "B", "string": "B"}, {"fullyEncoded": "%43", "minimallyEncoded": "C", "string": "C"}, {"fullyEncoded": "%44", "minimallyEncoded": "D", "string": "D"}, {"fullyEncoded": "%45", "minimallyEncoded": "E", "string": "E"}, {"fullyEncoded": "%46", "minimallyEncoded": "F", "string": "F"}, {"fullyEncoded": "%47", "minimallyEncoded": "G", "string": "G"}, {"fullyEncoded": "%48", "minimallyEncoded": "H", "string": "H"}, {"fullyEncoded": "%49", "minimallyEncoded": "I", "string": "I"}, {"fullyEncoded": "%4A", "minimallyEncoded": "J", "string": "J"}, {"fullyEncoded": "%4B", "minimallyEncoded": "K", "string": "K"}, {"fullyEncoded": "%4C", "minimallyEncoded": "L", "string": "L"}, {"fullyEncoded": "%4D", "minimallyEncoded": "M", "string": "M"}, {"fullyEncoded": "%4E", "minimallyEncoded": "N", "string": "N"}, {"fullyEncoded": "%4F", "minimallyEncoded": "O", "string": "O"}, {"fullyEncoded": "%50", "minimallyEncoded": "P", 
"string": "P"}, {"fullyEncoded": "%51", "minimallyEncoded": "Q", "string": "Q"}, {"fullyEncoded": "%52", "minimallyEncoded": "R", "string": "R"}, {"fullyEncoded": "%53", "minimallyEncoded": "S", "string": "S"}, {"fullyEncoded": "%54", "minimallyEncoded": "T", "string": "T"}, {"fullyEncoded": "%55", "minimallyEncoded": "U", "string": "U"}, {"fullyEncoded": "%56", "minimallyEncoded": "V", "string": "V"}, {"fullyEncoded": "%57", "minimallyEncoded": "W", "string": "W"}, {"fullyEncoded": "%58", "minimallyEncoded": "X", "string": "X"}, {"fullyEncoded": "%59", "minimallyEncoded": "Y", "string": "Y"}, {"fullyEncoded": "%5A", "minimallyEncoded": "Z", "string": "Z"}, {"fullyEncoded": "%5B", "minimallyEncoded": "%5B", "string": "["}, {"fullyEncoded": "%5C", "minimallyEncoded": "%5C", "string": "\\"}, {"fullyEncoded": "%5D", "minimallyEncoded": "%5D", "string": "]"}, {"fullyEncoded": "%5E", "minimallyEncoded": "%5E", "string": "^"}, {"fullyEncoded": "%5F", "minimallyEncoded": "_", "string": "_"}, {"fullyEncoded": "%60", "minimallyEncoded": "%60", "string": "` + "`" + `"}, {"fullyEncoded": "%61", "minimallyEncoded": "a", "string": "a"}, {"fullyEncoded": "%62", "minimallyEncoded": "b", "string": "b"}, {"fullyEncoded": "%63", "minimallyEncoded": "c", "string": "c"}, {"fullyEncoded": "%64", "minimallyEncoded": "d", "string": "d"}, {"fullyEncoded": "%65", "minimallyEncoded": "e", "string": "e"}, {"fullyEncoded": "%66", "minimallyEncoded": "f", "string": "f"}, {"fullyEncoded": "%67", "minimallyEncoded": "g", "string": "g"}, {"fullyEncoded": "%68", "minimallyEncoded": "h", "string": "h"}, {"fullyEncoded": "%69", "minimallyEncoded": "i", "string": "i"}, {"fullyEncoded": "%6A", "minimallyEncoded": "j", "string": "j"}, {"fullyEncoded": "%6B", "minimallyEncoded": "k", "string": "k"}, {"fullyEncoded": "%6C", "minimallyEncoded": "l", "string": "l"}, {"fullyEncoded": "%6D", "minimallyEncoded": "m", "string": "m"}, {"fullyEncoded": "%6E", "minimallyEncoded": "n", "string": "n"}, {"fullyEncoded": "%6F", "minimallyEncoded": "o", "string": "o"}, {"fullyEncoded": "%70", "minimallyEncoded": "p", "string": "p"}, {"fullyEncoded": "%71", "minimallyEncoded": "q", "string": "q"}, {"fullyEncoded": "%72", "minimallyEncoded": "r", "string": "r"}, {"fullyEncoded": "%73", "minimallyEncoded": "s", "string": "s"}, {"fullyEncoded": "%74", "minimallyEncoded": "t", "string": "t"}, {"fullyEncoded": "%75", "minimallyEncoded": "u", "string": "u"}, {"fullyEncoded": "%76", "minimallyEncoded": "v", "string": "v"}, {"fullyEncoded": "%77", "minimallyEncoded": "w", "string": "w"}, {"fullyEncoded": "%78", "minimallyEncoded": "x", "string": "x"}, {"fullyEncoded": "%79", "minimallyEncoded": "y", "string": "y"}, {"fullyEncoded": "%7A", "minimallyEncoded": "z", "string": "z"}, {"fullyEncoded": "%7B", "minimallyEncoded": "%7B", "string": "{"}, {"fullyEncoded": "%7C", "minimallyEncoded": "%7C", "string": "|"}, {"fullyEncoded": "%7D", "minimallyEncoded": "%7D", "string": "}"}, {"fullyEncoded": "%7E", "minimallyEncoded": "~", "string": "~"}, {"fullyEncoded": "%7F", "minimallyEncoded": "%7F", "string": "\u007f"}, {"fullyEncoded": "%E8%87%AA%E7%94%B1", "minimallyEncoded": "%E8%87%AA%E7%94%B1", "string": "\u81ea\u7531"}, {"fullyEncoded": "%F0%90%90%80", "minimallyEncoded": "%F0%90%90%80", "string": "\ud801\udc00"} ]` type testCase struct { Full string `json:"fullyEncoded"` Min string `json:"minimallyEncoded"` Raw string `json:"string"` } func TestEscapes(t *testing.T) { dec := json.NewDecoder(strings.NewReader(testCases)) var tcs []testCase if err := 
dec.Decode(&tcs); err != nil { t.Fatal(err) } for _, tc := range tcs { en := escape(tc.Raw) if !(en == tc.Full || en == tc.Min) { t.Errorf("encode %q: got %q, want %q or %q", tc.Raw, en, tc.Min, tc.Full) } m, err := unescape(tc.Min) if err != nil { t.Errorf("decode %q: %v", tc.Min, err) } if m != tc.Raw { t.Errorf("decode %q: got %q, want %q", tc.Min, m, tc.Raw) } f, err := unescape(tc.Full) if err != nil { t.Errorf("decode %q: %v", tc.Full, err) } if f != tc.Raw { t.Errorf("decode %q: got %q, want %q", tc.Full, f, tc.Raw) } } } func TestUploadDownloadFilenameEscaping(t *testing.T) { filename := "file%foo.txt" id := os.Getenv(apiID) key := os.Getenv(apiKey) if id == "" || key == "" { t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests") } ctx := context.Background() // b2_authorize_account b2, err := AuthorizeAccount(ctx, id, key, UserAgent("blazer-base-test")) if err != nil { t.Fatal(err) } // b2_create_bucket bname := id + "-" + bucketName bucket, err := b2.CreateBucket(ctx, bname, "", nil, nil) if err != nil { t.Fatal(err) } defer func() { // b2_delete_bucket if err := bucket.DeleteBucket(ctx); err != nil { t.Error(err) } }() // b2_get_upload_url ue, err := bucket.GetUploadURL(ctx) if err != nil { t.Fatal(err) } // b2_upload_file smallFile := io.LimitReader(zReader{}, 128) hash := sha1.New() buf := &bytes.Buffer{} w := io.MultiWriter(hash, buf) if _, err := io.Copy(w, smallFile); err != nil { t.Error(err) } smallSHA1 := fmt.Sprintf("%x", hash.Sum(nil)) file, err := ue.UploadFile(ctx, buf, buf.Len(), filename, "application/octet-stream", smallSHA1, nil) if err != nil { t.Fatal(err) } defer func() { // b2_delete_file_version if err := file.DeleteFileVersion(ctx); err != nil { t.Error(err) } }() // b2_download_file_by_name fr, err := bucket.DownloadFileByName(ctx, filename, 0, 0, false) if err != nil { t.Fatal(err) } lbuf := &bytes.Buffer{} if _, err := io.Copy(lbuf, fr); err != nil { t.Fatal(err) } } blazer-0.6.1/base/strings.go000066400000000000000000000014541451327606200157410ustar00rootroot00000000000000// Copyright 2017, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
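// The escape/unescape pair below implements B2's file-name encoding: names
// are query-escaped except that "/" separators pass through unchanged. A few
// concrete values, following from url.QueryEscape's semantics:
//
//	escape("a/b c") == "a/b+c"  // "/" is preserved; " " becomes "+"
//	escape("100%")  == "100%25" // "%" itself must be escaped
//	u, _ := unescape("a/b+c")   // u == "a/b c"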
package base import ( "net/url" "strings" ) func escape(s string) string { return strings.Replace(url.QueryEscape(s), "%2F", "/", -1) } func unescape(s string) (string, error) { return url.QueryUnescape(s) } blazer-0.6.1/base/strings_test.go000066400000000000000000000016671451327606200170060ustar00rootroot00000000000000package base import ( "fmt" "testing" ) func TestEncodeDecode(t *testing.T) { // crashes identified by go-fuzz origs := []string{ "&\x020000", "&\x020000\x9c", "&\x020\x9c0", "&\x0230j", "&\x02\x98000", "&\x02\x983\xc8j00", "00\x000", "00\x0000", "00\x0000000000000", "\x11\x030", } for _, orig := range origs { escaped := escape(orig) unescaped, err := unescape(escaped) if err != nil { t.Errorf("%s: orig: %#v, escaped: %#v, unescaped: %#v\n", err.Error(), orig, escaped, unescaped) continue } if unescaped != orig { t.Errorf("expected: %#v, got: %#v", orig, unescaped) } } } // hook for go-fuzz: https://github.com/dvyukov/go-fuzz func Fuzz(data []byte) int { orig := string(data) escaped := escape(orig) unescaped, err := unescape(escaped) if err != nil { return 0 } if unescaped != orig { panic(fmt.Sprintf("unescaped: \"%#v\", != orig: \"%#v\"", unescaped, orig)) } return 1 } blazer-0.6.1/bin/000077500000000000000000000000001451327606200135535ustar00rootroot00000000000000blazer-0.6.1/bin/b2keys/000077500000000000000000000000001451327606200147525ustar00rootroot00000000000000blazer-0.6.1/bin/b2keys/b2keys.go000066400000000000000000000045271451327606200165100ustar00rootroot00000000000000// b2keys is a small utility for managing Backblaze B2 keys. package main import ( "context" "flag" "fmt" "os" "time" "github.com/Backblaze/blazer/b2" "github.com/google/subcommands" ) const ( apiID = "B2_ACCOUNT_ID" apiKey = "B2_SECRET_KEY" ) func main() { subcommands.Register(&create{}, "") flag.Parse() ctx := context.Background() os.Exit(int(subcommands.Execute(ctx))) } type create struct { d *time.Duration bucket *string pfx *string } func (c *create) Name() string { return "create" } func (c *create) Synopsis() string { return "create a new application key" } func (c *create) Usage() string { return "b2keys create [-bucket bucket] [-duration duration] [-prefix pfx] name capability [capability ...]" } func (c *create) SetFlags(fs *flag.FlagSet) { c.d = fs.Duration("duration", 0, "the lifetime of the new key") c.bucket = fs.String("bucket", "", "limit the key to the given bucket") c.pfx = fs.String("prefix", "", "limit the key to the objects starting with prefix") } func (c *create) Execute(ctx context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus { id := os.Getenv(apiID) key := os.Getenv(apiKey) if id == "" || key == "" { fmt.Fprintf(os.Stderr, "both %s and %s must be set in the environment", apiID, apiKey) return subcommands.ExitUsageError } args := f.Args() if len(args) < 2 { fmt.Fprintf(os.Stderr, "%s\n", c.Usage()) return subcommands.ExitUsageError } name := args[0] caps := args[1:] var opts []b2.KeyOption if *c.d > 0 { opts = append(opts, b2.Lifetime(*c.d)) } if *c.pfx != "" { opts = append(opts, b2.Prefix(*c.pfx)) } opts = append(opts, b2.Capabilities(caps...)) client, err := b2.NewClient(ctx, id, key, b2.UserAgent("b2keys")) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) return subcommands.ExitFailure } var cr creater = client if *c.bucket != "" { bucket, err := client.Bucket(ctx, *c.bucket) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) return subcommands.ExitFailure } cr = bucket } b2key, err := cr.CreateKey(ctx, name, opts...) 
if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) return subcommands.ExitFailure } fmt.Printf("key=%s, secret=%s\n", b2key.ID(), b2key.Secret()) return subcommands.ExitSuccess } type creater interface { CreateKey(context.Context, string, ...b2.KeyOption) (*b2.Key, error) } blazer-0.6.1/bin/bonfire/000077500000000000000000000000001451327606200151775ustar00rootroot00000000000000blazer-0.6.1/bin/bonfire/bonfire.go000066400000000000000000000014441451327606200171550ustar00rootroot00000000000000package main import ( "context" "fmt" "net/http" "github.com/Backblaze/blazer/bonfire" "github.com/Backblaze/blazer/internal/pyre" ) type superManager struct { *bonfire.LocalBucket bonfire.FS } func main() { ctx := context.Background() mux := http.NewServeMux() fs := bonfire.FS("/tmp/b2") bm := &bonfire.LocalBucket{Port: 8822} if err := pyre.RegisterServerOnMux(ctx, &pyre.Server{ Account: bonfire.Localhost(8822), LargeFile: fs, Bucket: bm, }, mux); err != nil { fmt.Println(err) return } sm := superManager{ LocalBucket: bm, FS: fs, } pyre.RegisterLargeFileManagerOnMux(fs, mux) pyre.RegisterSimpleFileManagerOnMux(fs, mux) pyre.RegisterDownloadManagerOnMux(sm, mux) fmt.Println("ok") fmt.Println(http.ListenAndServe("localhost:8822", mux)) } blazer-0.6.1/bonfire/000077500000000000000000000000001451327606200144275ustar00rootroot00000000000000blazer-0.6.1/bonfire/bonfire.go000066400000000000000000000134631451327606200164110ustar00rootroot00000000000000// Copyright 2018, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package bonfire implements the B2 service. 
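//
// It is a local test stand-in rather than a production server: bin/bonfire
// (above) shows the intended wiring, registering these types on an
// http.ServeMux via the pyre handlers. A condensed sketch of that wiring
// (the scratch directory and port mirror bin/bonfire and are arbitrary):
//
//	fs := bonfire.FS("/tmp/b2")            // on-disk object and part storage
//	bm := &bonfire.LocalBucket{Port: 8822} // in-memory bucket metadata
//	// register the pyre server and file handlers on a mux, then
//	// http.ListenAndServe("localhost:8822", mux)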
package bonfire import ( "crypto/sha1" "encoding/json" "errors" "fmt" "io" "os" "path/filepath" "sort" "strconv" "sync" "github.com/Backblaze/blazer/internal/pyre" ) type FS string func (f FS) open(fp string) (io.WriteCloser, error) { if err := os.MkdirAll(filepath.Dir(fp), 0755); err != nil { return nil, err } return os.Create(fp) } func (f FS) PartWriter(id string, part int) (io.WriteCloser, error) { fp := filepath.Join(string(f), id, fmt.Sprintf("%d", part)) return f.open(fp) } func (f FS) Writer(bucket, name, id string) (io.WriteCloser, error) { fp := filepath.Join(string(f), bucket, name, id) return f.open(fp) } func (f FS) Parts(id string) ([]string, error) { dir := filepath.Join(string(f), id) file, err := os.Open(dir) if err != nil { return nil, err } defer file.Close() fs, err := file.Readdir(0) if err != nil { return nil, err } shas := make([]string, len(fs)-1) for _, fi := range fs { if fi.Name() == "info" { continue } i, err := strconv.ParseInt(fi.Name(), 10, 32) if err != nil { return nil, err } p, err := os.Open(filepath.Join(dir, fi.Name())) if err != nil { return nil, err } sha := sha1.New() if _, err := io.Copy(sha, p); err != nil { p.Close() return nil, err } p.Close() shas[int(i)-1] = fmt.Sprintf("%x", sha.Sum(nil)) } return shas, nil } type fi struct { Name string Bucket string } func (f FS) Start(bucketId, fileName, fileId string, bs []byte) error { w, err := f.open(filepath.Join(string(f), fileId, "info")) if err != nil { return err } if err := json.NewEncoder(w).Encode(fi{Name: fileName, Bucket: bucketId}); err != nil { w.Close() return err } return w.Close() } func (f FS) Finish(fileId string) error { r, err := os.Open(filepath.Join(string(f), fileId, "info")) if err != nil { return err } defer r.Close() var info fi if err := json.NewDecoder(r).Decode(&info); err != nil { return err } shas, err := f.Parts(fileId) // oh my god this is terrible if err != nil { return err } w, err := f.open(filepath.Join(string(f), info.Bucket, info.Name, fileId)) if err != nil { return err } for i := 1; i <= len(shas); i++ { r, err := os.Open(filepath.Join(string(f), fileId, fmt.Sprintf("%d", i))) if err != nil { w.Close() return err } if _, err := io.Copy(w, r); err != nil { w.Close() r.Close() return err } r.Close() } if err := w.Close(); err != nil { return err } return os.RemoveAll(filepath.Join(string(f), fileId)) } func (f FS) ObjectByName(bucket, name string) (pyre.DownloadableObject, error) { dir := filepath.Join(string(f), bucket, name) d, err := os.Open(dir) if err != nil { return nil, err } defer d.Close() fis, err := d.Readdir(0) if err != nil { return nil, err } sort.Slice(fis, func(i, j int) bool { return fis[i].ModTime().Before(fis[j].ModTime()) }) o, err := os.Open(filepath.Join(dir, fis[0].Name())) if err != nil { return nil, err } return do{ o: o, size: fis[0].Size(), }, nil } type do struct { size int64 o *os.File } func (d do) Size() int64 { return d.size } func (d do) Reader() io.ReaderAt { return d.o } func (d do) Close() error { return d.o.Close() } func (f FS) Get(fileId string) ([]byte, error) { return nil, nil } type Localhost int func (l Localhost) String() string { return fmt.Sprintf("http://localhost:%d", l) } func (l Localhost) UploadHost(id string) (string, error) { return l.String(), nil } func (Localhost) Authorize(string, string) (string, error) { return "ok", nil } func (Localhost) CheckCreds(string, string) error { return nil } func (l Localhost) APIRoot(string) string { return l.String() } func (l Localhost) DownloadRoot(string) string { return 
l.String() } func (Localhost) Sizes(string) (int32, int32) { return 1e5, 1 } func (l Localhost) UploadPartHost(fileId string) (string, error) { return l.String(), nil } type LocalBucket struct { Port int mux sync.Mutex b map[string][]byte nti map[string]string } func (lb *LocalBucket) AddBucket(id, name string, bs []byte) error { lb.mux.Lock() defer lb.mux.Unlock() if lb.b == nil { lb.b = make(map[string][]byte) } if lb.nti == nil { lb.nti = make(map[string]string) } lb.b[id] = bs lb.nti[name] = id return nil } func (lb *LocalBucket) RemoveBucket(id string) error { lb.mux.Lock() defer lb.mux.Unlock() if lb.b == nil { lb.b = make(map[string][]byte) } delete(lb.b, id) return nil } func (lb *LocalBucket) UpdateBucket(id string, rev int, bs []byte) error { return errors.New("no") } func (lb *LocalBucket) ListBuckets(acct string) ([][]byte, error) { lb.mux.Lock() defer lb.mux.Unlock() var bss [][]byte for _, bs := range lb.b { bss = append(bss, bs) } return bss, nil } func (lb *LocalBucket) GetBucket(id string) ([]byte, error) { lb.mux.Lock() defer lb.mux.Unlock() bs, ok := lb.b[id] if !ok { return nil, errors.New("not found") } return bs, nil } func (lb *LocalBucket) GetBucketID(name string) (string, error) { lb.mux.Lock() defer lb.mux.Unlock() id, ok := lb.nti[name] if !ok { return "", errors.New("not found") } return id, nil } blazer-0.6.1/examples/000077500000000000000000000000001451327606200146215ustar00rootroot00000000000000blazer-0.6.1/examples/simple/000077500000000000000000000000001451327606200161125ustar00rootroot00000000000000blazer-0.6.1/examples/simple/simple.go000066400000000000000000000056461451327606200177450ustar00rootroot00000000000000// Copyright 2017, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // This is a simple program that will copy named files into or out of B2. // // To copy a file into B2: // // B2_ACCOUNT_ID=foo B2_ACCOUNT_KEY=bar simple /path/to/file b2://bucket/path/to/dst // // To copy a file out: // // B2_ACCOUNT_ID=foo B2_ACCOUNT_KEY=bar simple b2://bucket/path/to/file /path/to/dst package main import ( "context" "flag" "fmt" "io" "net/url" "os" "strings" "github.com/Backblaze/blazer/b2" ) func main() { flag.Parse() b2id := os.Getenv("B2_ACCOUNT_ID") b2key := os.Getenv("B2_ACCOUNT_KEY") args := flag.Args() if len(args) != 2 { fmt.Printf("Usage:\n\nsimple [src] [dst]\n") return } src, dst := args[0], args[1] ctx := context.Background() c, err := b2.NewClient(ctx, b2id, b2key) if err != nil { fmt.Println(err) return } var r io.ReadCloser var w io.WriteCloser if strings.HasPrefix(src, "b2://") { reader, err := b2Reader(ctx, c, src) if err != nil { fmt.Println(err) return } r = reader } else { f, err := os.Open(src) if err != nil { fmt.Println(err) return } r = f } // Readers do not need their errors checked on close. (Also it's a little // silly to defer this in main(), but.) 
defer r.Close() if strings.HasPrefix(dst, "b2://") { writer, err := b2Writer(ctx, c, dst) if err != nil { fmt.Println(err) return } w = writer } else { f, err := os.Create(dst) if err != nil { fmt.Println(err) return } w = f } // Copy and check error. if _, err := io.Copy(w, r); err != nil { fmt.Println(err) return } // It is very important to check the error of the writer. if err := w.Close(); err != nil { fmt.Println(err) } } func b2Reader(ctx context.Context, c *b2.Client, path string) (io.ReadCloser, error) { o, err := b2Obj(ctx, c, path) if err != nil { return nil, err } return o.NewReader(ctx), nil } func b2Writer(ctx context.Context, c *b2.Client, path string) (io.WriteCloser, error) { o, err := b2Obj(ctx, c, path) if err != nil { return nil, err } return o.NewWriter(ctx), nil } func b2Obj(ctx context.Context, c *b2.Client, path string) (*b2.Object, error) { uri, err := url.Parse(path) if err != nil { return nil, err } bucket, err := c.Bucket(ctx, uri.Host) if err != nil { return nil, err } // B2 paths must not begin with /, so trim it here. return bucket.Object(strings.TrimPrefix(uri.Path, "/")), nil } blazer-0.6.1/go.mod000066400000000000000000000012101451327606200141030ustar00rootroot00000000000000module github.com/Backblaze/blazer go 1.18 require ( github.com/golang/protobuf v1.5.3 github.com/google/subcommands v1.2.0 github.com/google/uuid v1.3.1 github.com/grpc-ecosystem/grpc-gateway v1.16.0 golang.org/x/net v0.17.0 google.golang.org/genproto/googleapis/api v0.0.0-20231009173412-8bfb1ae86b6c google.golang.org/grpc v1.58.2 ) require ( golang.org/x/sys v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect google.golang.org/protobuf v1.31.0 // indirect ) blazer-0.6.1/go.sum000066400000000000000000000222651451327606200141450ustar00rootroot00000000000000cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf 
v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/subcommands v1.2.0 h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0= google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk= google.golang.org/genproto/googleapis/api v0.0.0-20231009173412-8bfb1ae86b6c h1:0RtEmmHjemvUXloH7+RuBSIw7n+GEHMOMY1CkGYnWq4= google.golang.org/genproto/googleapis/api v0.0.0-20231009173412-8bfb1ae86b6c/go.mod h1:Wth13BrWMRN/G+guBLupKa6fslcWZv14R0ZKDRkNfY8= google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0= google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= blazer-0.6.1/internal/000077500000000000000000000000001451327606200146175ustar00rootroot00000000000000blazer-0.6.1/internal/b2assets/000077500000000000000000000000001451327606200163455ustar00rootroot00000000000000blazer-0.6.1/internal/b2assets/b2assets.go000066400000000000000000000155041451327606200204270ustar00rootroot00000000000000// Code generated by go-bindata. // sources: // data/status.html // DO NOT EDIT! package b2assets import ( "bytes" "compress/gzip" "fmt" "io" "io/ioutil" "os" "path/filepath" "strings" "time" ) func bindataRead(data []byte, name string) ([]byte, error) { gz, err := gzip.NewReader(bytes.NewBuffer(data)) if err != nil { return nil, fmt.Errorf("Read %q: %v", name, err) } var buf bytes.Buffer _, err = io.Copy(&buf, gz) clErr := gz.Close() if err != nil { return nil, fmt.Errorf("Read %q: %v", name, err) } if clErr != nil { return nil, err } return buf.Bytes(), nil } type asset struct { bytes []byte info os.FileInfo } type bindataFileInfo struct { name string size int64 mode os.FileMode modTime time.Time } func (fi bindataFileInfo) Name() string { return fi.name } func (fi bindataFileInfo) Size() int64 { return fi.size } func (fi bindataFileInfo) Mode() os.FileMode { return fi.mode } func (fi bindataFileInfo) ModTime() time.Time { return fi.modTime } func (fi bindataFileInfo) IsDir() bool { return false } func (fi bindataFileInfo) Sys() interface{} { return nil } var _dataStatusHtml = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd4\x93\x41\x6f\xe3\x20\x10\x85\xef\xf9\x15\xb3\x56\x8e\x51\x90\x73\x5c\x4d\xb8\xec\xee\x79\xa3\xaa\x52\xd5\x23\x36\xa3\x60\x09\x43\x84\x71\x9a\xc8\xe2\xbf\x57\x18\x83\xa3\xb6\x87\x5e\x7b\xf2\x98\xf7\xe6\xf1\xbe\x03\xf8\xeb\xef\xff\x3f\xcf\xaf\xa7\x7f\xa0\x7c\xaf\xf9\x06\xf3\x87\x84\xe4\x1b\x00\xf4\x9d\xd7\xc4\x9b\x03\xb4\xba\x23\xe3\x61\xf0\xc2\x8f\x03\xb2\x74\xbe\x41\x96\x9c\xd8\x58\x79\x8f\x0b\xd3\xb4\xed\xc9\x2b\x2b\x07\xf8\x7d\x84\x3c\xee\x43\x48\x9a\x1c\x9d\xf0\x9d\x35\xb3\xba\xfe\x14\xdd\x8b\x46\x53\xd4\xd2\x90\xce\x51\xd5\xbc\xb5\xa3\xf1\xd0\xdc\xa1\xb5\x92\x90\xa9\x3a\xb5\x8b\xae\x38\xc5\x65\x27\xcc\x99\x60\xb9\x3e\x66\xe4\x26\x73\x48\x74\xbb\x64\x8d\xa3\xe4\xa5\x69\x08\xc8\xbc\xcc\x52\xc9\xc9\xed\xe6\xa4\x52\x75\xc9\x5a\x43\x3a\x23\xe9\x06\x4b\xf1\x7c\x79\xf1\x7f\xcc\x26\x23\x73\x1b\x96\xeb\xac\xa7\xc8\x0a\x50\x64\x1e\x2f\xda\x0a\x39\x64\xda\x87\x6e\x46\xf4\xb4\x83\xed\x55\xe8\xd8\x6e\xff\xe2\x3a\x4f\xae\x70\xaa\x03\x9f\xa6\x64\x82\x58\x40\x1d\x3e\xc1\x75\x72\x07\xdb\x8b\xb3\xe7\x99\xee\x2a\xf4\xfe\xe4\xec\xd9\xd1\xb0\x02\x46\xb4\x36\x3a\x43\x00\xbc\x2c\x2a\x5c\x85\x1e\xe9\x58\x4d\xd3\xbc\x1d\x42\x05\xbd\xb8\x1d\xab\xba\xe2\xc8\xb2\x89\x63\xe3\x80\x7d\x05\xfd\x80\xaa\x6a\x2e\xed\x9b\xf9\x26\xe1\x13\x09\xf9\xa3\x08\x91\xa5\x17\x81\x2c\xbd\xa8\xf7\x00\x00\x00\xff\xff\xd4\xf0\x90\xb4\x69\x03\x00\x00") func dataStatusHtmlBytes() 
([]byte, error) { return bindataRead( _dataStatusHtml, "data/status.html", ) } func dataStatusHtml() (*asset, error) { bytes, err := dataStatusHtmlBytes() if err != nil { return nil, err } info := bindataFileInfo{name: "data/status.html", size: 873, mode: os.FileMode(436), modTime: time.Unix(1520578750, 0)} a := &asset{bytes: bytes, info: info} return a, nil } // Asset loads and returns the asset for the given name. // It returns an error if the asset could not be found or // could not be loaded. func Asset(name string) ([]byte, error) { cannonicalName := strings.Replace(name, "\\", "/", -1) if f, ok := _bindata[cannonicalName]; ok { a, err := f() if err != nil { return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) } return a.bytes, nil } return nil, fmt.Errorf("Asset %s not found", name) } // MustAsset is like Asset but panics when Asset would return an error. // It simplifies safe initialization of global variables. func MustAsset(name string) []byte { a, err := Asset(name) if err != nil { panic("asset: Asset(" + name + "): " + err.Error()) } return a } // AssetInfo loads and returns the asset info for the given name. // It returns an error if the asset could not be found or // could not be loaded. func AssetInfo(name string) (os.FileInfo, error) { cannonicalName := strings.Replace(name, "\\", "/", -1) if f, ok := _bindata[cannonicalName]; ok { a, err := f() if err != nil { return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) } return a.info, nil } return nil, fmt.Errorf("AssetInfo %s not found", name) } // AssetNames returns the names of the assets. func AssetNames() []string { names := make([]string, 0, len(_bindata)) for name := range _bindata { names = append(names, name) } return names } // _bindata is a table, holding each asset generator, mapped to its name. var _bindata = map[string]func() (*asset, error){ "data/status.html": dataStatusHtml, } // AssetDir returns the file names below a certain // directory embedded in the file by go-bindata. // For example if you run go-bindata on data/... and data contains the // following hierarchy: // data/ // foo.txt // img/ // a.png // b.png // then AssetDir("data") would return []string{"foo.txt", "img"} // AssetDir("data/img") would return []string{"a.png", "b.png"} // AssetDir("foo.txt") and AssetDir("notexist") would return an error // AssetDir("") will return []string{"data"}. 
func AssetDir(name string) ([]string, error) { node := _bintree if len(name) != 0 { cannonicalName := strings.Replace(name, "\\", "/", -1) pathList := strings.Split(cannonicalName, "/") for _, p := range pathList { node = node.Children[p] if node == nil { return nil, fmt.Errorf("Asset %s not found", name) } } } if node.Func != nil { return nil, fmt.Errorf("Asset %s not found", name) } rv := make([]string, 0, len(node.Children)) for childName := range node.Children { rv = append(rv, childName) } return rv, nil } type bintree struct { Func func() (*asset, error) Children map[string]*bintree } var _bintree = &bintree{nil, map[string]*bintree{ "data": &bintree{nil, map[string]*bintree{ "status.html": &bintree{dataStatusHtml, map[string]*bintree{}}, }}, }} // RestoreAsset restores an asset under the given directory func RestoreAsset(dir, name string) error { data, err := Asset(name) if err != nil { return err } info, err := AssetInfo(name) if err != nil { return err } err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) if err != nil { return err } err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) if err != nil { return err } err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) if err != nil { return err } return nil } // RestoreAssets restores an asset under the given directory recursively func RestoreAssets(dir, name string) error { children, err := AssetDir(name) // File if err != nil { return RestoreAsset(dir, name) } // Dir for _, child := range children { err = RestoreAssets(dir, filepath.Join(name, child)) if err != nil { return err } } return nil } func _filePath(dir, name string) string { cannonicalName := strings.Replace(name, "\\", "/", -1) return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) } blazer-0.6.1/internal/b2assets/data/000077500000000000000000000000001451327606200172565ustar00rootroot00000000000000blazer-0.6.1/internal/b2assets/data/status.html000066400000000000000000000015511451327606200214710ustar00rootroot00000000000000 b2 client status {{$methods := methods .}} {{$durations := durations .}} {{$table := table .}}

<h1>count by code</h1>

<table>
{{range $method := $methods}}
<tr>
<td>{{$method}}</td>
{{range $duration := $durations}}
<td>{{index $table $method $duration}}</td>
{{end}}
</tr>
{{end}}
</table>

<h1>uploads</h1>

{{range $name, $val := .Writers}}
<h2>{{ $name }}</h2>
{{range $id, $prog := $val.Progress}}
{{inc $id}} <progress value="{{$prog}}"></progress><br>
{{end}}
{{end}}

<h1>downloads</h1>

{{range $name, $val := .Readers}}
<h2>{{ $name }}</h2>
{{range $id, $prog := $val.Progress}}
{{inc $id}} <progress value="{{$prog}}"></progress><br>
{{end}}
{{end}}
blazer-0.6.1/internal/b2assets/gen.go000066400000000000000000000013561451327606200174520ustar00rootroot00000000000000// Copyright 2018, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package b2assets contains data required by other libraries in blazer. package b2assets //go:generate go-bindata -pkg $GOPACKAGE -o b2assets.go data/ blazer-0.6.1/internal/b2types/000077500000000000000000000000001451327606200162075ustar00rootroot00000000000000blazer-0.6.1/internal/b2types/b2types.go000066400000000000000000000210361451327606200201300ustar00rootroot00000000000000// Copyright 2016, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package b2types implements internal types common to the B2 API. package b2types // You know what would be amazing? If I could autogen this from like a JSON // file. Wouldn't that be amazing? That would be amazing. 
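// For orientation, each struct below maps one-to-one onto a JSON body in the
// B2 v1 API via its field tags. For example, a service error (the
// ErrorMessage type below) arrives as a payload like this (values are
// illustrative):
//
//	{"status": 400, "code": "bad_request", "message": "invalid bucketId"}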
const ( V1api = "/b2api/v1/" ) type ErrorMessage struct { Status int `json:"status"` Code string `json:"code"` Msg string `json:"message"` } type AuthorizeAccountResponse struct { AccountID string `json:"accountId"` AuthToken string `json:"authorizationToken"` URI string `json:"apiUrl"` DownloadURI string `json:"downloadUrl"` MinPartSize int `json:"minimumPartSize"` PartSize int `json:"recommendedPartSize"` AbsMinPartSize int `json:"absoluteMinimumPartSize"` Allowed Allowance `json:"allowed"` } type Allowance struct { Capabilities []string `json:"capabilities"` Bucket string `json:"bucketId"` Prefix string `json:"namePrefix"` } type LifecycleRule struct { DaysHiddenUntilDeleted int `json:"daysFromHidingToDeleting,omitempty"` DaysNewUntilHidden int `json:"daysFromUploadingToHiding,omitempty"` Prefix string `json:"fileNamePrefix"` } type CreateBucketRequest struct { AccountID string `json:"accountId"` Name string `json:"bucketName"` Type string `json:"bucketType"` Info map[string]string `json:"bucketInfo"` LifecycleRules []LifecycleRule `json:"lifecycleRules"` } type CreateBucketResponse struct { BucketID string `json:"bucketId"` Name string `json:"bucketName"` Type string `json:"bucketType"` Info map[string]string `json:"bucketInfo"` LifecycleRules []LifecycleRule `json:"lifecycleRules"` Revision int `json:"revision"` } type DeleteBucketRequest struct { AccountID string `json:"accountId"` BucketID string `json:"bucketId"` } type ListBucketsRequest struct { AccountID string `json:"accountId"` Bucket string `json:"bucketId,omitempty"` Name string `json:"bucketName,omitempty"` } type ListBucketsResponse struct { Buckets []CreateBucketResponse `json:"buckets"` } type UpdateBucketRequest struct { AccountID string `json:"accountId"` BucketID string `json:"bucketId"` Type string `json:"bucketType,omitempty"` Info map[string]string `json:"bucketInfo,omitempty"` LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"` IfRevisionIs int `json:"ifRevisionIs,omitempty"` } type UpdateBucketResponse CreateBucketResponse type GetUploadURLRequest struct { BucketID string `json:"bucketId"` } type GetUploadURLResponse struct { URI string `json:"uploadUrl"` Token string `json:"authorizationToken"` } type UploadFileResponse GetFileInfoResponse type DeleteFileVersionRequest struct { Name string `json:"fileName"` FileID string `json:"fileId"` } type StartLargeFileRequest struct { BucketID string `json:"bucketId"` Name string `json:"fileName"` ContentType string `json:"contentType"` Info map[string]string `json:"fileInfo,omitempty"` } type StartLargeFileResponse struct { ID string `json:"fileId"` } type CancelLargeFileRequest struct { ID string `json:"fileId"` } type ListPartsRequest struct { ID string `json:"fileId"` Start int `json:"startPartNumber"` Count int `json:"maxPartCount"` } type ListPartsResponse struct { Next int `json:"nextPartNumber"` Parts []struct { ID string `json:"fileId"` Number int `json:"partNumber"` SHA1 string `json:"contentSha1"` Size int64 `json:"contentLength"` } `json:"parts"` } type getUploadPartURLRequest struct { ID string `json:"fileId"` } type getUploadPartURLResponse struct { URL string `json:"uploadUrl"` Token string `json:"authorizationToken"` } type FinishLargeFileRequest struct { ID string `json:"fileId"` Hashes []string `json:"partSha1Array"` } type FinishLargeFileResponse struct { Name string `json:"fileName"` FileID string `json:"fileId"` Timestamp int64 `json:"uploadTimestamp"` Action string `json:"action"` } type ListFileNamesRequest struct { BucketID string 
`json:"bucketId"` Count int `json:"maxFileCount"` Continuation string `json:"startFileName,omitempty"` Prefix string `json:"prefix,omitempty"` Delimiter string `json:"delimiter,omitempty"` } type ListFileNamesResponse struct { Continuation string `json:"nextFileName"` Files []GetFileInfoResponse `json:"files"` } type ListFileVersionsRequest struct { BucketID string `json:"bucketId"` Count int `json:"maxFileCount"` StartName string `json:"startFileName,omitempty"` StartID string `json:"startFileId,omitempty"` Prefix string `json:"prefix,omitempty"` Delimiter string `json:"delimiter,omitempty"` } type ListFileVersionsResponse struct { NextName string `json:"nextFileName"` NextID string `json:"nextFileId"` Files []GetFileInfoResponse `json:"files"` } type HideFileRequest struct { BucketID string `json:"bucketId"` File string `json:"fileName"` } type HideFileResponse struct { ID string `json:"fileId"` Timestamp int64 `json:"uploadTimestamp"` Action string `json:"action"` } type GetFileInfoRequest struct { ID string `json:"fileId"` } type GetFileInfoResponse struct { FileID string `json:"fileId,omitempty"` Name string `json:"fileName,omitempty"` AccountID string `json:"accountId,omitempty"` BucketID string `json:"bucketId,omitempty"` Size int64 `json:"contentLength,omitempty"` SHA1 string `json:"contentSha1,omitempty"` MD5 string `json:"contentMd5,omitempty"` ContentType string `json:"contentType,omitempty"` Info map[string]string `json:"fileInfo,omitempty"` Action string `json:"action,omitempty"` Timestamp int64 `json:"uploadTimestamp,omitempty"` } type GetDownloadAuthorizationRequest struct { BucketID string `json:"bucketId"` Prefix string `json:"fileNamePrefix"` Valid int `json:"validDurationInSeconds"` ContentDisposition string `json:"b2ContentDisposition,omitempty"` } type GetDownloadAuthorizationResponse struct { BucketID string `json:"bucketId"` Prefix string `json:"fileNamePrefix"` Token string `json:"authorizationToken"` } type ListUnfinishedLargeFilesRequest struct { BucketID string `json:"bucketId"` Continuation string `json:"startFileId,omitempty"` Count int `json:"maxFileCount,omitempty"` } type ListUnfinishedLargeFilesResponse struct { Files []GetFileInfoResponse `json:"files"` Continuation string `json:"nextFileId"` } type CreateKeyRequest struct { AccountID string `json:"accountId"` Capabilities []string `json:"capabilities"` Name string `json:"keyName"` Valid int `json:"validDurationInSeconds,omitempty"` BucketID string `json:"bucketId,omitempty"` Prefix string `json:"namePrefix,omitempty"` } type Key struct { ID string `json:"applicationKeyId"` Secret string `json:"applicationKey"` AccountID string `json:"accountId"` Capabilities []string `json:"capabilities"` Name string `json:"keyName"` Expires int64 `json:"expirationTimestamp"` BucketID string `json:"bucketId"` Prefix string `json:"namePrefix"` } type CreateKeyResponse Key type DeleteKeyRequest struct { KeyID string `json:"applicationKeyId"` } type DeleteKeyResponse Key type ListKeysRequest struct { AccountID string `json:"accountId"` Max int `json:"maxKeyCount,omitempty"` Next string `json:"startApplicationKeyId,omitempty"` } type ListKeysResponse struct { Keys []Key `json:"keys"` Next string `json:"nextApplicationKeyId"` } 
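// Illustrative sketch (an editorial addition, not part of the original file):
// a round trip with these wire types marshals a request struct, POSTs it with
// the account's authorization token, and decodes either the expected response
// or an ErrorMessage. listBuckets is a hypothetical helper; apiURL, token, and
// accountID are assumed to come from a prior b2_authorize_account call, and
// bytes, encoding/json, fmt, and net/http are assumed to be imported.
//
//	func listBuckets(apiURL, token, accountID string) (*ListBucketsResponse, error) {
//		body, err := json.Marshal(ListBucketsRequest{AccountID: accountID})
//		if err != nil {
//			return nil, err
//		}
//		req, err := http.NewRequest("POST", apiURL+V1api+"b2_list_buckets", bytes.NewReader(body))
//		if err != nil {
//			return nil, err
//		}
//		req.Header.Set("Authorization", token)
//		resp, err := http.DefaultClient.Do(req)
//		if err != nil {
//			return nil, err
//		}
//		defer resp.Body.Close()
//		if resp.StatusCode != 200 {
//			// failed calls carry an ErrorMessage body
//			var em ErrorMessage
//			if err := json.NewDecoder(resp.Body).Decode(&em); err != nil {
//				return nil, err
//			}
//			return nil, fmt.Errorf("%s: %s", em.Code, em.Msg)
//		}
//		lbr := &ListBucketsResponse{}
//		if err := json.NewDecoder(resp.Body).Decode(lbr); err != nil {
//			return nil, err
//		}
//		return lbr, nil
//	}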
blazer-0.6.1/internal/bin/000077500000000000000000000000001451327606200153675ustar00rootroot00000000000000blazer-0.6.1/internal/bin/cleanup/000077500000000000000000000000001451327606200170165ustar00rootroot00000000000000blazer-0.6.1/internal/bin/cleanup/cleanup.go000066400000000000000000000025761451327606200210060ustar00rootroot00000000000000package main import ( "context" "fmt" "os" "strings" "sync" "github.com/Backblaze/blazer/b2" ) const ( apiID = "B2_ACCOUNT_ID" apiKey = "B2_SECRET_KEY" ) func main() { id := os.Getenv(apiID) key := os.Getenv(apiKey) ctx := context.Background() client, err := b2.NewClient(ctx, id, key) if err != nil { fmt.Println(err) return } buckets, err := client.ListBuckets(ctx) if err != nil { fmt.Println(err) return } var kill []string for _, bucket := range buckets { if strings.HasPrefix(bucket.Name(), fmt.Sprintf("%s-b2-tests-", id)) { kill = append(kill, bucket.Name()) } if bucket.Name() == fmt.Sprintf("%s-consistobucket", id) || bucket.Name() == fmt.Sprintf("%s-base-tests", id) { kill = append(kill, bucket.Name()) } } var wg sync.WaitGroup for _, name := range kill { wg.Add(1) go func(name string) { defer wg.Done() fmt.Println("removing", name) if err := killBucket(ctx, client, name); err != nil { fmt.Println(err) } }(name) } wg.Wait() } func killBucket(ctx context.Context, client *b2.Client, name string) error { bucket, err := client.NewBucket(ctx, name, nil) if b2.IsNotExist(err) { return nil } if err != nil { return err } defer bucket.Delete(ctx) iter := bucket.List(ctx, b2.ListHidden()) for iter.Next() { if err := iter.Object().Delete(ctx); err != nil { fmt.Println(err) } } return iter.Err() } blazer-0.6.1/internal/blog/000077500000000000000000000000001451327606200155425ustar00rootroot00000000000000blazer-0.6.1/internal/blog/blog.go000066400000000000000000000023571451327606200170230ustar00rootroot00000000000000// Copyright 2017, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package blog implements a private logger, in the manner of glog, without // polluting the flag namespace or leaving files all over /tmp. // // It has almost no features, and a bunch of global state. package blog import ( "log" "os" "strconv" ) var level int32 type Verbose bool func init() { lvl := os.Getenv("B2_LOG_LEVEL") i, err := strconv.ParseInt(lvl, 10, 32) if err != nil { return } level = int32(i) } func (v Verbose) Info(a ...interface{}) { if v { log.Print(a...) } } func (v Verbose) Infof(format string, a ...interface{}) { if v { log.Printf(format, a...) } } func V(target int32) Verbose { return Verbose(target <= level) } blazer-0.6.1/internal/pyre/000077500000000000000000000000001451327606200155765ustar00rootroot00000000000000blazer-0.6.1/internal/pyre/api.go000066400000000000000000000227121451327606200167020ustar00rootroot00000000000000// Copyright 2018, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pyre import ( "context" "encoding/base64" "errors" "fmt" "net" "net/http" "os" "reflect" "strings" "github.com/golang/protobuf/proto" "github.com/google/uuid" "github.com/grpc-ecosystem/grpc-gateway/runtime" "google.golang.org/grpc" "google.golang.org/grpc/metadata" pb "github.com/Backblaze/blazer/internal/pyre/proto" ) type apiErr struct { Status int `json:"status"` Code string `json:"code"` Message string `json:"message"` } func serveMuxOptions() []runtime.ServeMuxOption { return []runtime.ServeMuxOption{ runtime.WithMarshalerOption("*", &runtime.JSONPb{}), runtime.WithProtoErrorHandler(func(ctx context.Context, mux *runtime.ServeMux, m runtime.Marshaler, rw http.ResponseWriter, req *http.Request, err error) { aErr := apiErr{ Status: 400, Code: "uh oh", Message: err.Error(), } rw.WriteHeader(aErr.Status) if err := m.NewEncoder(rw).Encode(aErr); err != nil { fmt.Fprintln(os.Stdout, err) } }), } } func getAuth(ctx context.Context) (string, error) { md, ok := metadata.FromIncomingContext(ctx) if !ok { return "", errors.New("no metadata") } data := md.Get("authorization") if len(data) == 0 { return "", nil } return data[0], nil } func RegisterServerOnMux(ctx context.Context, srv *Server, mux *http.ServeMux) error { rmux := runtime.NewServeMux(serveMuxOptions()...) l, err := net.Listen("tcp", "localhost:0") if err != nil { return err } gsrv := grpc.NewServer() if err := pb.RegisterPyreServiceHandlerFromEndpoint(ctx, rmux, l.Addr().String(), []grpc.DialOption{grpc.WithInsecure()}); err != nil { return err } pb.RegisterPyreServiceServer(gsrv, srv) mux.Handle("/b2api/v1/", rmux) go gsrv.Serve(l) go func() { <-ctx.Done() gsrv.GracefulStop() }() return nil } type AccountManager interface { Authorize(acct, key string) (string, error) CheckCreds(token, api string) error APIRoot(acct string) string DownloadRoot(acct string) string UploadPartHost(fileID string) (string, error) UploadHost(id string) (string, error) Sizes(acct string) (recommended, minimum int32) } type BucketManager interface { AddBucket(id, name string, bs []byte) error RemoveBucket(id string) error UpdateBucket(id string, rev int, bs []byte) error ListBuckets(acct string) ([][]byte, error) GetBucket(id string) ([]byte, error) } type LargeFileOrganizer interface { Start(bucketID, fileName, fileID string, bs []byte) error Get(fileID string) ([]byte, error) Parts(fileID string) ([]string, error) Finish(fileID string) error } type Server struct { Account AccountManager Bucket BucketManager LargeFile LargeFileOrganizer List ListManager } func (s *Server) AuthorizeAccount(ctx context.Context, req *pb.AuthorizeAccountRequest) (*pb.AuthorizeAccountResponse, error) { auth, err := getAuth(ctx) if err != nil { return nil, err } if !strings.HasPrefix(auth, "Basic ") { return nil, errors.New("basic auth required") } auth = strings.TrimPrefix(auth, "Basic ") bs, err := base64.StdEncoding.DecodeString(auth) if err != nil { return nil, err } split := strings.Split(string(bs), ":") if len(split) != 2 { return nil, errors.New("bad auth") } acct, key := split[0], split[1] token, err := s.Account.Authorize(acct, key) if err != nil { 
return nil, err } rec, min := s.Account.Sizes(acct) return &pb.AuthorizeAccountResponse{ AuthorizationToken: token, ApiUrl: s.Account.APIRoot(acct), DownloadUrl: s.Account.DownloadRoot(acct), RecommendedPartSize: rec, MinimumPartSize: rec, AbsoluteMinimumPartSize: min, }, nil } func (s *Server) ListBuckets(ctx context.Context, req *pb.ListBucketsRequest) (*pb.ListBucketsResponse, error) { resp := &pb.ListBucketsResponse{} buckets, err := s.Bucket.ListBuckets(req.AccountId) if err != nil { return nil, err } for _, bs := range buckets { var bucket pb.Bucket if err := proto.Unmarshal(bs, &bucket); err != nil { return nil, err } resp.Buckets = append(resp.Buckets, &bucket) } return resp, nil } func (s *Server) CreateBucket(ctx context.Context, req *pb.Bucket) (*pb.Bucket, error) { req.BucketId = uuid.New().String() bs, err := proto.Marshal(req) if err != nil { return nil, err } if err := s.Bucket.AddBucket(req.BucketId, req.BucketName, bs); err != nil { return nil, err } return req, nil } func (s *Server) DeleteBucket(ctx context.Context, req *pb.Bucket) (*pb.Bucket, error) { bs, err := s.Bucket.GetBucket(req.BucketId) if err != nil { return nil, err } var bucket pb.Bucket if err := proto.Unmarshal(bs, &bucket); err != nil { return nil, err } if err := s.Bucket.RemoveBucket(req.BucketId); err != nil { return nil, err } return &bucket, nil } func (s *Server) GetUploadUrl(ctx context.Context, req *pb.GetUploadUrlRequest) (*pb.GetUploadUrlResponse, error) { host, err := s.Account.UploadHost(req.BucketId) if err != nil { return nil, err } return &pb.GetUploadUrlResponse{ UploadUrl: fmt.Sprintf("%s/b2api/v1/b2_upload_file/%s", host, req.BucketId), BucketId: req.BucketId, }, nil } func (s *Server) StartLargeFile(ctx context.Context, req *pb.StartLargeFileRequest) (*pb.StartLargeFileResponse, error) { fileID := uuid.New().String() resp := &pb.StartLargeFileResponse{ FileId: fileID, FileName: req.FileName, BucketId: req.BucketId, ContentType: req.ContentType, FileInfo: req.FileInfo, } bs, err := proto.Marshal(resp) if err != nil { return nil, err } if err := s.LargeFile.Start(req.BucketId, req.FileName, fileID, bs); err != nil { return nil, err } return resp, nil } func (s *Server) GetUploadPartUrl(ctx context.Context, req *pb.GetUploadPartUrlRequest) (*pb.GetUploadPartUrlResponse, error) { host, err := s.Account.UploadPartHost(req.FileId) if err != nil { return nil, err } return &pb.GetUploadPartUrlResponse{ UploadUrl: fmt.Sprintf("%s/b2api/v1/b2_upload_part/%s", host, req.FileId), }, nil } func (s *Server) FinishLargeFile(ctx context.Context, req *pb.FinishLargeFileRequest) (*pb.FinishLargeFileResponse, error) { parts, err := s.LargeFile.Parts(req.FileId) if err != nil { return nil, err } if !reflect.DeepEqual(parts, req.PartSha1Array) { return nil, errors.New("sha1 array mismatch") } if err := s.LargeFile.Finish(req.FileId); err != nil { return nil, err } return &pb.FinishLargeFileResponse{}, nil } func (s *Server) ListFileVersions(ctx context.Context, req *pb.ListFileVersionsRequest) (*pb.ListFileVersionsResponse, error) { return nil, nil } type objTuple struct { name, version string } type ListManager interface { // NextN returns the next n objects, sorted lexicographically by name, // beginning at and including, if it exists, fileName. If withPrefix is not // empty, it only returns names that begin with that prefix. If skipPrefix // is not empty, then no files with that prefix are returned. If the two // conflict, skipPrefix wins (i.e., do not return the entry). 
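//
// For example (an illustrative case added editorially, not part of the
// original documentation): with objects named "a", "a/x", and "ab", a call
// NextN(bucket, "a", "a", "a/", 3) yields "a" and "ab"; "a/x" is excluded
// by the skipPrefix.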
// // If fewer than n entries are returned, this signifies that no more names // exist that meet these criteria. NextN(bucketID, fileName, withPrefix, skipPrefix string, n int) ([]VersionedObject, error) } type VersionedObject interface { Name() string NextNVersions(begin string, n int) ([]string, error) } func getDirNames(lm ListManager, bucket, name, prefix, delim string, n int) ([]string, error) { var sfx string var out []string for n > 0 { vo, err := lm.NextN(bucket, name, prefix, sfx, 1) if err != nil { return nil, err } if len(vo) == 0 { return out, nil } v := vo[0] name = v.Name() suffix := name[len(prefix):] i := strings.Index(suffix, delim) if i < 0 { sfx = "" out = append(out, name) name += "\000" n-- continue } sfx = v.Name()[:len(prefix)+i+1] out = append(out, sfx) n-- } return out, nil } //func getNextObjects(lm ListManager, bucket, name, prefix, delimiter string, n int) ([]VersionedObject, error) { // if delimiter == "" { // return lm.NextN(bucket, name, prefix, "", n) // } // afterPfx := strings.TrimPrefix(name, prefix) // i := strings.Index(afterPfx, delimiter) // if i == 0 { // // } // if i < 0 { // return lm.NextN(bucket, name, prefix, "", n) // } // skipPfx := name[:len(prefix)+i] // // TO //} // //func listFileVersions(lm ListManager, bucket, name, version, prefix, delimiter string, n int) ([]objTuple, error) { // var tups []objTuple // var got int // for { // objs, err := getNextObjects(bucket, name, prefix, delimiter, n-got) // if err != nil { // return nil, err // } // if len(objs) == 0 { // break // } // for _, o := range objs { // var begin string // if len(tups) == 0 { // begin = version // } // vers, err := lm.NextNVersions(begin, n-got) // if err != nil { // return nil, err // } // got += len(vers) // for _, ver := range vers { // tups = append(tups, objTuple{name: o.Name(), version: ver}) // } // if got >= n { // return tups[:n], nil // } // } // } // return tups, nil //} blazer-0.6.1/internal/pyre/api_test.go000066400000000000000000000057101451327606200177400ustar00rootroot00000000000000// Copyright 2018, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
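// Editorial note, not part of the original file: the table test below pins
// down getDirNames's rollup behavior. For instance, the first case asks for
// two names under prefix "/usr/local/etc/" with delimiter "/" and gets back
// the plain file "/usr/local/etc/foo" followed by the directory rollup
// "/usr/local/etc/foo/", which stands in for every name beneath it.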
package pyre import ( "reflect" "sort" "strings" "sync" "testing" ) type testVersionedObject struct { name string versions []string } func (t testVersionedObject) Name() string { return t.name } func (t testVersionedObject) NextNVersions(b string, n int) ([]string, error) { var out []string var seen bool if b == "" { seen = true } for _, v := range t.versions { if b == v { seen = true } if !seen { continue } if len(out) >= n { return out, nil } out = append(out, v) } return out, nil } type testListManager struct { objs map[string][]string m sync.Mutex } func (t *testListManager) NextN(b, fn, pfx, spfx string, n int) ([]VersionedObject, error) { t.m.Lock() defer t.m.Unlock() var out []VersionedObject var keys []string for k := range t.objs { keys = append(keys, k) } sort.Strings(keys) for _, k := range keys { if k < fn { continue } if !strings.HasPrefix(k, pfx) { continue } if spfx != "" && strings.HasPrefix(k, spfx) { continue } out = append(out, testVersionedObject{name: k, versions: t.objs[k]}) n-- if n <= 0 { return out, nil } } return out, nil } func TestGetDirNames(t *testing.T) { table := []struct { lm ListManager name string pfx string delim string num int want []string }{ { lm: &testListManager{ objs: map[string][]string{ "/usr/local/etc/foo/bar": {"a"}, "/usr/local/etc/foo/baz": {"a"}, "/usr/local/etc/foo": {"a"}, "/usr/local/etc/fool": {"a"}, }, }, num: 2, pfx: "/usr/local/etc/", delim: "/", want: []string{"/usr/local/etc/foo", "/usr/local/etc/foo/"}, }, { lm: &testListManager{ objs: map[string][]string{ "/usr/local/etc/foo/bar": {"a"}, "/usr/local/etc/foo/baz": {"a"}, "/usr/local/etc/foo": {"a"}, "/usr/local/etc/fool": {"a"}, "/usr/local/etc/bar": {"a"}, }, }, num: 4, pfx: "/usr/local/etc/", delim: "/", want: []string{"/usr/local/etc/bar", "/usr/local/etc/foo", "/usr/local/etc/foo/", "/usr/local/etc/fool"}, }, } for _, e := range table { got, err := getDirNames(e.lm, "", e.name, e.pfx, e.delim, e.num) if err != nil { t.Error(err) continue } if !reflect.DeepEqual(got, e.want) { t.Errorf("getDirNames(%v, %q, %q, %q, %d): got %v, want %v", e.lm, e.name, e.pfx, e.delim, e.num, got, e.want) } } } blazer-0.6.1/internal/pyre/download.go000066400000000000000000000070061451327606200177370ustar00rootroot00000000000000// Copyright 2018, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
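// Editorial note, not part of the original file: the download server below
// accepts single inclusive byte ranges of the form "bytes=<first>-<last>",
// so a request with "Range: bytes=0-99" is parsed as offset 0 and length
// 100 and is answered with status 206 and a Content-Length of 100.
// Open-ended ranges such as "bytes=100-" are not handled by
// parseDownloadHeaders and produce an error.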
package pyre import ( "fmt" "io" "net/http" "strconv" "strings" ) type DownloadableObject interface { Size() int64 Reader() io.ReaderAt io.Closer } type DownloadManager interface { ObjectByName(bucketID, name string) (DownloadableObject, error) GetBucketID(bucket string) (string, error) GetBucket(id string) ([]byte, error) } type downloadServer struct { dm DownloadManager } type downloadRequest struct { off, n int64 } func parseDownloadHeaders(r *http.Request) (*downloadRequest, error) { rang := r.Header.Get("Range") if rang == "" { return &downloadRequest{}, nil } if !strings.HasPrefix(rang, "bytes=") { return nil, fmt.Errorf("unknown range format: %q", rang) } rang = strings.TrimPrefix(rang, "bytes=") if !strings.Contains(rang, "-") { return nil, fmt.Errorf("unknown range format: %q", rang) } parts := strings.Split(rang, "-") off, err := strconv.ParseInt(parts[0], 10, 64) if err != nil { return nil, err } end, err := strconv.ParseInt(parts[1], 10, 64) if err != nil { return nil, err } return &downloadRequest{ off: off, n: (end + 1) - off, }, nil } func (fs *downloadServer) serveWholeObject(rw http.ResponseWriter, obj DownloadableObject) { rw.Header().Set("Content-Length", fmt.Sprintf("%d", obj.Size())) sr := io.NewSectionReader(obj.Reader(), 0, obj.Size()) if _, err := io.Copy(rw, sr); err != nil { http.Error(rw, err.Error(), 503) fmt.Println("no reader", err) } } func (fs *downloadServer) servePartialObject(rw http.ResponseWriter, obj DownloadableObject, off, len int64) { if off >= obj.Size() { http.Error(rw, "hell naw", 416) fmt.Printf("range not good (%d-%d for %d)\n", off, len, obj.Size()) return } if off+len > obj.Size() { len = obj.Size() - off } sr := io.NewSectionReader(obj.Reader(), off, len) rw.Header().Set("Content-Length", fmt.Sprintf("%d", len)) rw.WriteHeader(206) // this goes after headers are set if _, err := io.Copy(rw, sr); err != nil { fmt.Println("bad read:", err) } } func (fs *downloadServer) ServeHTTP(rw http.ResponseWriter, r *http.Request) { req, err := parseDownloadHeaders(r) if err != nil { http.Error(rw, err.Error(), 503) fmt.Println("weird header") return } path := strings.TrimPrefix(r.URL.Path, "/") parts := strings.Split(path, "/") if len(parts) < 3 { http.Error(rw, err.Error(), 404) fmt.Println("weird file") return } bucket := parts[1] bid, err := fs.dm.GetBucketID(bucket) if err != nil { http.Error(rw, err.Error(), 503) fmt.Println("no bucket:", err) return } file := strings.Join(parts[2:], "/") obj, err := fs.dm.ObjectByName(bid, file) if err != nil { http.Error(rw, err.Error(), 503) fmt.Println("no reader", err) return } defer obj.Close() if req.off == 0 && req.n == 0 { fs.serveWholeObject(rw, obj) return } fs.servePartialObject(rw, obj, req.off, req.n) } func RegisterDownloadManagerOnMux(d DownloadManager, mux *http.ServeMux) { mux.Handle("/file/", &downloadServer{dm: d}) } blazer-0.6.1/internal/pyre/large.go000066400000000000000000000044741451327606200172300ustar00rootroot00000000000000// Copyright 2018, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package pyre import ( "encoding/json" "fmt" "io" "net/http" "strconv" "strings" ) const uploadFilePartPrefix = "/b2api/v1/b2_upload_part/" type LargeFileManager interface { PartWriter(id string, part int) (io.WriteCloser, error) } type largeFileServer struct { fm LargeFileManager } type uploadPartRequest struct { ID string `json:"fileId"` Part int `json:"partNumber"` Size int64 `json:"contentLength"` Hash string `json:"contentSha1"` } func parseUploadPartHeaders(r *http.Request) (uploadPartRequest, error) { var ur uploadPartRequest ur.Hash = r.Header.Get("X-Bz-Content-Sha1") part, err := strconv.ParseInt(r.Header.Get("X-Bz-Part-Number"), 10, 64) if err != nil { return ur, err } ur.Part = int(part) size, err := strconv.ParseInt(r.Header.Get("Content-Length"), 10, 64) if err != nil { return ur, err } ur.Size = size ur.ID = strings.TrimPrefix(r.URL.Path, uploadFilePartPrefix) return ur, nil } func (fs *largeFileServer) ServeHTTP(rw http.ResponseWriter, r *http.Request) { req, err := parseUploadPartHeaders(r) if err != nil { http.Error(rw, err.Error(), 500) fmt.Println("bad part headers:", err) return } w, err := fs.fm.PartWriter(req.ID, req.Part) if err != nil { http.Error(rw, err.Error(), 500) fmt.Println("no part writer:", err) return } if _, err := io.Copy(w, io.LimitReader(r.Body, req.Size)); err != nil { w.Close() http.Error(rw, err.Error(), 500) fmt.Println("part write failed:", err) return } if err := w.Close(); err != nil { http.Error(rw, err.Error(), 500) fmt.Println("part close failed:", err) return } if err := json.NewEncoder(rw).Encode(req); err != nil { fmt.Println("response encode failed:", err) } } func RegisterLargeFileManagerOnMux(f LargeFileManager, mux *http.ServeMux) { mux.Handle(uploadFilePartPrefix, &largeFileServer{fm: f}) } blazer-0.6.1/internal/pyre/proto/000077500000000000000000000000001451327606200167415ustar00rootroot00000000000000blazer-0.6.1/internal/pyre/proto/pyre.pb.go000066400000000000000000002245151451327606200206570ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: proto/pyre.proto package pyre_proto import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import _ "google.golang.org/genproto/googleapis/api/annotations" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type AuthorizeAccountRequest struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *AuthorizeAccountRequest) Reset() { *m = AuthorizeAccountRequest{} } func (m *AuthorizeAccountRequest) String() string { return proto.CompactTextString(m) } func (*AuthorizeAccountRequest) ProtoMessage() {} func (*AuthorizeAccountRequest) Descriptor() ([]byte, []int) { return fileDescriptor_pyre_492df08819220afa, []int{0} } func (m *AuthorizeAccountRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_AuthorizeAccountRequest.Unmarshal(m, b) } func (m *AuthorizeAccountRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_AuthorizeAccountRequest.Marshal(b, m, deterministic) } func (dst *AuthorizeAccountRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_AuthorizeAccountRequest.Merge(dst, src) } func (m *AuthorizeAccountRequest) XXX_Size() int { return xxx_messageInfo_AuthorizeAccountRequest.Size(m) } func (m *AuthorizeAccountRequest) XXX_DiscardUnknown() { xxx_messageInfo_AuthorizeAccountRequest.DiscardUnknown(m) } var xxx_messageInfo_AuthorizeAccountRequest proto.InternalMessageInfo type AuthorizeAccountResponse struct { // The identifier for the account. AccountId string `protobuf:"bytes,1,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` // An authorization token to use with all calls, other than // b2_authorize_account, that need an Authorization header. This // authorization token is valid for at most 24 hours. AuthorizationToken string `protobuf:"bytes,2,opt,name=authorization_token,json=authorizationToken,proto3" json:"authorization_token,omitempty"` // The base URL to use for all API calls except for uploading and downloading // files. ApiUrl string `protobuf:"bytes,3,opt,name=api_url,json=apiUrl,proto3" json:"api_url,omitempty"` // The base URL to use for downloading files. DownloadUrl string `protobuf:"bytes,4,opt,name=download_url,json=downloadUrl,proto3" json:"download_url,omitempty"` // The recommended size for each part of a large file. We recommend using // this part size for optimal upload performance. RecommendedPartSize int32 `protobuf:"varint,5,opt,name=recommended_part_size,json=recommendedPartSize,proto3" json:"recommended_part_size,omitempty"` // The smallest possible size of a part of a large file (except the last // one). This is smaller than the recommended part size. If you use it, you // may find that it takes longer overall to upload a large file. 
AbsoluteMinimumPartSize int32 `protobuf:"varint,6,opt,name=absolute_minimum_part_size,json=absoluteMinimumPartSize,proto3" json:"absolute_minimum_part_size,omitempty"` MinimumPartSize int32 `protobuf:"varint,7,opt,name=minimum_part_size,json=minimumPartSize,proto3" json:"minimum_part_size,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *AuthorizeAccountResponse) Reset() { *m = AuthorizeAccountResponse{} } func (m *AuthorizeAccountResponse) String() string { return proto.CompactTextString(m) } func (*AuthorizeAccountResponse) ProtoMessage() {} func (*AuthorizeAccountResponse) Descriptor() ([]byte, []int) { return fileDescriptor_pyre_492df08819220afa, []int{1} } func (m *AuthorizeAccountResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_AuthorizeAccountResponse.Unmarshal(m, b) } func (m *AuthorizeAccountResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_AuthorizeAccountResponse.Marshal(b, m, deterministic) } func (dst *AuthorizeAccountResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_AuthorizeAccountResponse.Merge(dst, src) } func (m *AuthorizeAccountResponse) XXX_Size() int { return xxx_messageInfo_AuthorizeAccountResponse.Size(m) } func (m *AuthorizeAccountResponse) XXX_DiscardUnknown() { xxx_messageInfo_AuthorizeAccountResponse.DiscardUnknown(m) } var xxx_messageInfo_AuthorizeAccountResponse proto.InternalMessageInfo func (m *AuthorizeAccountResponse) GetAccountId() string { if m != nil { return m.AccountId } return "" } func (m *AuthorizeAccountResponse) GetAuthorizationToken() string { if m != nil { return m.AuthorizationToken } return "" } func (m *AuthorizeAccountResponse) GetApiUrl() string { if m != nil { return m.ApiUrl } return "" } func (m *AuthorizeAccountResponse) GetDownloadUrl() string { if m != nil { return m.DownloadUrl } return "" } func (m *AuthorizeAccountResponse) GetRecommendedPartSize() int32 { if m != nil { return m.RecommendedPartSize } return 0 } func (m *AuthorizeAccountResponse) GetAbsoluteMinimumPartSize() int32 { if m != nil { return m.AbsoluteMinimumPartSize } return 0 } func (m *AuthorizeAccountResponse) GetMinimumPartSize() int32 { if m != nil { return m.MinimumPartSize } return 0 } type ListBucketsRequest struct { // The ID of your account. AccountId string `protobuf:"bytes,1,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` // When specified, the result will be a list containing just this bucket, if // it's present in the account, or no buckets if the account does not have a // bucket with this ID. BucketId string `protobuf:"bytes,2,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` // When specified, the result will be a list containing just this bucket, if // it's present in the account, or no buckets if the account does not have a // bucket with this ID. BucketName string `protobuf:"bytes,3,opt,name=bucket_name,json=bucketName,proto3" json:"bucket_name,omitempty"` // If present, B2 will use it as a filter for bucket types returned in the // list buckets response. If not present, only buckets with bucket types // "allPublic", "allPrivate" and "snapshot" will be returned. A special // filter value of ["all"] will return all bucket types. // // If present, it must be in the form of a json array of strings containing // valid bucket types in quotes and separated by a comma. 
Valid bucket types // include "allPrivate", "allPublic", "snapshot", and other values added in // the future. // // A bad request error will be returned if "all" is used with other bucket // types, this field is empty, or invalid bucket types are requested. BucketTypes []string `protobuf:"bytes,4,rep,name=bucket_types,json=bucketTypes,proto3" json:"bucket_types,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ListBucketsRequest) Reset() { *m = ListBucketsRequest{} } func (m *ListBucketsRequest) String() string { return proto.CompactTextString(m) } func (*ListBucketsRequest) ProtoMessage() {} func (*ListBucketsRequest) Descriptor() ([]byte, []int) { return fileDescriptor_pyre_492df08819220afa, []int{2} } func (m *ListBucketsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ListBucketsRequest.Unmarshal(m, b) } func (m *ListBucketsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ListBucketsRequest.Marshal(b, m, deterministic) } func (dst *ListBucketsRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ListBucketsRequest.Merge(dst, src) } func (m *ListBucketsRequest) XXX_Size() int { return xxx_messageInfo_ListBucketsRequest.Size(m) } func (m *ListBucketsRequest) XXX_DiscardUnknown() { xxx_messageInfo_ListBucketsRequest.DiscardUnknown(m) } var xxx_messageInfo_ListBucketsRequest proto.InternalMessageInfo func (m *ListBucketsRequest) GetAccountId() string { if m != nil { return m.AccountId } return "" } func (m *ListBucketsRequest) GetBucketId() string { if m != nil { return m.BucketId } return "" } func (m *ListBucketsRequest) GetBucketName() string { if m != nil { return m.BucketName } return "" } func (m *ListBucketsRequest) GetBucketTypes() []string { if m != nil { return m.BucketTypes } return nil } type LifecycleRule struct { // After a file is uploaded, the number of days before it can be hidden. DaysFromUploadingToHiding int32 `protobuf:"varint,1,opt,name=days_from_uploading_to_hiding,json=daysFromUploadingToHiding,proto3" json:"days_from_uploading_to_hiding,omitempty"` // After a file is hidden, the number of days before it can be deleted. DaysFromHidingToDeleting int32 `protobuf:"varint,2,opt,name=days_from_hiding_to_deleting,json=daysFromHidingToDeleting,proto3" json:"days_from_hiding_to_deleting,omitempty"` // The rule applies to files whose names start with this prefix. 
FileNamePrefix string `protobuf:"bytes,3,opt,name=file_name_prefix,json=fileNamePrefix,proto3" json:"file_name_prefix,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LifecycleRule) Reset() { *m = LifecycleRule{} } func (m *LifecycleRule) String() string { return proto.CompactTextString(m) } func (*LifecycleRule) ProtoMessage() {} func (*LifecycleRule) Descriptor() ([]byte, []int) { return fileDescriptor_pyre_492df08819220afa, []int{3} } func (m *LifecycleRule) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LifecycleRule.Unmarshal(m, b) } func (m *LifecycleRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LifecycleRule.Marshal(b, m, deterministic) } func (dst *LifecycleRule) XXX_Merge(src proto.Message) { xxx_messageInfo_LifecycleRule.Merge(dst, src) } func (m *LifecycleRule) XXX_Size() int { return xxx_messageInfo_LifecycleRule.Size(m) } func (m *LifecycleRule) XXX_DiscardUnknown() { xxx_messageInfo_LifecycleRule.DiscardUnknown(m) } var xxx_messageInfo_LifecycleRule proto.InternalMessageInfo func (m *LifecycleRule) GetDaysFromUploadingToHiding() int32 { if m != nil { return m.DaysFromUploadingToHiding } return 0 } func (m *LifecycleRule) GetDaysFromHidingToDeleting() int32 { if m != nil { return m.DaysFromHidingToDeleting } return 0 } func (m *LifecycleRule) GetFileNamePrefix() string { if m != nil { return m.FileNamePrefix } return "" } type CorsRule struct { // A name for humans to recognize the rule in a user interface. Names must be // unique within a bucket. Names can consist of upper-case and lower-case // English letters, numbers, and "-". No other characters are allowed. A name // must be at least 6 characters long, and can be at most 50 characters long. // These are all allowed names: myPhotosSite, allowAnyHttps, // backblaze-images. Names that start with "b2-" are reserved for Backblaze // use. CorsRuleName string `protobuf:"bytes,1,opt,name=cors_rule_name,json=corsRuleName,proto3" json:"cors_rule_name,omitempty"` // A non-empty list specifying which origins the rule covers. Each value may // have one of many formats: // // * The origin can be fully specified, such as http://www.example.com:8180 // or https://www.example.com:4433. // // * The origin can omit a default port, such as https://www.example.com. // // * The origin may have a single '*' as part of the domain name, such as // https://*.example.com, https://*:8443 or https://*. // // * The origin may be 'https' to match any origin that uses HTTPS. (This is // broader than 'https://*' because it matches any port.) // // * Finally, the origin can be a single '*' to match any origin. // // If any entry is "*", it must be the only entry. There can be at most one // "https" entry and no entry after it may start with "https:". AllowedOrigins []string `protobuf:"bytes,2,rep,name=allowed_origins,json=allowedOrigins,proto3" json:"allowed_origins,omitempty"` // A list specifying which operations the rule allows. At least one value // must be specified. All values must be from the following list. More values // may be added to this list at any time. 
// // b2_download_file_by_name // b2_download_file_by_id // b2_upload_file // b2_upload_part AllowedOperations []string `protobuf:"bytes,3,rep,name=allowed_operations,json=allowedOperations,proto3" json:"allowed_operations,omitempty"` // If present, this is a list of headers that are allowed in a pre-flight // OPTIONS request's Access-Control-Request-Headers header value. Each // value may have one of many formats: // // * It may be a complete header name, such as x-bz-content-sha1. // // * It may end with an asterisk, such as x-bz-info-*. // // * Finally, it may be a single '*' to match any header. // // If any entry is "*", it must be the only entry in the list. If this list // is missing, it is treated as if it is a list with no entries. AllowedHeaders []string `protobuf:"bytes,4,rep,name=allowed_headers,json=allowedHeaders,proto3" json:"allowed_headers,omitempty"` // If present, this is a list of headers that may be exposed to an // application inside the client (e.g. exposed to Javascript in a browser). // Each entry in the list must be a complete header name (e.g. // "x-bz-content-sha1"). If this list is missing or empty, no headers will be // exposed. ExposeHeaders []string `protobuf:"bytes,5,rep,name=expose_headers,json=exposeHeaders,proto3" json:"expose_headers,omitempty"` // This specifies the maximum number of seconds that a browser may cache the // response to a preflight request. The value must not be negative and it // must not be more than 86,400 seconds (one day). MaxAgeSeconds int32 `protobuf:"varint,6,opt,name=max_age_seconds,json=maxAgeSeconds,proto3" json:"max_age_seconds,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CorsRule) Reset() { *m = CorsRule{} } func (m *CorsRule) String() string { return proto.CompactTextString(m) } func (*CorsRule) ProtoMessage() {} func (*CorsRule) Descriptor() ([]byte, []int) { return fileDescriptor_pyre_492df08819220afa, []int{4} } func (m *CorsRule) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CorsRule.Unmarshal(m, b) } func (m *CorsRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CorsRule.Marshal(b, m, deterministic) } func (dst *CorsRule) XXX_Merge(src proto.Message) { xxx_messageInfo_CorsRule.Merge(dst, src) } func (m *CorsRule) XXX_Size() int { return xxx_messageInfo_CorsRule.Size(m) } func (m *CorsRule) XXX_DiscardUnknown() { xxx_messageInfo_CorsRule.DiscardUnknown(m) } var xxx_messageInfo_CorsRule proto.InternalMessageInfo func (m *CorsRule) GetCorsRuleName() string { if m != nil { return m.CorsRuleName } return "" } func (m *CorsRule) GetAllowedOrigins() []string { if m != nil { return m.AllowedOrigins } return nil } func (m *CorsRule) GetAllowedOperations() []string { if m != nil { return m.AllowedOperations } return nil } func (m *CorsRule) GetAllowedHeaders() []string { if m != nil { return m.AllowedHeaders } return nil } func (m *CorsRule) GetExposeHeaders() []string { if m != nil { return m.ExposeHeaders } return nil } func (m *CorsRule) GetMaxAgeSeconds() int32 { if m != nil { return m.MaxAgeSeconds } return 0 } type Bucket struct { AccountId string `protobuf:"bytes,1,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` BucketId string `protobuf:"bytes,2,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` BucketName string `protobuf:"bytes,3,opt,name=bucket_name,json=bucketName,proto3" json:"bucket_name,omitempty"` BucketType string 
`protobuf:"bytes,4,opt,name=bucket_type,json=bucketType,proto3" json:"bucket_type,omitempty"` BucketInfo map[string]string `protobuf:"bytes,5,rep,name=bucket_info,json=bucketInfo,proto3" json:"bucket_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` CoresRules []*CorsRule `protobuf:"bytes,6,rep,name=cores_rules,json=coresRules,proto3" json:"cores_rules,omitempty"` LifecycleRules []*LifecycleRule `protobuf:"bytes,7,rep,name=lifecycle_rules,json=lifecycleRules,proto3" json:"lifecycle_rules,omitempty"` Revision int32 `protobuf:"varint,8,opt,name=revision,proto3" json:"revision,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Bucket) Reset() { *m = Bucket{} } func (m *Bucket) String() string { return proto.CompactTextString(m) } func (*Bucket) ProtoMessage() {} func (*Bucket) Descriptor() ([]byte, []int) { return fileDescriptor_pyre_492df08819220afa, []int{5} } func (m *Bucket) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Bucket.Unmarshal(m, b) } func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) } func (dst *Bucket) XXX_Merge(src proto.Message) { xxx_messageInfo_Bucket.Merge(dst, src) } func (m *Bucket) XXX_Size() int { return xxx_messageInfo_Bucket.Size(m) } func (m *Bucket) XXX_DiscardUnknown() { xxx_messageInfo_Bucket.DiscardUnknown(m) } var xxx_messageInfo_Bucket proto.InternalMessageInfo func (m *Bucket) GetAccountId() string { if m != nil { return m.AccountId } return "" } func (m *Bucket) GetBucketId() string { if m != nil { return m.BucketId } return "" } func (m *Bucket) GetBucketName() string { if m != nil { return m.BucketName } return "" } func (m *Bucket) GetBucketType() string { if m != nil { return m.BucketType } return "" } func (m *Bucket) GetBucketInfo() map[string]string { if m != nil { return m.BucketInfo } return nil } func (m *Bucket) GetCoresRules() []*CorsRule { if m != nil { return m.CoresRules } return nil } func (m *Bucket) GetLifecycleRules() []*LifecycleRule { if m != nil { return m.LifecycleRules } return nil } func (m *Bucket) GetRevision() int32 { if m != nil { return m.Revision } return 0 } type ListBucketsResponse struct { Buckets []*Bucket `protobuf:"bytes,1,rep,name=buckets,proto3" json:"buckets,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ListBucketsResponse) Reset() { *m = ListBucketsResponse{} } func (m *ListBucketsResponse) String() string { return proto.CompactTextString(m) } func (*ListBucketsResponse) ProtoMessage() {} func (*ListBucketsResponse) Descriptor() ([]byte, []int) { return fileDescriptor_pyre_492df08819220afa, []int{6} } func (m *ListBucketsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ListBucketsResponse.Unmarshal(m, b) } func (m *ListBucketsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ListBucketsResponse.Marshal(b, m, deterministic) } func (dst *ListBucketsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ListBucketsResponse.Merge(dst, src) } func (m *ListBucketsResponse) XXX_Size() int { return xxx_messageInfo_ListBucketsResponse.Size(m) } func (m *ListBucketsResponse) XXX_DiscardUnknown() { xxx_messageInfo_ListBucketsResponse.DiscardUnknown(m) } var xxx_messageInfo_ListBucketsResponse proto.InternalMessageInfo func (m *ListBucketsResponse) GetBuckets() 
[]*Bucket { if m != nil { return m.Buckets } return nil } type GetUploadUrlRequest struct { BucketId string `protobuf:"bytes,1,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetUploadUrlRequest) Reset() { *m = GetUploadUrlRequest{} } func (m *GetUploadUrlRequest) String() string { return proto.CompactTextString(m) } func (*GetUploadUrlRequest) ProtoMessage() {} func (*GetUploadUrlRequest) Descriptor() ([]byte, []int) { return fileDescriptor_pyre_492df08819220afa, []int{7} } func (m *GetUploadUrlRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetUploadUrlRequest.Unmarshal(m, b) } func (m *GetUploadUrlRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetUploadUrlRequest.Marshal(b, m, deterministic) } func (dst *GetUploadUrlRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetUploadUrlRequest.Merge(dst, src) } func (m *GetUploadUrlRequest) XXX_Size() int { return xxx_messageInfo_GetUploadUrlRequest.Size(m) } func (m *GetUploadUrlRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetUploadUrlRequest.DiscardUnknown(m) } var xxx_messageInfo_GetUploadUrlRequest proto.InternalMessageInfo func (m *GetUploadUrlRequest) GetBucketId() string { if m != nil { return m.BucketId } return "" } type GetUploadUrlResponse struct { BucketId string `protobuf:"bytes,1,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` UploadUrl string `protobuf:"bytes,2,opt,name=upload_url,json=uploadUrl,proto3" json:"upload_url,omitempty"` AuthorizationToken string `protobuf:"bytes,3,opt,name=authorization_token,json=authorizationToken,proto3" json:"authorization_token,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetUploadUrlResponse) Reset() { *m = GetUploadUrlResponse{} } func (m *GetUploadUrlResponse) String() string { return proto.CompactTextString(m) } func (*GetUploadUrlResponse) ProtoMessage() {} func (*GetUploadUrlResponse) Descriptor() ([]byte, []int) { return fileDescriptor_pyre_492df08819220afa, []int{8} } func (m *GetUploadUrlResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetUploadUrlResponse.Unmarshal(m, b) } func (m *GetUploadUrlResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetUploadUrlResponse.Marshal(b, m, deterministic) } func (dst *GetUploadUrlResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetUploadUrlResponse.Merge(dst, src) } func (m *GetUploadUrlResponse) XXX_Size() int { return xxx_messageInfo_GetUploadUrlResponse.Size(m) } func (m *GetUploadUrlResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetUploadUrlResponse.DiscardUnknown(m) } var xxx_messageInfo_GetUploadUrlResponse proto.InternalMessageInfo func (m *GetUploadUrlResponse) GetBucketId() string { if m != nil { return m.BucketId } return "" } func (m *GetUploadUrlResponse) GetUploadUrl() string { if m != nil { return m.UploadUrl } return "" } func (m *GetUploadUrlResponse) GetAuthorizationToken() string { if m != nil { return m.AuthorizationToken } return "" } type UploadFileResponse struct { FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` FileName string `protobuf:"bytes,2,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` AccountId string `protobuf:"bytes,3,opt,name=account_id,json=accountId,proto3" 
json:"account_id,omitempty"` BucketId string `protobuf:"bytes,4,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` ContentLength int32 `protobuf:"varint,5,opt,name=content_length,json=contentLength,proto3" json:"content_length,omitempty"` ContentSha1 string `protobuf:"bytes,6,opt,name=content_sha1,json=contentSha1,proto3" json:"content_sha1,omitempty"` ContentType string `protobuf:"bytes,7,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` FileInfo map[string]string `protobuf:"bytes,8,rep,name=file_info,json=fileInfo,proto3" json:"file_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` Action string `protobuf:"bytes,9,opt,name=action,proto3" json:"action,omitempty"` UploadTimestamp int64 `protobuf:"varint,10,opt,name=upload_timestamp,json=uploadTimestamp,proto3" json:"upload_timestamp,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *UploadFileResponse) Reset() { *m = UploadFileResponse{} } func (m *UploadFileResponse) String() string { return proto.CompactTextString(m) } func (*UploadFileResponse) ProtoMessage() {} func (*UploadFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor_pyre_492df08819220afa, []int{9} } func (m *UploadFileResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UploadFileResponse.Unmarshal(m, b) } func (m *UploadFileResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_UploadFileResponse.Marshal(b, m, deterministic) } func (dst *UploadFileResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_UploadFileResponse.Merge(dst, src) } func (m *UploadFileResponse) XXX_Size() int { return xxx_messageInfo_UploadFileResponse.Size(m) } func (m *UploadFileResponse) XXX_DiscardUnknown() { xxx_messageInfo_UploadFileResponse.DiscardUnknown(m) } var xxx_messageInfo_UploadFileResponse proto.InternalMessageInfo func (m *UploadFileResponse) GetFileId() string { if m != nil { return m.FileId } return "" } func (m *UploadFileResponse) GetFileName() string { if m != nil { return m.FileName } return "" } func (m *UploadFileResponse) GetAccountId() string { if m != nil { return m.AccountId } return "" } func (m *UploadFileResponse) GetBucketId() string { if m != nil { return m.BucketId } return "" } func (m *UploadFileResponse) GetContentLength() int32 { if m != nil { return m.ContentLength } return 0 } func (m *UploadFileResponse) GetContentSha1() string { if m != nil { return m.ContentSha1 } return "" } func (m *UploadFileResponse) GetContentType() string { if m != nil { return m.ContentType } return "" } func (m *UploadFileResponse) GetFileInfo() map[string]string { if m != nil { return m.FileInfo } return nil } func (m *UploadFileResponse) GetAction() string { if m != nil { return m.Action } return "" } func (m *UploadFileResponse) GetUploadTimestamp() int64 { if m != nil { return m.UploadTimestamp } return 0 } type StartLargeFileRequest struct { BucketId string `protobuf:"bytes,1,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` FileName string `protobuf:"bytes,2,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` ContentType string `protobuf:"bytes,3,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` FileInfo map[string]string `protobuf:"bytes,4,rep,name=file_info,json=fileInfo,proto3" json:"file_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" 
protobuf_val:"bytes,2,opt,name=value,proto3"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StartLargeFileRequest) Reset() { *m = StartLargeFileRequest{} } func (m *StartLargeFileRequest) String() string { return proto.CompactTextString(m) } func (*StartLargeFileRequest) ProtoMessage() {} func (*StartLargeFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor_pyre_492df08819220afa, []int{10} } func (m *StartLargeFileRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StartLargeFileRequest.Unmarshal(m, b) } func (m *StartLargeFileRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StartLargeFileRequest.Marshal(b, m, deterministic) } func (dst *StartLargeFileRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StartLargeFileRequest.Merge(dst, src) } func (m *StartLargeFileRequest) XXX_Size() int { return xxx_messageInfo_StartLargeFileRequest.Size(m) } func (m *StartLargeFileRequest) XXX_DiscardUnknown() { xxx_messageInfo_StartLargeFileRequest.DiscardUnknown(m) } var xxx_messageInfo_StartLargeFileRequest proto.InternalMessageInfo func (m *StartLargeFileRequest) GetBucketId() string { if m != nil { return m.BucketId } return "" } func (m *StartLargeFileRequest) GetFileName() string { if m != nil { return m.FileName } return "" } func (m *StartLargeFileRequest) GetContentType() string { if m != nil { return m.ContentType } return "" } func (m *StartLargeFileRequest) GetFileInfo() map[string]string { if m != nil { return m.FileInfo } return nil } type StartLargeFileResponse struct { FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` FileName string `protobuf:"bytes,2,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` AccountId string `protobuf:"bytes,3,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` BucketId string `protobuf:"bytes,4,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` ContentType string `protobuf:"bytes,5,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` FileInfo map[string]string `protobuf:"bytes,6,rep,name=file_info,json=fileInfo,proto3" json:"file_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` UploadTimestamp int64 `protobuf:"varint,7,opt,name=upload_timestamp,json=uploadTimestamp,proto3" json:"upload_timestamp,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StartLargeFileResponse) Reset() { *m = StartLargeFileResponse{} } func (m *StartLargeFileResponse) String() string { return proto.CompactTextString(m) } func (*StartLargeFileResponse) ProtoMessage() {} func (*StartLargeFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor_pyre_492df08819220afa, []int{11} } func (m *StartLargeFileResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StartLargeFileResponse.Unmarshal(m, b) } func (m *StartLargeFileResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StartLargeFileResponse.Marshal(b, m, deterministic) } func (dst *StartLargeFileResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StartLargeFileResponse.Merge(dst, src) } func (m *StartLargeFileResponse) XXX_Size() int { return xxx_messageInfo_StartLargeFileResponse.Size(m) } func (m *StartLargeFileResponse) XXX_DiscardUnknown() { 
xxx_messageInfo_StartLargeFileResponse.DiscardUnknown(m) } var xxx_messageInfo_StartLargeFileResponse proto.InternalMessageInfo func (m *StartLargeFileResponse) GetFileId() string { if m != nil { return m.FileId } return "" } func (m *StartLargeFileResponse) GetFileName() string { if m != nil { return m.FileName } return "" } func (m *StartLargeFileResponse) GetAccountId() string { if m != nil { return m.AccountId } return "" } func (m *StartLargeFileResponse) GetBucketId() string { if m != nil { return m.BucketId } return "" } func (m *StartLargeFileResponse) GetContentType() string { if m != nil { return m.ContentType } return "" } func (m *StartLargeFileResponse) GetFileInfo() map[string]string { if m != nil { return m.FileInfo } return nil } func (m *StartLargeFileResponse) GetUploadTimestamp() int64 { if m != nil { return m.UploadTimestamp } return 0 } type GetUploadPartUrlRequest struct { FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetUploadPartUrlRequest) Reset() { *m = GetUploadPartUrlRequest{} } func (m *GetUploadPartUrlRequest) String() string { return proto.CompactTextString(m) } func (*GetUploadPartUrlRequest) ProtoMessage() {} func (*GetUploadPartUrlRequest) Descriptor() ([]byte, []int) { return fileDescriptor_pyre_492df08819220afa, []int{12} } func (m *GetUploadPartUrlRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetUploadPartUrlRequest.Unmarshal(m, b) } func (m *GetUploadPartUrlRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetUploadPartUrlRequest.Marshal(b, m, deterministic) } func (dst *GetUploadPartUrlRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetUploadPartUrlRequest.Merge(dst, src) } func (m *GetUploadPartUrlRequest) XXX_Size() int { return xxx_messageInfo_GetUploadPartUrlRequest.Size(m) } func (m *GetUploadPartUrlRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetUploadPartUrlRequest.DiscardUnknown(m) } var xxx_messageInfo_GetUploadPartUrlRequest proto.InternalMessageInfo func (m *GetUploadPartUrlRequest) GetFileId() string { if m != nil { return m.FileId } return "" } type GetUploadPartUrlResponse struct { FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` UploadUrl string `protobuf:"bytes,2,opt,name=upload_url,json=uploadUrl,proto3" json:"upload_url,omitempty"` AuthorizationToken string `protobuf:"bytes,3,opt,name=authorization_token,json=authorizationToken,proto3" json:"authorization_token,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetUploadPartUrlResponse) Reset() { *m = GetUploadPartUrlResponse{} } func (m *GetUploadPartUrlResponse) String() string { return proto.CompactTextString(m) } func (*GetUploadPartUrlResponse) ProtoMessage() {} func (*GetUploadPartUrlResponse) Descriptor() ([]byte, []int) { return fileDescriptor_pyre_492df08819220afa, []int{13} } func (m *GetUploadPartUrlResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetUploadPartUrlResponse.Unmarshal(m, b) } func (m *GetUploadPartUrlResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetUploadPartUrlResponse.Marshal(b, m, deterministic) } func (dst *GetUploadPartUrlResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetUploadPartUrlResponse.Merge(dst, src) } func (m 
*GetUploadPartUrlResponse) XXX_Size() int { return xxx_messageInfo_GetUploadPartUrlResponse.Size(m) } func (m *GetUploadPartUrlResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetUploadPartUrlResponse.DiscardUnknown(m) } var xxx_messageInfo_GetUploadPartUrlResponse proto.InternalMessageInfo func (m *GetUploadPartUrlResponse) GetFileId() string { if m != nil { return m.FileId } return "" } func (m *GetUploadPartUrlResponse) GetUploadUrl() string { if m != nil { return m.UploadUrl } return "" } func (m *GetUploadPartUrlResponse) GetAuthorizationToken() string { if m != nil { return m.AuthorizationToken } return "" } type FinishLargeFileRequest struct { FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` PartSha1Array []string `protobuf:"bytes,2,rep,name=part_sha1_array,json=partSha1Array,proto3" json:"part_sha1_array,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *FinishLargeFileRequest) Reset() { *m = FinishLargeFileRequest{} } func (m *FinishLargeFileRequest) String() string { return proto.CompactTextString(m) } func (*FinishLargeFileRequest) ProtoMessage() {} func (*FinishLargeFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor_pyre_492df08819220afa, []int{14} } func (m *FinishLargeFileRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FinishLargeFileRequest.Unmarshal(m, b) } func (m *FinishLargeFileRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_FinishLargeFileRequest.Marshal(b, m, deterministic) } func (dst *FinishLargeFileRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_FinishLargeFileRequest.Merge(dst, src) } func (m *FinishLargeFileRequest) XXX_Size() int { return xxx_messageInfo_FinishLargeFileRequest.Size(m) } func (m *FinishLargeFileRequest) XXX_DiscardUnknown() { xxx_messageInfo_FinishLargeFileRequest.DiscardUnknown(m) } var xxx_messageInfo_FinishLargeFileRequest proto.InternalMessageInfo func (m *FinishLargeFileRequest) GetFileId() string { if m != nil { return m.FileId } return "" } func (m *FinishLargeFileRequest) GetPartSha1Array() []string { if m != nil { return m.PartSha1Array } return nil } type FinishLargeFileResponse struct { FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` FileName string `protobuf:"bytes,2,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` AccountId string `protobuf:"bytes,3,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` BucketId string `protobuf:"bytes,4,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` ContentLength int64 `protobuf:"varint,5,opt,name=content_length,json=contentLength,proto3" json:"content_length,omitempty"` ContentSha1 string `protobuf:"bytes,6,opt,name=content_sha1,json=contentSha1,proto3" json:"content_sha1,omitempty"` ContentType string `protobuf:"bytes,7,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` FileInfo map[string]string `protobuf:"bytes,8,rep,name=file_info,json=fileInfo,proto3" json:"file_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` Action string `protobuf:"bytes,9,opt,name=action,proto3" json:"action,omitempty"` UploadTimestamp int64 `protobuf:"varint,10,opt,name=upload_timestamp,json=uploadTimestamp,proto3" json:"upload_timestamp,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` 
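// XXX_NoUnkeyedLiteral, XXX_unrecognized, and XXX_sizecache are internal
// bookkeeping fields emitted by protoc-gen-go: they discourage unkeyed
// struct literals, preserve unknown fields seen while unmarshaling, and
// cache the message's encoded size, respectively.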
XXX_sizecache int32 `json:"-"` } func (m *FinishLargeFileResponse) Reset() { *m = FinishLargeFileResponse{} } func (m *FinishLargeFileResponse) String() string { return proto.CompactTextString(m) } func (*FinishLargeFileResponse) ProtoMessage() {} func (*FinishLargeFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor_pyre_492df08819220afa, []int{15} } func (m *FinishLargeFileResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FinishLargeFileResponse.Unmarshal(m, b) } func (m *FinishLargeFileResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_FinishLargeFileResponse.Marshal(b, m, deterministic) } func (dst *FinishLargeFileResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_FinishLargeFileResponse.Merge(dst, src) } func (m *FinishLargeFileResponse) XXX_Size() int { return xxx_messageInfo_FinishLargeFileResponse.Size(m) } func (m *FinishLargeFileResponse) XXX_DiscardUnknown() { xxx_messageInfo_FinishLargeFileResponse.DiscardUnknown(m) } var xxx_messageInfo_FinishLargeFileResponse proto.InternalMessageInfo func (m *FinishLargeFileResponse) GetFileId() string { if m != nil { return m.FileId } return "" } func (m *FinishLargeFileResponse) GetFileName() string { if m != nil { return m.FileName } return "" } func (m *FinishLargeFileResponse) GetAccountId() string { if m != nil { return m.AccountId } return "" } func (m *FinishLargeFileResponse) GetBucketId() string { if m != nil { return m.BucketId } return "" } func (m *FinishLargeFileResponse) GetContentLength() int64 { if m != nil { return m.ContentLength } return 0 } func (m *FinishLargeFileResponse) GetContentSha1() string { if m != nil { return m.ContentSha1 } return "" } func (m *FinishLargeFileResponse) GetContentType() string { if m != nil { return m.ContentType } return "" } func (m *FinishLargeFileResponse) GetFileInfo() map[string]string { if m != nil { return m.FileInfo } return nil } func (m *FinishLargeFileResponse) GetAction() string { if m != nil { return m.Action } return "" } func (m *FinishLargeFileResponse) GetUploadTimestamp() int64 { if m != nil { return m.UploadTimestamp } return 0 } type ListFileVersionsRequest struct { BucketId string `protobuf:"bytes,1,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` StartFileName string `protobuf:"bytes,2,opt,name=start_file_name,json=startFileName,proto3" json:"start_file_name,omitempty"` StartFileId string `protobuf:"bytes,3,opt,name=start_file_id,json=startFileId,proto3" json:"start_file_id,omitempty"` MaxFileCount int32 `protobuf:"varint,4,opt,name=max_file_count,json=maxFileCount,proto3" json:"max_file_count,omitempty"` Prefix string `protobuf:"bytes,5,opt,name=prefix,proto3" json:"prefix,omitempty"` Delimiter string `protobuf:"bytes,6,opt,name=delimiter,proto3" json:"delimiter,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ListFileVersionsRequest) Reset() { *m = ListFileVersionsRequest{} } func (m *ListFileVersionsRequest) String() string { return proto.CompactTextString(m) } func (*ListFileVersionsRequest) ProtoMessage() {} func (*ListFileVersionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor_pyre_492df08819220afa, []int{16} } func (m *ListFileVersionsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ListFileVersionsRequest.Unmarshal(m, b) } func (m *ListFileVersionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return 
xxx_messageInfo_ListFileVersionsRequest.Marshal(b, m, deterministic) } func (dst *ListFileVersionsRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ListFileVersionsRequest.Merge(dst, src) } func (m *ListFileVersionsRequest) XXX_Size() int { return xxx_messageInfo_ListFileVersionsRequest.Size(m) } func (m *ListFileVersionsRequest) XXX_DiscardUnknown() { xxx_messageInfo_ListFileVersionsRequest.DiscardUnknown(m) } var xxx_messageInfo_ListFileVersionsRequest proto.InternalMessageInfo func (m *ListFileVersionsRequest) GetBucketId() string { if m != nil { return m.BucketId } return "" } func (m *ListFileVersionsRequest) GetStartFileName() string { if m != nil { return m.StartFileName } return "" } func (m *ListFileVersionsRequest) GetStartFileId() string { if m != nil { return m.StartFileId } return "" } func (m *ListFileVersionsRequest) GetMaxFileCount() int32 { if m != nil { return m.MaxFileCount } return 0 } func (m *ListFileVersionsRequest) GetPrefix() string { if m != nil { return m.Prefix } return "" } func (m *ListFileVersionsRequest) GetDelimiter() string { if m != nil { return m.Delimiter } return "" } type ListFileVersionsResponse struct { Files []*File `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty"` NextFileName string `protobuf:"bytes,2,opt,name=next_file_name,json=nextFileName,proto3" json:"next_file_name,omitempty"` NextFileId string `protobuf:"bytes,3,opt,name=next_file_id,json=nextFileId,proto3" json:"next_file_id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ListFileVersionsResponse) Reset() { *m = ListFileVersionsResponse{} } func (m *ListFileVersionsResponse) String() string { return proto.CompactTextString(m) } func (*ListFileVersionsResponse) ProtoMessage() {} func (*ListFileVersionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor_pyre_492df08819220afa, []int{17} } func (m *ListFileVersionsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ListFileVersionsResponse.Unmarshal(m, b) } func (m *ListFileVersionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ListFileVersionsResponse.Marshal(b, m, deterministic) } func (dst *ListFileVersionsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ListFileVersionsResponse.Merge(dst, src) } func (m *ListFileVersionsResponse) XXX_Size() int { return xxx_messageInfo_ListFileVersionsResponse.Size(m) } func (m *ListFileVersionsResponse) XXX_DiscardUnknown() { xxx_messageInfo_ListFileVersionsResponse.DiscardUnknown(m) } var xxx_messageInfo_ListFileVersionsResponse proto.InternalMessageInfo func (m *ListFileVersionsResponse) GetFiles() []*File { if m != nil { return m.Files } return nil } func (m *ListFileVersionsResponse) GetNextFileName() string { if m != nil { return m.NextFileName } return "" } func (m *ListFileVersionsResponse) GetNextFileId() string { if m != nil { return m.NextFileId } return "" } type File struct { FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` FileName string `protobuf:"bytes,2,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` ContentLength int64 `protobuf:"varint,3,opt,name=content_length,json=contentLength,proto3" json:"content_length,omitempty"` ContentType string `protobuf:"bytes,4,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` ContentSha1 string `protobuf:"bytes,5,opt,name=content_sha1,json=contentSha1,proto3" 
json:"content_sha1,omitempty"` FileInfo map[string]string `protobuf:"bytes,6,rep,name=file_info,json=fileInfo,proto3" json:"file_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` Action string `protobuf:"bytes,7,opt,name=action,proto3" json:"action,omitempty"` Size int64 `protobuf:"varint,8,opt,name=size,proto3" json:"size,omitempty"` UploadTimestamp int64 `protobuf:"varint,9,opt,name=upload_timestamp,json=uploadTimestamp,proto3" json:"upload_timestamp,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *File) Reset() { *m = File{} } func (m *File) String() string { return proto.CompactTextString(m) } func (*File) ProtoMessage() {} func (*File) Descriptor() ([]byte, []int) { return fileDescriptor_pyre_492df08819220afa, []int{18} } func (m *File) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_File.Unmarshal(m, b) } func (m *File) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_File.Marshal(b, m, deterministic) } func (dst *File) XXX_Merge(src proto.Message) { xxx_messageInfo_File.Merge(dst, src) } func (m *File) XXX_Size() int { return xxx_messageInfo_File.Size(m) } func (m *File) XXX_DiscardUnknown() { xxx_messageInfo_File.DiscardUnknown(m) } var xxx_messageInfo_File proto.InternalMessageInfo func (m *File) GetFileId() string { if m != nil { return m.FileId } return "" } func (m *File) GetFileName() string { if m != nil { return m.FileName } return "" } func (m *File) GetContentLength() int64 { if m != nil { return m.ContentLength } return 0 } func (m *File) GetContentType() string { if m != nil { return m.ContentType } return "" } func (m *File) GetContentSha1() string { if m != nil { return m.ContentSha1 } return "" } func (m *File) GetFileInfo() map[string]string { if m != nil { return m.FileInfo } return nil } func (m *File) GetAction() string { if m != nil { return m.Action } return "" } func (m *File) GetSize() int64 { if m != nil { return m.Size } return 0 } func (m *File) GetUploadTimestamp() int64 { if m != nil { return m.UploadTimestamp } return 0 } func init() { proto.RegisterType((*AuthorizeAccountRequest)(nil), "pyre.proto.AuthorizeAccountRequest") proto.RegisterType((*AuthorizeAccountResponse)(nil), "pyre.proto.AuthorizeAccountResponse") proto.RegisterType((*ListBucketsRequest)(nil), "pyre.proto.ListBucketsRequest") proto.RegisterType((*LifecycleRule)(nil), "pyre.proto.LifecycleRule") proto.RegisterType((*CorsRule)(nil), "pyre.proto.CorsRule") proto.RegisterType((*Bucket)(nil), "pyre.proto.Bucket") proto.RegisterMapType((map[string]string)(nil), "pyre.proto.Bucket.BucketInfoEntry") proto.RegisterType((*ListBucketsResponse)(nil), "pyre.proto.ListBucketsResponse") proto.RegisterType((*GetUploadUrlRequest)(nil), "pyre.proto.GetUploadUrlRequest") proto.RegisterType((*GetUploadUrlResponse)(nil), "pyre.proto.GetUploadUrlResponse") proto.RegisterType((*UploadFileResponse)(nil), "pyre.proto.UploadFileResponse") proto.RegisterMapType((map[string]string)(nil), "pyre.proto.UploadFileResponse.FileInfoEntry") proto.RegisterType((*StartLargeFileRequest)(nil), "pyre.proto.StartLargeFileRequest") proto.RegisterMapType((map[string]string)(nil), "pyre.proto.StartLargeFileRequest.FileInfoEntry") proto.RegisterType((*StartLargeFileResponse)(nil), "pyre.proto.StartLargeFileResponse") proto.RegisterMapType((map[string]string)(nil), "pyre.proto.StartLargeFileResponse.FileInfoEntry") 
proto.RegisterType((*GetUploadPartUrlRequest)(nil), "pyre.proto.GetUploadPartUrlRequest") proto.RegisterType((*GetUploadPartUrlResponse)(nil), "pyre.proto.GetUploadPartUrlResponse") proto.RegisterType((*FinishLargeFileRequest)(nil), "pyre.proto.FinishLargeFileRequest") proto.RegisterType((*FinishLargeFileResponse)(nil), "pyre.proto.FinishLargeFileResponse") proto.RegisterMapType((map[string]string)(nil), "pyre.proto.FinishLargeFileResponse.FileInfoEntry") proto.RegisterType((*ListFileVersionsRequest)(nil), "pyre.proto.ListFileVersionsRequest") proto.RegisterType((*ListFileVersionsResponse)(nil), "pyre.proto.ListFileVersionsResponse") proto.RegisterType((*File)(nil), "pyre.proto.File") proto.RegisterMapType((map[string]string)(nil), "pyre.proto.File.FileInfoEntry") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // PyreServiceClient is the client API for PyreService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type PyreServiceClient interface { // Used to log in to the B2 API. Returns an authorization token that can be // used for account-level operations, and a URL that should be used as the // base URL for subsequent API calls. AuthorizeAccount(ctx context.Context, in *AuthorizeAccountRequest, opts ...grpc.CallOption) (*AuthorizeAccountResponse, error) // Lists buckets associated with an account, in alphabetical order by bucket // name. ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) // Creates a new bucket. A bucket belongs to the account used to create it. // // Buckets can be named. The name must be globally unique. No account can use // a bucket with the same name. Buckets are assigned a unique bucketId which // is used when uploading, downloading, or deleting files. // // There is a limit of 100 buckets per account. CreateBucket(ctx context.Context, in *Bucket, opts ...grpc.CallOption) (*Bucket, error) // Deletes the bucket specified. Only buckets that contain no version of any // files can be deleted. DeleteBucket(ctx context.Context, in *Bucket, opts ...grpc.CallOption) (*Bucket, error) GetUploadUrl(ctx context.Context, in *GetUploadUrlRequest, opts ...grpc.CallOption) (*GetUploadUrlResponse, error) // Prepares for uploading the parts of a large file. StartLargeFile(ctx context.Context, in *StartLargeFileRequest, opts ...grpc.CallOption) (*StartLargeFileResponse, error) // Gets an URL to use for uploading parts of a large file. GetUploadPartUrl(ctx context.Context, in *GetUploadPartUrlRequest, opts ...grpc.CallOption) (*GetUploadPartUrlResponse, error) // Converts the parts that have been uploaded into a single B2 file. FinishLargeFile(ctx context.Context, in *FinishLargeFileRequest, opts ...grpc.CallOption) (*FinishLargeFileResponse, error) // Lists all of the versions of all of the files contained in one bucket, in // alphabetical order by file name, and by reverse of date/time uploaded for // versions of files with the same name. 
ListFileVersions(ctx context.Context, in *ListFileVersionsRequest, opts ...grpc.CallOption) (*ListFileVersionsResponse, error) } type pyreServiceClient struct { cc *grpc.ClientConn } func NewPyreServiceClient(cc *grpc.ClientConn) PyreServiceClient { return &pyreServiceClient{cc} } func (c *pyreServiceClient) AuthorizeAccount(ctx context.Context, in *AuthorizeAccountRequest, opts ...grpc.CallOption) (*AuthorizeAccountResponse, error) { out := new(AuthorizeAccountResponse) err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/AuthorizeAccount", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *pyreServiceClient) ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) { out := new(ListBucketsResponse) err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/ListBuckets", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *pyreServiceClient) CreateBucket(ctx context.Context, in *Bucket, opts ...grpc.CallOption) (*Bucket, error) { out := new(Bucket) err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/CreateBucket", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *pyreServiceClient) DeleteBucket(ctx context.Context, in *Bucket, opts ...grpc.CallOption) (*Bucket, error) { out := new(Bucket) err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/DeleteBucket", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *pyreServiceClient) GetUploadUrl(ctx context.Context, in *GetUploadUrlRequest, opts ...grpc.CallOption) (*GetUploadUrlResponse, error) { out := new(GetUploadUrlResponse) err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/GetUploadUrl", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *pyreServiceClient) StartLargeFile(ctx context.Context, in *StartLargeFileRequest, opts ...grpc.CallOption) (*StartLargeFileResponse, error) { out := new(StartLargeFileResponse) err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/StartLargeFile", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *pyreServiceClient) GetUploadPartUrl(ctx context.Context, in *GetUploadPartUrlRequest, opts ...grpc.CallOption) (*GetUploadPartUrlResponse, error) { out := new(GetUploadPartUrlResponse) err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/GetUploadPartUrl", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *pyreServiceClient) FinishLargeFile(ctx context.Context, in *FinishLargeFileRequest, opts ...grpc.CallOption) (*FinishLargeFileResponse, error) { out := new(FinishLargeFileResponse) err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/FinishLargeFile", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *pyreServiceClient) ListFileVersions(ctx context.Context, in *ListFileVersionsRequest, opts ...grpc.CallOption) (*ListFileVersionsResponse, error) { out := new(ListFileVersionsResponse) err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/ListFileVersions", in, out, opts...) if err != nil { return nil, err } return out, nil } // PyreServiceServer is the server API for PyreService service. type PyreServiceServer interface { // Used to log in to the B2 API. Returns an authorization token that can be // used for account-level operations, and a URL that should be used as the // base URL for subsequent API calls. 
AuthorizeAccount(context.Context, *AuthorizeAccountRequest) (*AuthorizeAccountResponse, error) // Lists buckets associated with an account, in alphabetical order by bucket // name. ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) // Creates a new bucket. A bucket belongs to the account used to create it. // // Buckets can be named. The name must be globally unique. No account can use // a bucket with the same name. Buckets are assigned a unique bucketId which // is used when uploading, downloading, or deleting files. // // There is a limit of 100 buckets per account. CreateBucket(context.Context, *Bucket) (*Bucket, error) // Deletes the bucket specified. Only buckets that contain no version of any // files can be deleted. DeleteBucket(context.Context, *Bucket) (*Bucket, error) GetUploadUrl(context.Context, *GetUploadUrlRequest) (*GetUploadUrlResponse, error) // Prepares for uploading the parts of a large file. StartLargeFile(context.Context, *StartLargeFileRequest) (*StartLargeFileResponse, error) // Gets an URL to use for uploading parts of a large file. GetUploadPartUrl(context.Context, *GetUploadPartUrlRequest) (*GetUploadPartUrlResponse, error) // Converts the parts that have been uploaded into a single B2 file. FinishLargeFile(context.Context, *FinishLargeFileRequest) (*FinishLargeFileResponse, error) // Lists all of the versions of all of the files contained in one bucket, in // alphabetical order by file name, and by reverse of date/time uploaded for // versions of files with the same name. ListFileVersions(context.Context, *ListFileVersionsRequest) (*ListFileVersionsResponse, error) } func RegisterPyreServiceServer(s *grpc.Server, srv PyreServiceServer) { s.RegisterService(&_PyreService_serviceDesc, srv) } func _PyreService_AuthorizeAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(AuthorizeAccountRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(PyreServiceServer).AuthorizeAccount(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pyre.proto.PyreService/AuthorizeAccount", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(PyreServiceServer).AuthorizeAccount(ctx, req.(*AuthorizeAccountRequest)) } return interceptor(ctx, in, info, handler) } func _PyreService_ListBuckets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ListBucketsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(PyreServiceServer).ListBuckets(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pyre.proto.PyreService/ListBuckets", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(PyreServiceServer).ListBuckets(ctx, req.(*ListBucketsRequest)) } return interceptor(ctx, in, info, handler) } func _PyreService_CreateBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Bucket) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(PyreServiceServer).CreateBucket(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pyre.proto.PyreService/CreateBucket", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return 
srv.(PyreServiceServer).CreateBucket(ctx, req.(*Bucket)) } return interceptor(ctx, in, info, handler) } func _PyreService_DeleteBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Bucket) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(PyreServiceServer).DeleteBucket(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pyre.proto.PyreService/DeleteBucket", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(PyreServiceServer).DeleteBucket(ctx, req.(*Bucket)) } return interceptor(ctx, in, info, handler) } func _PyreService_GetUploadUrl_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetUploadUrlRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(PyreServiceServer).GetUploadUrl(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pyre.proto.PyreService/GetUploadUrl", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(PyreServiceServer).GetUploadUrl(ctx, req.(*GetUploadUrlRequest)) } return interceptor(ctx, in, info, handler) } func _PyreService_StartLargeFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(StartLargeFileRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(PyreServiceServer).StartLargeFile(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pyre.proto.PyreService/StartLargeFile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(PyreServiceServer).StartLargeFile(ctx, req.(*StartLargeFileRequest)) } return interceptor(ctx, in, info, handler) } func _PyreService_GetUploadPartUrl_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetUploadPartUrlRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(PyreServiceServer).GetUploadPartUrl(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pyre.proto.PyreService/GetUploadPartUrl", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(PyreServiceServer).GetUploadPartUrl(ctx, req.(*GetUploadPartUrlRequest)) } return interceptor(ctx, in, info, handler) } func _PyreService_FinishLargeFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(FinishLargeFileRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(PyreServiceServer).FinishLargeFile(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pyre.proto.PyreService/FinishLargeFile", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(PyreServiceServer).FinishLargeFile(ctx, req.(*FinishLargeFileRequest)) } return interceptor(ctx, in, info, handler) } func _PyreService_ListFileVersions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ListFileVersionsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == 
nil { return srv.(PyreServiceServer).ListFileVersions(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pyre.proto.PyreService/ListFileVersions", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(PyreServiceServer).ListFileVersions(ctx, req.(*ListFileVersionsRequest)) } return interceptor(ctx, in, info, handler) } var _PyreService_serviceDesc = grpc.ServiceDesc{ ServiceName: "pyre.proto.PyreService", HandlerType: (*PyreServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "AuthorizeAccount", Handler: _PyreService_AuthorizeAccount_Handler, }, { MethodName: "ListBuckets", Handler: _PyreService_ListBuckets_Handler, }, { MethodName: "CreateBucket", Handler: _PyreService_CreateBucket_Handler, }, { MethodName: "DeleteBucket", Handler: _PyreService_DeleteBucket_Handler, }, { MethodName: "GetUploadUrl", Handler: _PyreService_GetUploadUrl_Handler, }, { MethodName: "StartLargeFile", Handler: _PyreService_StartLargeFile_Handler, }, { MethodName: "GetUploadPartUrl", Handler: _PyreService_GetUploadPartUrl_Handler, }, { MethodName: "FinishLargeFile", Handler: _PyreService_FinishLargeFile_Handler, }, { MethodName: "ListFileVersions", Handler: _PyreService_ListFileVersions_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "proto/pyre.proto", } func init() { proto.RegisterFile("proto/pyre.proto", fileDescriptor_pyre_492df08819220afa) } var fileDescriptor_pyre_492df08819220afa = []byte{ // 1591 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcb, 0x6f, 0x1b, 0x55, 0x17, 0xd7, 0xd8, 0x71, 0x1c, 0x1f, 0xc7, 0xb1, 0x7b, 0xd3, 0x36, 0x13, 0xb7, 0x4d, 0xdc, 0xdb, 0x24, 0x5f, 0x9a, 0xaf, 0x5f, 0xfc, 0x25, 0x08, 0x09, 0xb5, 0x02, 0x91, 0x06, 0x42, 0x23, 0xa5, 0xa5, 0x72, 0x52, 0x24, 0x16, 0x68, 0x74, 0xe3, 0xb9, 0xb1, 0xaf, 0x3a, 0x0f, 0x73, 0x67, 0x9c, 0xc6, 0x5d, 0xf1, 0x50, 0x17, 0x74, 0x0b, 0xff, 0x00, 0x7f, 0x07, 0x12, 0x1b, 0x36, 0xec, 0xd9, 0xb3, 0xea, 0x86, 0x7f, 0x81, 0x0d, 0xe8, 0x3e, 0xc6, 0x9e, 0x47, 0x9c, 0x14, 0x68, 0x45, 0x57, 0x9e, 0x39, 0xf7, 0x77, 0xce, 0x3d, 0xf7, 0x77, 0x1e, 0x73, 0xae, 0xa1, 0xd6, 0xe3, 0x7e, 0xe8, 0x37, 0x7b, 0x03, 0x4e, 0xd7, 0xe5, 0x23, 0x82, 0xd1, 0x73, 0xfd, 0x6a, 0xc7, 0xf7, 0x3b, 0x0e, 0x6d, 0x92, 0x1e, 0x6b, 0x12, 0xcf, 0xf3, 0x43, 0x12, 0x32, 0xdf, 0x0b, 0xd4, 0x2a, 0x9e, 0x87, 0xb9, 0xad, 0x7e, 0xd8, 0xf5, 0x39, 0x7b, 0x4a, 0xb7, 0xda, 0x6d, 0xbf, 0xef, 0x85, 0x2d, 0xfa, 0x79, 0x9f, 0x06, 0x21, 0xfe, 0x29, 0x07, 0x66, 0x76, 0x2d, 0xe8, 0xf9, 0x5e, 0x40, 0xd1, 0x35, 0x00, 0xa2, 0x44, 0x16, 0xb3, 0x4d, 0xa3, 0x61, 0xac, 0x96, 0x5a, 0x25, 0x2d, 0xd9, 0xb5, 0x51, 0x13, 0x66, 0x89, 0x56, 0x95, 0xdb, 0x59, 0xa1, 0xff, 0x98, 0x7a, 0x66, 0x4e, 0xe2, 0x50, 0x62, 0xe9, 0x40, 0xac, 0xa0, 0x39, 0x28, 0x92, 0x1e, 0xb3, 0xfa, 0xdc, 0x31, 0xf3, 0x12, 0x34, 0x49, 0x7a, 0xec, 0x11, 0x77, 0xd0, 0x75, 0x98, 0xb6, 0xfd, 0x27, 0x9e, 0xe3, 0x13, 0x5b, 0xae, 0x4e, 0xc8, 0xd5, 0x72, 0x24, 0x13, 0x90, 0x4d, 0xb8, 0xc4, 0x69, 0xdb, 0x77, 0x5d, 0xea, 0xd9, 0xd4, 0xb6, 0x7a, 0x84, 0x87, 0x56, 0xc0, 0x9e, 0x52, 0xb3, 0xd0, 0x30, 0x56, 0x0b, 0xad, 0xd9, 0xd8, 0xe2, 0x43, 0xc2, 0xc3, 0x7d, 0xf6, 0x94, 0xa2, 0x3b, 0x50, 0x27, 0x87, 0x81, 0xef, 0xf4, 0x43, 0x6a, 0xb9, 0xcc, 0x63, 0x6e, 0xdf, 0x8d, 0x29, 0x4e, 0x4a, 0xc5, 0xb9, 0x08, 0x71, 0x5f, 0x01, 0x86, 0xca, 0x6b, 0x70, 0x21, 0xab, 0x53, 0x94, 0x3a, 0x55, 0x37, 0x89, 0xc5, 0xdf, 0x19, 0x80, 0xf6, 0x58, 0x10, 0xde, 0xed, 0xb7, 0x1f, 0xd3, 0x30, 0xd0, 0xe4, 0x9e, 0xc7, 0xdf, 0x15, 0x28, 0x1d, 0x4a, 0x05, 0xb1, 
0xaa, 0x58, 0x9b, 0x52, 0x82, 0x5d, 0x1b, 0x2d, 0x42, 0x59, 0x2f, 0x7a, 0xc4, 0xa5, 0x9a, 0x2f, 0x50, 0xa2, 0x07, 0xc4, 0xa5, 0x82, 0x33, 0x0d, 0x08, 0x07, 0x3d, 0x1a, 0x98, 0x13, 0x8d, 0xbc, 0xe0, 0x4c, 0xc9, 0x0e, 0x84, 0x08, 0xff, 0x60, 0x40, 0x65, 0x8f, 0x1d, 0xd1, 0xf6, 0xa0, 0xed, 0xd0, 0x56, 0xdf, 0xa1, 0xe8, 0x7d, 0xb8, 0x66, 0x93, 0x41, 0x60, 0x1d, 0x71, 0xdf, 0xb5, 0xfa, 0x3d, 0x41, 0x2e, 0xf3, 0x3a, 0x56, 0xe8, 0x5b, 0x5d, 0x26, 0x9e, 0xa4, 0x93, 0x85, 0xd6, 0xbc, 0x00, 0xed, 0x70, 0xdf, 0x7d, 0x14, 0x41, 0x0e, 0xfc, 0x7b, 0x12, 0x80, 0xde, 0x83, 0xab, 0x23, 0x0b, 0x4a, 0x49, 0xa8, 0xdb, 0xd4, 0xa1, 0xa1, 0x30, 0x90, 0x93, 0x06, 0xcc, 0xc8, 0x80, 0xd2, 0x3a, 0xf0, 0x3f, 0xd0, 0xeb, 0x68, 0x15, 0x6a, 0x47, 0xcc, 0xa1, 0xf2, 0x54, 0x56, 0x8f, 0xd3, 0x23, 0x76, 0xa2, 0x0f, 0x37, 0x23, 0xe4, 0xe2, 0x68, 0x0f, 0xa5, 0x14, 0x7f, 0x91, 0x83, 0xa9, 0x6d, 0x9f, 0x07, 0xd2, 0xf1, 0x25, 0x98, 0x69, 0xfb, 0x3c, 0xb0, 0x78, 0x5f, 0xeb, 0x6a, 0x3a, 0xa7, 0xdb, 0x1a, 0x21, 0x39, 0xf9, 0x0f, 0x54, 0x89, 0xe3, 0xf8, 0x4f, 0xa8, 0x6d, 0xf9, 0x9c, 0x75, 0x98, 0x17, 0x98, 0x39, 0x49, 0xcb, 0x8c, 0x16, 0x7f, 0xac, 0xa4, 0xe8, 0x7f, 0x80, 0x86, 0xc0, 0x1e, 0xe5, 0xaa, 0x5a, 0xcc, 0xbc, 0xc4, 0x5e, 0x88, 0xb0, 0xc3, 0x85, 0xb8, 0xdd, 0x2e, 0x25, 0x36, 0xe5, 0x11, 0xdd, 0x91, 0xdd, 0x7b, 0x4a, 0x8a, 0x96, 0x61, 0x86, 0x9e, 0xf4, 0xfc, 0x80, 0x0e, 0x71, 0x05, 0x89, 0xab, 0x28, 0x69, 0x04, 0x5b, 0x81, 0xaa, 0x4b, 0x4e, 0x2c, 0xd2, 0xa1, 0x56, 0x40, 0xdb, 0xbe, 0x67, 0x07, 0x3a, 0x1b, 0x2b, 0x2e, 0x39, 0xd9, 0xea, 0xd0, 0x7d, 0x25, 0xc4, 0xdf, 0xe7, 0x61, 0x52, 0xe5, 0xd4, 0xeb, 0xcd, 0xa5, 0x11, 0x40, 0xe4, 0x92, 0x2e, 0x3f, 0x18, 0xa5, 0x12, 0xda, 0x1e, 0x02, 0x98, 0x77, 0xe4, 0xcb, 0x43, 0x95, 0x37, 0xf1, 0xfa, 0xa8, 0x03, 0xad, 0x2b, 0x37, 0xf5, 0xcf, 0xae, 0x77, 0xe4, 0x7f, 0xe8, 0x85, 0x7c, 0x10, 0x19, 0x11, 0x02, 0xf4, 0x36, 0x94, 0xdb, 0x3e, 0xa7, 0x2a, 0x88, 0xe2, 0xc4, 0xc2, 0xc8, 0xc5, 0xb8, 0x91, 0x28, 0xdc, 0x2d, 0x90, 0x40, 0xf1, 0x18, 0xa0, 0xbb, 0x50, 0x75, 0xa2, 0x24, 0xd6, 0xaa, 0x45, 0xa9, 0x3a, 0x1f, 0x57, 0x4d, 0xe4, 0x79, 0x6b, 0xc6, 0x89, 0xbf, 0x06, 0xa8, 0x0e, 0x53, 0x9c, 0x1e, 0xb3, 0x80, 0xf9, 0x9e, 0x39, 0x25, 0x99, 0x1e, 0xbe, 0xd7, 0xdf, 0x85, 0x6a, 0xca, 0x6b, 0x54, 0x83, 0xfc, 0x63, 0x3a, 0xd0, 0x2c, 0x8b, 0x47, 0x74, 0x11, 0x0a, 0xc7, 0xc4, 0xe9, 0x53, 0xcd, 0xad, 0x7a, 0xb9, 0x9d, 0x7b, 0xc7, 0xc0, 0xdb, 0x30, 0x9b, 0x28, 0x7d, 0xdd, 0x3b, 0x6f, 0x41, 0x51, 0x1d, 0x3d, 0x30, 0x0d, 0xe9, 0x2d, 0xca, 0xb2, 0xd5, 0x8a, 0x20, 0x78, 0x13, 0x66, 0x3f, 0xa2, 0xa1, 0xaa, 0xb6, 0x47, 0xdc, 0x89, 0x1a, 0x48, 0x22, 0xaa, 0x46, 0x32, 0xaa, 0xf8, 0x6b, 0x03, 0x2e, 0x26, 0x95, 0xf4, 0xd6, 0x67, 0x69, 0x89, 0x3c, 0x52, 0x75, 0x2f, 0x1b, 0xad, 0x3a, 0x4d, 0xa9, 0x1f, 0xd9, 0x18, 0xd7, 0xd3, 0xf3, 0xe3, 0x7a, 0x3a, 0xfe, 0x31, 0x0f, 0x48, 0xb9, 0xb0, 0xc3, 0x1c, 0x3a, 0xf4, 0x61, 0x0e, 0x8a, 0xb2, 0xcc, 0x87, 0x1e, 0x4c, 0x8a, 0x57, 0x95, 0xa8, 0xc3, 0xfa, 0x8f, 0x12, 0x35, 0x2a, 0xfc, 0x54, 0x92, 0xe7, 0xcf, 0x4c, 0xf2, 0x89, 0xd4, 0xc1, 0x96, 0x45, 0x87, 0xf0, 0x42, 0xea, 0x85, 0x96, 0x43, 0xbd, 0x4e, 0xd8, 0xd5, 0x5f, 0x86, 0x8a, 0x96, 0xee, 0x49, 0xa1, 0x68, 0x9b, 0x11, 0x2c, 0xe8, 0x92, 0x0d, 0x59, 0x77, 0xa5, 0x56, 0x59, 0xcb, 0xf6, 0xbb, 0x64, 0x23, 0x0e, 0x91, 0xe5, 0x50, 0x4c, 0x40, 0x64, 0x3d, 0xec, 0xea, 0x53, 0xc8, 0x6a, 0x98, 0x92, 0xf1, 0xbd, 0x15, 0x8f, 0x6f, 0x96, 0x91, 0x75, 0xf1, 0x32, 0xaa, 0x0b, 0x79, 0x66, 0x59, 0x15, 0x97, 0x61, 0x92, 0xb4, 0x05, 0x9f, 0x66, 0x49, 0x7f, 0x13, 0xe5, 0x1b, 0xba, 0x09, 0x35, 0x1d, 0xa8, 0x90, 0xb9, 0x34, 0x08, 0x89, 0xdb, 0x33, 0xa1, 
0x61, 0xac, 0xe6, 0x5b, 0x55, 0x25, 0x3f, 0x88, 0xc4, 0xf5, 0x3b, 0x50, 0x49, 0x58, 0xff, 0x4b, 0xf9, 0xfb, 0x87, 0x01, 0x97, 0xf6, 0x43, 0xc2, 0xc3, 0x3d, 0xc2, 0x3b, 0x54, 0xb9, 0x7c, 0x7e, 0xf6, 0x9d, 0x1d, 0xc7, 0x34, 0x83, 0xf9, 0x2c, 0x83, 0x7b, 0x71, 0x06, 0x27, 0x24, 0x83, 0xcd, 0x38, 0x83, 0xa7, 0xba, 0x34, 0x8e, 0xc4, 0x7f, 0xc6, 0xc0, 0x8b, 0x1c, 0x5c, 0x4e, 0x6f, 0xf7, 0xaf, 0xa5, 0x71, 0x9a, 0xba, 0x42, 0x96, 0xba, 0xfb, 0x71, 0xea, 0x54, 0x17, 0xfd, 0xff, 0x59, 0xd4, 0x9d, 0x93, 0x80, 0xa7, 0x25, 0x5a, 0xf1, 0x35, 0x24, 0xda, 0x26, 0xcc, 0x0d, 0xdb, 0x95, 0x98, 0x9c, 0x62, 0x7d, 0x6e, 0x1c, 0xcd, 0xa2, 0xc7, 0x99, 0x59, 0xa5, 0xf3, 0x82, 0xf3, 0xaa, 0x7b, 0xdc, 0xa7, 0x70, 0x79, 0x87, 0x79, 0x2c, 0xe8, 0x66, 0x4a, 0x64, 0xac, 0x0b, 0x2b, 0x50, 0x55, 0x53, 0x63, 0x97, 0x6c, 0x58, 0x84, 0x73, 0x32, 0xd0, 0x93, 0x48, 0x45, 0x88, 0x45, 0x9b, 0xd9, 0x12, 0x42, 0xfc, 0x73, 0x1e, 0xe6, 0x32, 0xb6, 0xdf, 0xb0, 0x1e, 0x9a, 0x7f, 0x3d, 0x3d, 0xf4, 0x41, 0xb6, 0x87, 0x6e, 0xc4, 0xd3, 0x78, 0x0c, 0x2d, 0x6f, 0x6c, 0x23, 0xfd, 0xd5, 0x80, 0x39, 0x31, 0x09, 0x08, 0x0b, 0x9f, 0x50, 0x2e, 0x66, 0x8b, 0xe0, 0xa5, 0x5a, 0xe9, 0x0a, 0x54, 0x03, 0x51, 0xb2, 0x56, 0x3a, 0xa8, 0x15, 0x29, 0xde, 0x89, 0x22, 0x8b, 0xa1, 0x12, 0xc3, 0x0d, 0x83, 0x5b, 0x1e, 0xa2, 0x76, 0x6d, 0x31, 0x27, 0x8b, 0xc9, 0x52, 0x22, 0x64, 0xc8, 0x65, 0x8c, 0x0b, 0xad, 0x69, 0x97, 0x9c, 0x08, 0xc8, 0xb6, 0x90, 0x09, 0xaa, 0xf4, 0xe8, 0xad, 0xda, 0x8b, 0x7e, 0x43, 0x57, 0xa1, 0x64, 0x53, 0x87, 0xb9, 0x2c, 0xa4, 0x5c, 0x47, 0x75, 0x24, 0xc0, 0xcf, 0x0d, 0x30, 0xb3, 0x07, 0xd4, 0xc9, 0xba, 0x02, 0x05, 0xb1, 0x69, 0x34, 0xed, 0xd4, 0x92, 0x91, 0x74, 0x68, 0x4b, 0x2d, 0x0b, 0x07, 0x3d, 0x7a, 0x92, 0x3d, 0xeb, 0xb4, 0x90, 0x0e, 0x8f, 0xda, 0x80, 0xe9, 0x11, 0x6a, 0x78, 0x52, 0x88, 0x30, 0xbb, 0x36, 0xfe, 0x3d, 0x07, 0x13, 0xe2, 0xf1, 0x6f, 0x56, 0x49, 0x36, 0xd3, 0xf3, 0xe7, 0x64, 0x7a, 0x6c, 0x32, 0x4e, 0xa4, 0x71, 0xba, 0x18, 0x0a, 0xd9, 0x62, 0xb8, 0x93, 0x6d, 0xd8, 0x0b, 0x69, 0x7e, 0x5e, 0x22, 0xad, 0x8b, 0x89, 0xb4, 0x46, 0x30, 0x21, 0xaf, 0xa4, 0x53, 0xd2, 0x6f, 0xf9, 0x7c, 0x6a, 0xaa, 0x97, 0x5e, 0x7d, 0xaa, 0x6f, 0xfe, 0x36, 0x05, 0xe5, 0x87, 0x03, 0x4e, 0xf7, 0x29, 0x3f, 0x66, 0x6d, 0x8a, 0x9e, 0x19, 0x50, 0x4b, 0xff, 0x8b, 0x80, 0x6e, 0xc4, 0x8f, 0x38, 0xe6, 0xff, 0x87, 0xfa, 0xd2, 0xd9, 0x20, 0x95, 0x5c, 0x78, 0xe5, 0xab, 0x5f, 0x5e, 0x7c, 0x9b, 0x6b, 0xa0, 0x85, 0xe6, 0xe1, 0x26, 0xe9, 0xb1, 0xe6, 0xf1, 0x46, 0xf3, 0x70, 0xd3, 0x8a, 0x3a, 0x35, 0xb5, 0x74, 0x7b, 0x43, 0x21, 0x94, 0x63, 0xb3, 0x38, 0x5a, 0x48, 0x5e, 0x10, 0xd2, 0xf7, 0xf3, 0xfa, 0xe2, 0xd8, 0x75, 0xbd, 0xef, 0x92, 0xdc, 0x77, 0x01, 0xcf, 0x27, 0xf6, 0x75, 0x58, 0x10, 0x5a, 0x7a, 0x72, 0xbf, 0x6d, 0xac, 0xa1, 0xcf, 0x60, 0x7a, 0x9b, 0x53, 0x12, 0x52, 0x7d, 0x55, 0x3b, 0x65, 0xd2, 0xaf, 0x9f, 0x22, 0xc3, 0xcb, 0xd2, 0xfa, 0x22, 0xae, 0x27, 0xac, 0xb7, 0xa5, 0x29, 0x6d, 0x5f, 0x9b, 0x97, 0xb7, 0xe7, 0x57, 0x63, 0x5e, 0x5e, 0xd4, 0xe3, 0xe6, 0x07, 0x30, 0x1d, 0xbf, 0x45, 0xa0, 0x04, 0x29, 0xa7, 0x5c, 0x4a, 0xea, 0x8d, 0xf1, 0x80, 0x64, 0xb8, 0xf0, 0x95, 0xc4, 0xce, 0x1d, 0x1a, 0x5a, 0xa3, 0xcf, 0xb2, 0xd8, 0xfa, 0x4b, 0x03, 0x66, 0x92, 0xc3, 0x0a, 0xba, 0x7e, 0xee, 0x0c, 0x58, 0xc7, 0xe7, 0xcf, 0x3a, 0x78, 0x55, 0x7a, 0x80, 0xf1, 0xb5, 0x84, 0x07, 0xaa, 0x7b, 0x3a, 0x02, 0x2d, 0x3b, 0x8b, 0xf0, 0xe1, 0xb9, 0x01, 0xb5, 0xf4, 0x84, 0x91, 0x4c, 0xdd, 0x31, 0x43, 0x4b, 0x32, 0x75, 0xc7, 0x0d, 0x29, 0xf8, 0xbf, 0xd2, 0x93, 0x65, 0xdc, 0x18, 0xc7, 0x85, 0x1c, 0x13, 0x34, 0x21, 0xcf, 0x0c, 0xa8, 0xa6, 0x3e, 0x7b, 0x08, 0x9f, 0xf9, 0x4d, 0x54, 0xae, 0xdc, 0x78, 
0x89, 0xef, 0x26, 0xbe, 0x29, 0x3d, 0xb9, 0x81, 0x93, 0x45, 0x74, 0x24, 0xd1, 0x29, 0x52, 0xbe, 0x31, 0xa0, 0x96, 0xee, 0xf4, 0x49, 0x52, 0xc6, 0x7c, 0xe8, 0x92, 0xa4, 0x8c, 0xfb, 0x58, 0xe0, 0x35, 0xe9, 0xca, 0x12, 0x5e, 0xcc, 0xd6, 0x95, 0x6c, 0x94, 0xc7, 0x5a, 0xe1, 0xb6, 0xb1, 0x76, 0x38, 0x29, 0x6d, 0xbd, 0xf5, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0x09, 0x84, 0xd9, 0x63, 0x01, 0x15, 0x00, 0x00, } blazer-0.6.1/internal/pyre/proto/pyre.pb.gw.go000066400000000000000000000446421451327606200212750ustar00rootroot00000000000000// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. // source: proto/pyre.proto /* Package pyre_proto is a reverse proxy. It translates gRPC into RESTful JSON APIs. */ package pyre_proto import ( "io" "net/http" "github.com/golang/protobuf/proto" "github.com/grpc-ecosystem/grpc-gateway/runtime" "github.com/grpc-ecosystem/grpc-gateway/utilities" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" ) var _ codes.Code var _ io.Reader var _ status.Status var _ = runtime.String var _ = utilities.NewDoubleArray func request_PyreService_AuthorizeAccount_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq AuthorizeAccountRequest var metadata runtime.ServerMetadata msg, err := client.AuthorizeAccount(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func request_PyreService_ListBuckets_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq ListBucketsRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.ListBuckets(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func request_PyreService_CreateBucket_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq Bucket var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.CreateBucket(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func request_PyreService_DeleteBucket_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq Bucket var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.DeleteBucket(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func request_PyreService_GetUploadUrl_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq 
GetUploadUrlRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.GetUploadUrl(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func request_PyreService_StartLargeFile_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq StartLargeFileRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.StartLargeFile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func request_PyreService_GetUploadPartUrl_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq GetUploadPartUrlRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.GetUploadPartUrl(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func request_PyreService_FinishLargeFile_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq FinishLargeFileRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.FinishLargeFile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } func request_PyreService_ListFileVersions_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq ListFileVersionsRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } msg, err := client.ListFileVersions(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } // RegisterPyreServiceHandlerFromEndpoint is same as RegisterPyreServiceHandler but // automatically dials to "endpoint" and closes the connection when "ctx" gets done. func RegisterPyreServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { conn, err := grpc.Dial(endpoint, opts...) 
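// The deferred cleanup below ties the dialed connection's lifetime to ctx:
// the connection is closed immediately if registration fails; otherwise a
// goroutine closes it once ctx is done.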
if err != nil { return err } defer func() { if err != nil { if cerr := conn.Close(); cerr != nil { grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) } return } go func() { <-ctx.Done() if cerr := conn.Close(); cerr != nil { grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) } }() }() return RegisterPyreServiceHandler(ctx, mux, conn) } // RegisterPyreServiceHandler registers the http handlers for service PyreService to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterPyreServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { return RegisterPyreServiceHandlerClient(ctx, mux, NewPyreServiceClient(conn)) } // RegisterPyreServiceHandlerClient registers the http handlers for service PyreService // to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "PyreServiceClient". // Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "PyreServiceClient" // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in // "PyreServiceClient" to call the correct interceptors. func RegisterPyreServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client PyreServiceClient) error { mux.Handle("GET", pattern_PyreService_AuthorizeAccount_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() if cn, ok := w.(http.CloseNotifier); ok { go func(done <-chan struct{}, closed <-chan bool) { select { case <-done: case <-closed: cancel() } }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_PyreService_AuthorizeAccount_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_PyreService_AuthorizeAccount_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_PyreService_ListBuckets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() if cn, ok := w.(http.CloseNotifier); ok { go func(done <-chan struct{}, closed <-chan bool) { select { case <-done: case <-closed: cancel() } }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_PyreService_ListBuckets_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_PyreService_ListBuckets_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
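// Every generated route handler follows this same shape: cancel the request
// context if the client disconnects (CloseNotifier), choose inbound/outbound
// marshalers from the mux, annotate the context with HTTP metadata, invoke
// the request_* helper (which calls the underlying gRPC method), record the
// returned server metadata on the context, and then forward the response or
// an HTTP error.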
}) mux.Handle("POST", pattern_PyreService_CreateBucket_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() if cn, ok := w.(http.CloseNotifier); ok { go func(done <-chan struct{}, closed <-chan bool) { select { case <-done: case <-closed: cancel() } }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_PyreService_CreateBucket_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_PyreService_CreateBucket_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_PyreService_DeleteBucket_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() if cn, ok := w.(http.CloseNotifier); ok { go func(done <-chan struct{}, closed <-chan bool) { select { case <-done: case <-closed: cancel() } }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_PyreService_DeleteBucket_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_PyreService_DeleteBucket_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_PyreService_GetUploadUrl_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() if cn, ok := w.(http.CloseNotifier); ok { go func(done <-chan struct{}, closed <-chan bool) { select { case <-done: case <-closed: cancel() } }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_PyreService_GetUploadUrl_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_PyreService_GetUploadUrl_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_PyreService_StartLargeFile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() if cn, ok := w.(http.CloseNotifier); ok { go func(done <-chan struct{}, closed <-chan bool) { select { case <-done: case <-closed: cancel() } }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_PyreService_StartLargeFile_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_PyreService_StartLargeFile_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_PyreService_GetUploadPartUrl_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() if cn, ok := w.(http.CloseNotifier); ok { go func(done <-chan struct{}, closed <-chan bool) { select { case <-done: case <-closed: cancel() } }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_PyreService_GetUploadPartUrl_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_PyreService_GetUploadPartUrl_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) mux.Handle("POST", pattern_PyreService_FinishLargeFile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() if cn, ok := w.(http.CloseNotifier); ok { go func(done <-chan struct{}, closed <-chan bool) { select { case <-done: case <-closed: cancel() } }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_PyreService_FinishLargeFile_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_PyreService_FinishLargeFile_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) mux.Handle("POST", pattern_PyreService_ListFileVersions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() if cn, ok := w.(http.CloseNotifier); ok { go func(done <-chan struct{}, closed <-chan bool) { select { case <-done: case <-closed: cancel() } }(ctx.Done(), cn.CloseNotify()) } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } resp, md, err := request_PyreService_ListFileVersions_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } forward_PyreService_ListFileVersions_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) return nil } var ( pattern_PyreService_AuthorizeAccount_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_authorize_account"}, "")) pattern_PyreService_ListBuckets_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_list_buckets"}, "")) pattern_PyreService_CreateBucket_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_create_bucket"}, "")) pattern_PyreService_DeleteBucket_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_delete_bucket"}, "")) pattern_PyreService_GetUploadUrl_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_get_upload_url"}, "")) pattern_PyreService_StartLargeFile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_start_large_file"}, "")) pattern_PyreService_GetUploadPartUrl_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_get_upload_part_url"}, "")) pattern_PyreService_FinishLargeFile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_finish_large_file"}, "")) pattern_PyreService_ListFileVersions_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_list_file_versions"}, "")) ) var ( forward_PyreService_AuthorizeAccount_0 = runtime.ForwardResponseMessage forward_PyreService_ListBuckets_0 = runtime.ForwardResponseMessage forward_PyreService_CreateBucket_0 = runtime.ForwardResponseMessage forward_PyreService_DeleteBucket_0 = runtime.ForwardResponseMessage forward_PyreService_GetUploadUrl_0 = runtime.ForwardResponseMessage forward_PyreService_StartLargeFile_0 = runtime.ForwardResponseMessage forward_PyreService_GetUploadPartUrl_0 = runtime.ForwardResponseMessage forward_PyreService_FinishLargeFile_0 = runtime.ForwardResponseMessage forward_PyreService_ListFileVersions_0 = runtime.ForwardResponseMessage ) blazer-0.6.1/internal/pyre/proto/pyre.proto000066400000000000000000000260341451327606200210120ustar00rootroot00000000000000// Copyright 2018, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; import "google/api/annotations.proto"; package pyre.proto; message AuthorizeAccountRequest {} message AuthorizeAccountResponse { // The identifier for the account. string account_id = 1; // An authorization token to use with all calls, other than // b2_authorize_account, that need an Authorization header. This // authorization token is valid for at most 24 hours. string authorization_token = 2; // The base URL to use for all API calls except for uploading and downloading // files. string api_url = 3; // The base URL to use for downloading files. string download_url = 4; // The recommended size for each part of a large file. We recommend using // this part size for optimal upload performance. int32 recommended_part_size = 5; // The smallest possible size of a part of a large file (except the last // one). This is smaller than the recommended part size. If you use it, you // may find that it takes longer overall to upload a large file. int32 absolute_minimum_part_size = 6; int32 minimum_part_size = 7; // alias for recommended_part_size } message ListBucketsRequest { // The ID of your account. string account_id = 1; // When specified, the result will be a list containing just this bucket, if // it's present in the account, or no buckets if the account does not have a // bucket with this ID. string bucket_id = 2; // When specified, the result will be a list containing just this bucket, if // it's present in the account, or no buckets if the account does not have a // bucket with this ID. string bucket_name = 3; // If present, B2 will use it as a filter for bucket types returned in the // list buckets response. If not present, only buckets with bucket types // "allPublic", "allPrivate" and "snapshot" will be returned. A special // filter value of ["all"] will return all bucket types. // // If present, it must be in the form of a json array of strings containing // valid bucket types in quotes and separated by a comma. Valid bucket types // include "allPrivate", "allPublic", "snapshot", and other values added in // the future. // // A bad request error will be returned if "all" is used with other bucket // types, this field is empty, or invalid bucket types are requested. repeated string bucket_types = 4; } message LifecycleRule { // After a file is uploaded, the number of days before it can be hidden. int32 days_from_uploading_to_hiding = 1; // After a file is hidden, the number of days before it can be deleted. int32 days_from_hiding_to_deleting = 2; // The rule applies to files whose names start with this prefix. string file_name_prefix = 3; } message CorsRule { // A name for humans to recognize the rule in a user interface. Names must be // unique within a bucket. Names can consist of upper-case and lower-case // English letters, numbers, and "-". No other characters are allowed. A name // must be at least 6 characters long, and can be at most 50 characters long. // These are all allowed names: myPhotosSite, allowAnyHttps, // backblaze-images. Names that start with "b2-" are reserved for Backblaze // use. 
  string cors_rule_name = 1;

  // A non-empty list specifying which origins the rule covers. Each value may
  // have one of many formats:
  //
  // * The origin can be fully specified, such as http://www.example.com:8180
  // or https://www.example.com:4433.
  //
  // * The origin can omit a default port, such as https://www.example.com.
  //
  // * The origin may have a single '*' as part of the domain name, such as
  // https://*.example.com, https://*:8443 or https://*.
  //
  // * The origin may be 'https' to match any origin that uses HTTPS. (This is
  // broader than 'https://*' because it matches any port.)
  //
  // * Finally, the origin can be a single '*' to match any origin.
  //
  // If any entry is "*", it must be the only entry. There can be at most one
  // "https" entry and no entry after it may start with "https:".
  repeated string allowed_origins = 2;

  // A list specifying which operations the rule allows. At least one value
  // must be specified. All values must be from the following list. More values
  // may be added to this list at any time.
  //
  // b2_download_file_by_name
  // b2_download_file_by_id
  // b2_upload_file
  // b2_upload_part
  repeated string allowed_operations = 3;

  // If present, this is a list of headers that are allowed in a pre-flight
  // OPTIONS request's Access-Control-Request-Headers header value. Each
  // value may have one of many formats:
  //
  // * It may be a complete header name, such as x-bz-content-sha1.
  //
  // * It may end with an asterisk, such as x-bz-info-*.
  //
  // * Finally, it may be a single '*' to match any header.
  //
  // If any entry is "*", it must be the only entry in the list. If this list
  // is missing, it is treated as if it were a list with no entries.
  repeated string allowed_headers = 4;

  // If present, this is a list of headers that may be exposed to an
  // application inside the client (e.g. exposed to JavaScript in a browser).
  // Each entry in the list must be a complete header name (e.g.
  // "x-bz-content-sha1"). If this list is missing or empty, no headers will be
  // exposed.
  repeated string expose_headers = 5;

  // This specifies the maximum number of seconds that a browser may cache the
  // response to a preflight request. The value must not be negative and it
  // must not be more than 86,400 seconds (one day).
  int32 max_age_seconds = 6;
}

message Bucket {
  string account_id = 1;
  string bucket_id = 2;
  string bucket_name = 3;
  string bucket_type = 4;
  map<string, string> bucket_info = 5;
  repeated CorsRule cors_rules = 6;
  repeated LifecycleRule lifecycle_rules = 7;
  int32 revision = 8;
}

message ListBucketsResponse {
  repeated Bucket buckets = 1;
}

message GetUploadUrlRequest {
  string bucket_id = 1;
}

message GetUploadUrlResponse {
  string bucket_id = 1;
  string upload_url = 2;
  string authorization_token = 3;
}

message UploadFileResponse {
  string file_id = 1;
  string file_name = 2;
  string account_id = 3;
  string bucket_id = 4;
  int32 content_length = 5;
  string content_sha1 = 6;
  string content_type = 7;
  map<string, string> file_info = 8;
  string action = 9;
  int64 upload_timestamp = 10;
}

message StartLargeFileRequest {
  string bucket_id = 1;
  string file_name = 2;
  string content_type = 3;
  map<string, string> file_info = 4;
}

message StartLargeFileResponse {
  string file_id = 1;
  string file_name = 2;
  string account_id = 3;
  string bucket_id = 4;
  string content_type = 5;
  map<string, string> file_info = 6;
  int64 upload_timestamp = 7;
}

message GetUploadPartUrlRequest {
  string file_id = 1;
}

message GetUploadPartUrlResponse {
  string file_id = 1;
  string upload_url = 2;
  string authorization_token = 3;
}

message FinishLargeFileRequest {
  string file_id = 1;
  repeated string part_sha1_array = 2;
  // string sha1 = 3;
}

message FinishLargeFileResponse {
  string file_id = 1;
  string file_name = 2;
  string account_id = 3;
  string bucket_id = 4;
  int64 content_length = 5;
  string content_sha1 = 6; // always "none"
  string content_type = 7;
  map<string, string> file_info = 8;
  string action = 9;
  int64 upload_timestamp = 10;
}

message ListFileVersionsRequest {
  string bucket_id = 1;
  string start_file_name = 2;
  string start_file_id = 3;
  int32 max_file_count = 4;
  string prefix = 5;
  string delimiter = 6;
}

message ListFileVersionsResponse {
  repeated File files = 1;
  string next_file_name = 2;
  string next_file_id = 3;
}

message File {
  string file_id = 1;
  string file_name = 2;
  int64 content_length = 3;
  string content_type = 4;
  string content_sha1 = 5;
  map<string, string> file_info = 6;
  string action = 7;
  int64 size = 8;
  int64 upload_timestamp = 9;
}

service PyreService {
  // Used to log in to the B2 API. Returns an authorization token that can be
  // used for account-level operations, and a URL that should be used as the
  // base URL for subsequent API calls.
  rpc AuthorizeAccount(AuthorizeAccountRequest) returns (AuthorizeAccountResponse) {
    option (google.api.http) = {
      get: "/b2api/v1/b2_authorize_account"
    };
  }

  // Lists buckets associated with an account, in alphabetical order by bucket
  // name.
  rpc ListBuckets(ListBucketsRequest) returns (ListBucketsResponse) {
    option (google.api.http) = {
      post: "/b2api/v1/b2_list_buckets"
      body: "*"
    };
  }

  // Creates a new bucket. A bucket belongs to the account used to create it.
  //
  // Buckets can be named. The name must be globally unique: no two buckets,
  // even in different accounts, may share a name. Buckets are assigned a
  // unique bucketId which is used when uploading, downloading, or deleting
  // files.
  //
  // There is a limit of 100 buckets per account.
  rpc CreateBucket(Bucket) returns (Bucket) {
    option (google.api.http) = {
      post: "/b2api/v1/b2_create_bucket"
      body: "*"
    };
  }

  // Deletes the bucket specified. Only buckets that contain no version of any
  // files can be deleted.
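  //
  // As a sketch, the HTTP form of this call per the mapping declared below
  // (the field values are hypothetical):
  //
  //   POST /b2api/v1/b2_delete_bucket
  //   {"accountId": "<account>", "bucketId": "<bucket>"}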
  rpc DeleteBucket(Bucket) returns (Bucket) {
    option (google.api.http) = {
      post: "/b2api/v1/b2_delete_bucket"
      body: "*"
    };
  }

  rpc GetUploadUrl(GetUploadUrlRequest) returns (GetUploadUrlResponse) {
    option (google.api.http) = {
      post: "/b2api/v1/b2_get_upload_url"
      body: "*"
    };
  }

  // Prepares for uploading the parts of a large file.
  rpc StartLargeFile(StartLargeFileRequest) returns (StartLargeFileResponse) {
    option (google.api.http) = {
      post: "/b2api/v1/b2_start_large_file"
      body: "*"
    };
  }

  // Gets a URL to use for uploading parts of a large file.
  rpc GetUploadPartUrl(GetUploadPartUrlRequest) returns (GetUploadPartUrlResponse) {
    option (google.api.http) = {
      post: "/b2api/v1/b2_get_upload_part_url"
      body: "*"
    };
  }

  // Converts the parts that have been uploaded into a single B2 file.
  rpc FinishLargeFile(FinishLargeFileRequest) returns (FinishLargeFileResponse) {
    option (google.api.http) = {
      post: "/b2api/v1/b2_finish_large_file"
      body: "*"
    };
  }

  // Lists all of the versions of all of the files contained in one bucket, in
  // alphabetical order by file name, and by reverse of date/time uploaded for
  // versions of files with the same name.
  rpc ListFileVersions(ListFileVersionsRequest) returns (ListFileVersionsResponse) {
    option (google.api.http) = {
      post: "/b2api/v1/b2_list_file_versions"
      body: "*"
    };
  }
}
blazer-0.6.1/internal/pyre/pyre.go000066400000000000000000000020751451327606200171100ustar00rootroot00000000000000// Copyright 2018, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package pyre provides a gRPC-based implementation of B2, as well as a
// RESTful gateway on top of it.
package pyre

//go:generate protoc -I/usr/local/include -I. -I$GOPATH/src -I$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis --grpc-gateway_out=logtostderr=true:. proto/pyre.proto
//go:generate protoc -I/usr/local/include -I. -I$GOPATH/src -I$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis --go_out=plugins=grpc:. proto/pyre.proto
blazer-0.6.1/internal/pyre/simple.go000066400000000000000000000053311451327606200174200ustar00rootroot00000000000000// Copyright 2018, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
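// A sketch of the kind of request this file's upload handler accepts (the
// header set follows B2's b2_upload_file convention; the values below are
// illustrative only):
//
//	POST /b2api/v1/b2_upload_file/<bucketID>
//	Content-Type: application/octet-stream
//	Content-Length: 42
//	X-Bz-File-Name: hello.txt
//	X-Bz-Content-Sha1: <hex-encoded SHA1>
//	X-Bz-Info-src: illustrative-metadata
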
package pyre

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"

	"github.com/Backblaze/blazer/internal/b2types"
	"github.com/google/uuid"
)

const uploadFilePrefix = "/b2api/v1/b2_upload_file/"

type SimpleFileManager interface {
	Writer(bucket, name, id string) (io.WriteCloser, error)
}

type simpleFileServer struct {
	fm SimpleFileManager
}

type uploadRequest struct {
	name        string
	contentType string
	size        int64
	sha1        string
	bucket      string
	info        map[string]string
}

func parseUploadHeaders(r *http.Request) (*uploadRequest, error) {
	ur := &uploadRequest{info: make(map[string]string)}
	ur.name = r.Header.Get("X-Bz-File-Name")
	ur.contentType = r.Header.Get("Content-Type")
	ur.sha1 = r.Header.Get("X-Bz-Content-Sha1")
	size, err := strconv.ParseInt(r.Header.Get("Content-Length"), 10, 64)
	if err != nil {
		return nil, err
	}
	ur.size = size
	// Collect per-file metadata from the X-Bz-Info-* headers. Note that the
	// header name is the first argument to HasPrefix/TrimPrefix; reversing the
	// arguments silently drops every metadata header.
	for k := range r.Header {
		if !strings.HasPrefix(k, "X-Bz-Info-") {
			continue
		}
		name := strings.TrimPrefix(k, "X-Bz-Info-")
		ur.info[name] = r.Header.Get(k)
	}
	ur.bucket = strings.TrimPrefix(r.URL.Path, uploadFilePrefix)
	return ur, nil
}

func (fs *simpleFileServer) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	req, err := parseUploadHeaders(r)
	if err != nil {
		http.Error(rw, err.Error(), 500)
		fmt.Println("oh no")
		return
	}
	id := uuid.New().String()
	w, err := fs.fm.Writer(req.bucket, req.name, id)
	if err != nil {
		http.Error(rw, err.Error(), 500)
		fmt.Println("oh no")
		return
	}
	if _, err := io.Copy(w, io.LimitReader(r.Body, req.size)); err != nil {
		w.Close()
		http.Error(rw, err.Error(), 500)
		fmt.Println("oh no")
		return
	}
	if err := w.Close(); err != nil {
		http.Error(rw, err.Error(), 500)
		fmt.Println("oh no")
		return
	}
	resp := &b2types.UploadFileResponse{
		FileID:   id,
		Name:     req.name,
		SHA1:     req.sha1,
		BucketID: req.bucket,
	}
	if err := json.NewEncoder(rw).Encode(resp); err != nil {
		http.Error(rw, err.Error(), 500)
		fmt.Println("oh no")
		return
	}
}

func RegisterSimpleFileManagerOnMux(f SimpleFileManager, mux *http.ServeMux) {
	mux.Handle(uploadFilePrefix, &simpleFileServer{fm: f})
}
blazer-0.6.1/x/000077500000000000000000000000001451327606200132525ustar00rootroot00000000000000blazer-0.6.1/x/consistent/000077500000000000000000000000001451327606200154435ustar00rootroot00000000000000blazer-0.6.1/x/consistent/consistent.go000066400000000000000000000254671451327606200201740ustar00rootroot00000000000000// Copyright 2016, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package consistent implements an experimental interface for using B2 as a
// coordination primitive.
package consistent

import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"reflect"
	"time"

	"github.com/Backblaze/blazer/b2"
)

const metaKey = "blazer-meta-key-no-touchie"

var (
	errUpdateConflict = errors.New("update conflict")
	errNotInGroup     = errors.New("not in group")
)

// NewGroup creates a new consistent Group for the given bucket.
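//
// A minimal usage sketch (bucket setup and error handling elided; the group
// and object names are hypothetical):
//
//	g := consistent.NewGroup(bucket, "tester")
//	err := g.Operate(ctx, "counter", func(b []byte) ([]byte, error) {
//		n := 0
//		if len(b) > 0 {
//			n, _ = strconv.Atoi(string(b))
//		}
//		return []byte(strconv.Itoa(n + 1)), nil
//	})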
func NewGroup(bucket *b2.Bucket, name string) *Group {
	return &Group{
		name: name,
		b:    bucket,
	}
}

// Group represents a collection of B2 objects that can be modified in a
// consistent way. Objects in the same group contend with each other for
// updates, but there can only be so many (maximum of 10; fewer if there are
// other bucket attributes set) groups in a given bucket.
type Group struct {
	name string
	b    *b2.Bucket
	ba   *b2.BucketAttrs
}

// Mutex returns a new mutex on the given group. Only one caller can hold the
// lock on a mutex with a given name, for a given group.
func (g *Group) Mutex(ctx context.Context, name string) *Mutex {
	return &Mutex{
		g:    g,
		name: name,
		ctx:  ctx,
	}
}

// OperateStream calls f with the contents of the group object given by name,
// and updates that object with the output of f if f returns no error.
// OperateStream guarantees that no other callers have modified the contents
// of name in the meantime (as long as all other callers are using this
// package). It may call f any number of times and, as a result, the potential
// data transfer is unbounded. Callers should have f fail after a given number
// of attempts if this is unacceptable.
//
// The io.Reader that f returns is guaranteed to be read until at least the
// first error. Callers must ensure that this is sufficient for the reader to
// clean up after itself.
func (g *Group) OperateStream(ctx context.Context, name string, f func(io.Reader) (io.Reader, error)) error {
	for {
		r, err := g.NewReader(ctx, name)
		if err != nil && err != errNotInGroup {
			return err
		}
		out, err := f(r)
		r.Close()
		if err != nil {
			return err
		}
		defer io.Copy(ioutil.Discard, out) // ensure the reader is read
		w, err := g.NewWriter(ctx, r.Key, name)
		if err != nil {
			return err
		}
		if _, err := io.Copy(w, out); err != nil {
			return err
		}
		if err := w.Close(); err != nil {
			if err == errUpdateConflict {
				continue
			}
			return err
		}
		return nil
	}
}

// Operate uses OperateStream to act on byte slices.
func (g *Group) Operate(ctx context.Context, name string, f func([]byte) ([]byte, error)) error {
	return g.OperateStream(ctx, name, func(r io.Reader) (io.Reader, error) {
		b, err := ioutil.ReadAll(r)
		if b2.IsNotExist(err) {
			b = nil
			err = nil
		}
		if err != nil {
			return nil, err
		}
		bs, err := f(b)
		if err != nil {
			return nil, err
		}
		return bytes.NewReader(bs), nil
	})
}

// OperateJSON is a convenience function for transforming JSON data in B2 in a
// consistent way. Callers should pass a function f which accepts a pointer to
// a struct of a given type and transforms it into another struct (ideally but
// not necessarily of the same type). Callers should also pass an example
// struct, t, or a pointer to it, that is the same type. t will not be
// altered. If there is no existing file, f will be called with a pointer to
// an empty struct of type t. Otherwise, it will be called with a pointer to a
// struct filled out with the given JSON.
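//
// A sketch, assuming a caller-defined type (here a hypothetical jsonThing
// with an int field Boop):
//
//	err := g.OperateJSON(ctx, "thing.json", &jsonThing{}, func(v interface{}) (interface{}, error) {
//		jt := v.(*jsonThing)
//		return &jsonThing{Boop: jt.Boop + 1}, nil
//	})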
func (g *Group) OperateJSON(ctx context.Context, name string, t interface{}, f func(interface{}) (interface{}, error)) error {
	jsonType := reflect.TypeOf(t)
	for jsonType.Kind() == reflect.Ptr {
		jsonType = jsonType.Elem()
	}
	return g.OperateStream(ctx, name, func(r io.Reader) (io.Reader, error) {
		in := reflect.New(jsonType).Interface()
		if err := json.NewDecoder(r).Decode(in); err != nil && err != io.EOF && !b2.IsNotExist(err) {
			return nil, err
		}
		out, err := f(in)
		if err != nil {
			return nil, err
		}
		pr, pw := io.Pipe()
		go func() { pw.CloseWithError(json.NewEncoder(pw).Encode(out)) }()
		return closeAfterReading{rc: pr}, nil
	})
}

// closeAfterReading closes the underlying reader on the first non-nil error.
type closeAfterReading struct {
	rc io.ReadCloser
}

func (car closeAfterReading) Read(p []byte) (int, error) {
	n, err := car.rc.Read(p)
	if err != nil {
		car.rc.Close()
	}
	return n, err
}

// Writer is an io.WriteCloser.
type Writer struct {
	ctx    context.Context
	wc     io.WriteCloser
	name   string
	suffix string
	key    string
	g      *Group
}

// Write implements io.Writer.
func (w Writer) Write(p []byte) (int, error) {
	return w.wc.Write(p)
}

// Close writes any remaining data into B2 and updates the group to reflect the
// contents of the new object. If the group object has been modified, Close()
// will fail.
func (w Writer) Close() error {
	if err := w.wc.Close(); err != nil {
		return err
	}
	// TODO: maybe see if you can cut down on calls to info()
	for {
		ci, err := w.g.info(w.ctx)
		if err != nil {
			// We can't verify the update; delete the new version.
			w.g.b.Object(w.name + "/" + w.suffix).Delete(w.ctx)
			return err
		}
		old, ok := ci.Locations[w.name]
		if ok && old != w.key {
			// Someone else replaced the object first; delete the new version.
			w.g.b.Object(w.name + "/" + w.suffix).Delete(w.ctx)
			return errUpdateConflict
		}
		ci.Locations[w.name] = w.suffix
		if err := w.g.save(w.ctx, ci); err != nil {
			if err == errUpdateConflict {
				continue
			}
			w.g.b.Object(w.name + "/" + w.suffix).Delete(w.ctx)
			return err
		}
		// Replacement successful; delete the old version.
		w.g.b.Object(w.name + "/" + w.key).Delete(w.ctx)
		return nil
	}
}

// Reader is an io.ReadCloser. Key must be passed to NewWriter.
type Reader struct {
	r   io.ReadCloser
	Key string
}

func (r Reader) Read(p []byte) (int, error) {
	if r.r == nil {
		return 0, io.EOF
	}
	return r.r.Read(p)
}

func (r Reader) Close() error {
	if r.r == nil {
		return nil
	}
	return r.r.Close()
}

// NewWriter creates a Writer and prepares it to be updated. The key argument
// should come from the Key field of a Reader; if Writer.Close() returns with
// no error, then the underlying group object was successfully updated from the
// data available from the Reader with no intervening writes. New objects can
// be created with an empty key.
func (g *Group) NewWriter(ctx context.Context, key, name string) (Writer, error) {
	suffix, err := random()
	if err != nil {
		return Writer{}, err
	}
	return Writer{
		ctx:    ctx,
		wc:     g.b.Object(name + "/" + suffix).NewWriter(ctx),
		name:   name,
		suffix: suffix,
		key:    key,
		g:      g,
	}, nil
}

// NewReader creates a Reader with the current version of the object, as well
// as that object's update key.
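//
// A sketch of the manual read-modify-write cycle (error handling elided);
// Operate and OperateStream wrap this same pattern:
//
//	r, _ := g.NewReader(ctx, "obj")
//	b, _ := ioutil.ReadAll(r)
//	r.Close()
//	w, _ := g.NewWriter(ctx, r.Key, "obj")
//	w.Write(append(b, '\n'))
//	err := w.Close() // fails if another writer got there first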
func (g *Group) NewReader(ctx context.Context, name string) (Reader, error) {
	ci, err := g.info(ctx)
	if err != nil {
		return Reader{}, err
	}
	suffix, ok := ci.Locations[name]
	if !ok {
		return Reader{}, errNotInGroup
	}
	return Reader{
		r:   g.b.Object(name + "/" + suffix).NewReader(ctx),
		Key: suffix,
	}, nil
}

func (g *Group) info(ctx context.Context) (*consistentInfo, error) {
	attrs, err := g.b.Attrs(ctx)
	if err != nil {
		return nil, err
	}
	g.ba = attrs
	imap := attrs.Info
	if imap == nil {
		// Treat a bucket with no info map like a bucket with no group entry;
		// returning a nil *consistentInfo here would panic every caller that
		// dereferences Locations.
		return &consistentInfo{
			Version:   1,
			Locations: make(map[string]string),
		}, nil
	}
	enc, ok := imap[metaKey+"-"+g.name]
	if !ok {
		return &consistentInfo{
			Version:   1,
			Locations: make(map[string]string),
		}, nil
	}
	b, err := base64.StdEncoding.DecodeString(enc)
	if err != nil {
		return nil, err
	}
	ci := &consistentInfo{}
	if err := json.Unmarshal(b, ci); err != nil {
		return nil, err
	}
	if ci.Locations == nil {
		ci.Locations = make(map[string]string)
	}
	return ci, nil
}

func (g *Group) save(ctx context.Context, ci *consistentInfo) error {
	ci.Serial++
	b, err := json.Marshal(ci)
	if err != nil {
		return err
	}
	s := base64.StdEncoding.EncodeToString(b)
	for {
		oldAI, err := g.info(ctx)
		if err != nil {
			return err
		}
		if oldAI.Serial != ci.Serial-1 {
			return errUpdateConflict
		}
		if g.ba.Info == nil {
			g.ba.Info = make(map[string]string)
		}
		g.ba.Info[metaKey+"-"+g.name] = s
		err = g.b.Update(ctx, g.ba)
		if err == nil {
			return nil
		}
		if !b2.IsUpdateConflict(err) {
			return err
		}
		// Bucket update conflict; try again.
	}
}

// List returns a list of all the group objects.
func (g *Group) List(ctx context.Context) ([]string, error) {
	ci, err := g.info(ctx)
	if err != nil {
		return nil, err
	}
	var l []string
	for name := range ci.Locations {
		l = append(l, name)
	}
	return l, nil
}

// A Mutex is a sync.Locker that is backed by data in B2.
type Mutex struct {
	g    *Group
	name string
	ctx  context.Context
}

// Lock locks the mutex. If the mutex is already locked, Lock will wait,
// polling at one-second intervals, until it can acquire the lock.
func (m *Mutex) Lock() {
	cont := errors.New("continue")
	for {
		err := m.g.Operate(m.ctx, m.name, func(b []byte) ([]byte, error) {
			if len(b) != 0 {
				return nil, cont
			}
			return []byte{1}, nil
		})
		if err == nil {
			return
		}
		if err != cont {
			panic(err)
		}
		time.Sleep(time.Second)
	}
}

// Unlock unconditionally unlocks the mutex. This allows programs to clear
// stale locks.
func (m *Mutex) Unlock() {
	if err := m.g.Operate(m.ctx, m.name, func([]byte) ([]byte, error) {
		return nil, nil
	}); err != nil {
		panic(err)
	}
}

type consistentInfo struct {
	Version int

	// Serial is incremented for every version saved. If we ensure that
	// current.Serial = 1 + previous.Serial, and that the bucket metadata is
	// updated cleanly, then we know that the version we saved is the direct
	// successor to the version we had. If the bucket metadata doesn't update
	// cleanly, but the serial relation holds true for the new AI struct, then we
	// can retry without bothering the user. However, if the serial relation no
	// longer holds true, it means someone else has updated AI and we have to ask
	// the user to redo everything they've done.
	//
	// However, it is still necessary for higher level constructs to confirm that
	// the serial number they expect is good. The writer does this, for example,
	// by comparing the "key" of the file it is replacing.
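	//
	// For example: a writer that read the info at serial 7 may only install
	// serial 8; if save finds any other value, it reports an update conflict
	// and the caller's work is retried or abandoned.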
Serial int Locations map[string]string } func random() (string, error) { b := make([]byte, 20) if _, err := rand.Read(b); err != nil { return "", err } return fmt.Sprintf("%x", b), nil } blazer-0.6.1/x/consistent/consistent_test.go000066400000000000000000000075111451327606200212260ustar00rootroot00000000000000package consistent import ( "context" "io/ioutil" "os" "strconv" "sync" "sync/atomic" "testing" "time" "github.com/Backblaze/blazer/b2" ) const ( apiID = "B2_ACCOUNT_ID" apiKey = "B2_SECRET_KEY" bucketName = "consistobucket" ) func TestOperationLive(t *testing.T) { ctx := context.Background() bucket, done := startLiveTest(ctx, t) defer done() g := NewGroup(bucket, "tester") name := "some_kinda_name/thing.txt" var wg sync.WaitGroup for i := 0; i < 10; i++ { wg.Add(1) i := i go func() { defer wg.Done() for j := 0; j < 10; j++ { var n int if err := g.Operate(ctx, name, func(b []byte) ([]byte, error) { if len(b) > 0 { i, err := strconv.Atoi(string(b)) if err != nil { return nil, err } n = i } return []byte(strconv.Itoa(n + 1)), nil }); err != nil { t.Error(err) } t.Logf("thread %d: successful %d++", i, n) } }() } wg.Wait() r, err := g.NewReader(ctx, name) if err != nil { t.Fatal(err) } defer r.Close() b, err := ioutil.ReadAll(r) if err != nil { t.Fatal(err) } n, err := strconv.Atoi(string(b)) if err != nil { t.Fatal(err) } if n != 100 { t.Errorf("result: got %d, want 100", n) } } type jsonThing struct { Boop int `json:"boop_field"` Thread int `json:"thread_id"` } func TestOperationJSONLive(t *testing.T) { ctx := context.Background() bucket, done := startLiveTest(ctx, t) defer done() g := NewGroup(bucket, "tester") name := "some_kinda_json/thing.json" var wg sync.WaitGroup for i := 0; i < 4; i++ { wg.Add(1) i := i go func() { var n int defer wg.Done() for j := 0; j < 4; j++ { // Pass both a struct and a pointer to a struct. 
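				// (OperateJSON dereferences pointer types via reflection, so both
				// forms behave identically.)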
				var face interface{}
				face = jsonThing{}
				if j%2 == 0 {
					face = &jsonThing{}
				}
				if err := g.OperateJSON(ctx, name, face, func(j interface{}) (interface{}, error) {
					jt := j.(*jsonThing)
					n = jt.Boop
					return &jsonThing{
						Boop:   jt.Boop + 1,
						Thread: i,
					}, nil
				}); err != nil {
					t.Error(err)
				}
				t.Logf("thread %d: successful %d++", i, n)
			}
		}()
	}
	wg.Wait()

	if err := g.OperateJSON(ctx, name, &jsonThing{}, func(i interface{}) (interface{}, error) {
		jt := i.(*jsonThing)
		if jt.Boop != 16 {
			t.Errorf("got %d boops; want 16", jt.Boop)
		}
		return nil, nil
	}); err != nil {
		t.Error(err)
	}
}

func TestMutex(t *testing.T) {
	ctx := context.Background()
	bucket, done := startLiveTest(ctx, t)
	defer done()

	g := NewGroup(bucket, "tester")
	m := g.Mutex(ctx, "mootex")
	var a int32
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			for j := 0; j < 5; j++ {
				m.Lock()
				// Report failures with Error rather than Fatal: Fatal may only be
				// called from the test goroutine.
				new := atomic.AddInt32(&a, 1)
				if new != 1 {
					t.Errorf("two threads locked at once")
				}
				time.Sleep(20 * time.Millisecond)
				new = atomic.AddInt32(&a, -1)
				if new != 0 {
					t.Errorf("two threads locked at once")
				}
				t.Logf("thread %d: lock %d", i, j)
				m.Unlock()
			}
		}(i)
	}
	wg.Wait()
}

func startLiveTest(ctx context.Context, t *testing.T) (*b2.Bucket, func()) {
	id := os.Getenv(apiID)
	key := os.Getenv(apiKey)
	if id == "" || key == "" {
		t.Skipf("B2_ACCOUNT_ID or B2_SECRET_KEY unset; skipping integration tests")
		return nil, nil
	}
	client, err := b2.NewClient(ctx, id, key)
	if err != nil {
		t.Fatal(err)
		return nil, nil
	}
	bucket, err := client.NewBucket(ctx, id+"-"+bucketName, nil)
	if err != nil {
		t.Fatal(err)
		return nil, nil
	}
	f := func() {
		iter := bucket.List(ctx, b2.ListHidden())
		for iter.Next() {
			if err := iter.Object().Delete(ctx); err != nil {
				t.Error(err)
			}
		}
		if err := iter.Err(); err != nil && !b2.IsNotExist(err) {
			t.Error(err)
		}
		if err := bucket.Delete(ctx); err != nil && !b2.IsNotExist(err) {
			t.Error(err)
		}
	}
	return bucket, f
}

type object struct {
	o   *b2.Object
	err error
}
blazer-0.6.1/x/transport/000077500000000000000000000000001451327606200153065ustar00rootroot00000000000000blazer-0.6.1/x/transport/transport.go000066400000000000000000000121701451327606200176720ustar00rootroot00000000000000// Copyright 2017, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package transport provides http.RoundTrippers that may be useful to clients
// of Blazer.
package transport

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"net/http"
	"strings"
	"sync/atomic"
	"time"
)

// WithFailures returns an http.RoundTripper that wraps an existing
// RoundTripper, causing failures according to the options given. If rt is
// nil, the http.DefaultTransport is wrapped.
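//
// A typical test wiring (a sketch; the failure mix is arbitrary):
//
//	rt := transport.WithFailures(nil,
//		transport.FailureRate(0.1),
//		transport.Response(503),
//		transport.MatchPathSubstring("b2_upload_file"),
//	)
//	client := &http.Client{Transport: rt}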
func WithFailures(rt http.RoundTripper, opts ...FailureOption) http.RoundTripper {
	if rt == nil {
		rt = http.DefaultTransport
	}
	o := &options{
		rt: rt,
	}
	for _, opt := range opts {
		opt(o)
	}
	return o
}

type options struct {
	pathSubstrings []string
	failureRate    float64
	status         int
	stall          time.Duration
	rt             http.RoundTripper
	msg            string
	trg            *triggerReaderGroup
}

func (o *options) doRequest(req *http.Request) (*http.Response, error) {
	if o.trg != nil && req.Body != nil {
		req.Body = o.trg.new(req.Body)
	}
	resp, err := o.rt.RoundTrip(req)
	if resp != nil && o.trg != nil {
		resp.Body = o.trg.new(resp.Body)
	}
	return resp, err
}

func (o *options) RoundTrip(req *http.Request) (*http.Response, error) {
	// TODO: fix triggering conditions
	if rand.Float64() > o.failureRate {
		return o.doRequest(req)
	}

	var match bool
	if len(o.pathSubstrings) == 0 {
		match = true
	}
	for _, ss := range o.pathSubstrings {
		if strings.Contains(req.URL.Path, ss) {
			match = true
			break
		}
	}
	if !match {
		return o.doRequest(req)
	}

	if o.status > 0 {
		resp := &http.Response{
			Status:     fmt.Sprintf("%d %s", o.status, http.StatusText(o.status)),
			StatusCode: o.status,
			Body:       ioutil.NopCloser(strings.NewReader(o.msg)),
			Request:    req,
		}
		return resp, nil
	}

	if o.stall > 0 {
		ctx := req.Context()
		select {
		case <-time.After(o.stall):
		case <-ctx.Done():
		}
	}
	return o.doRequest(req)
}

// A FailureOption specifies the kind of failure that the RoundTripper should
// simulate.
type FailureOption func(*options)

// MatchPathSubstring restricts the RoundTripper to URLs whose paths contain
// the given string. The default behavior is to match all paths.
func MatchPathSubstring(s string) FailureOption {
	return func(o *options) {
		o.pathSubstrings = append(o.pathSubstrings, s)
	}
}

// FailureRate causes the RoundTripper to fail a certain percentage of the
// time. rate should be a number between 0 and 1, where 0 will never fail and
// 1 will always fail. The default is never to fail.
func FailureRate(rate float64) FailureOption {
	return func(o *options) {
		o.failureRate = rate
	}
}

// Response simulates a given status code. The returned http.Response will
// have its Status, StatusCode, and Body (with any predefined message) set.
func Response(status int) FailureOption {
	return func(o *options) {
		o.status = status
	}
}

// Stall simulates a network connection failure by stalling for the given
// duration.
func Stall(dur time.Duration) FailureOption {
	return func(o *options) {
		o.stall = dur
	}
}

// Body sets the message used as the response body when a specific Response is
// requested.
func Body(msg string) FailureOption {
	return func(o *options) {
		o.msg = msg
	}
}

// Trigger will raise the RoundTripper's failure rate to 100% when the given
// context is closed.
func Trigger(ctx context.Context) FailureOption {
	return func(o *options) {
		go func() {
			<-ctx.Done()
			o.failureRate = 1
		}()
	}
}

// AfterNBytes will call effect once (roughly) n bytes have gone over the wire.
// Both sent and received bytes are counted against the total. Only bytes in
// the body of an HTTP request are currently counted; this may change in the
// future. effect will only be called once, and it will block (allowing
// callers to simulate connection hangs).
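//
// For example, to simulate a connection that hangs partway through a large
// transfer (a sketch; the byte count is arbitrary):
//
//	hang := make(chan struct{})
//	rt := transport.WithFailures(nil, transport.AfterNBytes(1<<20, func() { <-hang }))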
func AfterNBytes(n int, effect func()) FailureOption { return func(o *options) { o.trg = &triggerReaderGroup{ bytes: int64(n), trigger: effect, } } } type triggerReaderGroup struct { bytes int64 trigger func() triggered int64 } func (rg *triggerReaderGroup) new(rc io.ReadCloser) io.ReadCloser { return &triggerReader{ ReadCloser: rc, bytes: &rg.bytes, trigger: rg.trigger, triggered: &rg.triggered, } } type triggerReader struct { io.ReadCloser bytes *int64 trigger func() triggered *int64 } func (r *triggerReader) Read(p []byte) (int, error) { n, err := r.ReadCloser.Read(p) if atomic.AddInt64(r.bytes, -int64(n)) < 0 && atomic.CompareAndSwapInt64(r.triggered, 0, 1) { // Can't use sync.Once because it blocks for *all* callers until Do returns. r.trigger() } return n, err } blazer-0.6.1/x/window/000077500000000000000000000000001451327606200145615ustar00rootroot00000000000000blazer-0.6.1/x/window/accum_test.go000066400000000000000000000025301451327606200172370ustar00rootroot00000000000000// Copyright 2018, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package window_test import ( "fmt" "time" "github.com/Backblaze/blazer/x/window" ) type Accumulator struct { w *window.Window } func (a Accumulator) Add(s string) { a.w.Insert([]string{s}) } func (a Accumulator) All() []string { v := a.w.Reduce() return v.([]string) } func NewAccum(size time.Duration) Accumulator { r := func(i, j interface{}) interface{} { a, ok := i.([]string) if !ok { a = nil } b, ok := j.([]string) if !ok { b = nil } for _, s := range b { a = append(a, s) } return a } return Accumulator{w: window.New(size, time.Second, r)} } func Example_accumulator() { a := NewAccum(time.Minute) a.Add("this") a.Add("is") a.Add("that") fmt.Printf("total: %v\n", a.All()) // Output: // total: [this is that] } blazer-0.6.1/x/window/counter_test.go000066400000000000000000000023071451327606200176300ustar00rootroot00000000000000// Copyright 2018, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package window_test import ( "fmt" "time" "github.com/Backblaze/blazer/x/window" ) type Counter struct { w *window.Window } func (c Counter) Add() { c.w.Insert(1) } func (c Counter) Count() int { v := c.w.Reduce() return v.(int) } func New(size time.Duration) Counter { r := func(i, j interface{}) interface{} { a, ok := i.(int) if !ok { a = 0 } b, ok := j.(int) if !ok { b = 0 } return a + b } return Counter{w: window.New(size, time.Second, r)} } func Example_counter() { c := New(time.Minute) c.Add() c.Add() c.Add() fmt.Printf("total: %d\n", c.Count()) // Output: // total: 3 } blazer-0.6.1/x/window/window.go000066400000000000000000000102761451327606200164250ustar00rootroot00000000000000// Copyright 2018, the Blazer authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package window provides a type for efficiently recording events as they // occur over a given span of time. Events added to the window will remain // until the time expires. package window import ( "sync" "time" ) // A Window efficiently records events that have occurred over a span of time // extending from some fixed interval ago to now. Events that pass beyond this // horizon are discarded. type Window struct { mu sync.Mutex events []interface{} res time.Duration last time.Time reduce Reducer forever bool e interface{} } // A Reducer should take two values from the window and combine them into a // third value that will be stored in the window. The values i or j may be // nil. The underlying types for both arguments and the output should be // identical. // // If the reducer is any kind of slice or list, then data usage will grow // linearly with the number of events added to the window. // // Reducer will be called on its own output: Reducer(Reducer(x, y), z). type Reducer func(i, j interface{}) interface{} // New returns an initialized window for events over the given duration at the // given resolution. Windows with tight resolution (i.e., small values for // that argument) will be more accurate, at the cost of some memory. // // A size of 0 means "forever"; old events will never be removed. func New(size, resolution time.Duration, r Reducer) *Window { if size > 0 { return &Window{ res: resolution, events: make([]interface{}, size/resolution), reduce: r, } } return &Window{ forever: true, reduce: r, } } func (w *Window) bucket(now time.Time) int { nanos := now.UnixNano() abs := nanos / int64(w.res) return int(abs) % len(w.events) } // sweep keeps the window valid. It needs to be called from every method that // views or updates the window, and the caller needs to hold the mutex. func (w *Window) sweep(now time.Time) { if w.forever { return } defer func() { w.last = now }() // This compares now and w.last's monotonic clocks. 
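	// (When both times carry a monotonic reading, Sub is immune to wall-clock
	// adjustments; diff can still be negative when callers pass explicit
	// out-of-order timestamps, as insertAt permits.)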
	diff := now.Sub(w.last)
	if diff < 0 {
		// Time went backwards somehow; zero all events and return.
		for i := range w.events {
			w.events[i] = nil
		}
		return
	}
	last := now.Add(-diff)

	b := w.bucket(now)
	p := w.bucket(last)
	if b == p && diff <= w.res {
		// We're in the same bucket as the previous sweep, so all buckets are
		// valid.
		return
	}

	if diff > w.res*time.Duration(len(w.events)) {
		// We've gone longer than this window measures since the last sweep, just
		// zero the thing and have done.
		for i := range w.events {
			w.events[i] = nil
		}
		return
	}

	// Expire all invalid buckets: every bucket touched between the previous
	// sweep and now, including the current bucket but not including the
	// previous bucket.
	old := int64(last.UnixNano()) / int64(w.res)
	new := int64(now.UnixNano()) / int64(w.res)
	for i := old + 1; i <= new; i++ {
		b := int(i) % len(w.events)
		w.events[b] = nil
	}
}

// Insert adds the given event.
func (w *Window) Insert(e interface{}) {
	w.insertAt(time.Now(), e)
}

func (w *Window) insertAt(t time.Time, e interface{}) {
	w.mu.Lock()
	defer w.mu.Unlock()

	if w.forever {
		w.e = w.reduce(w.e, e)
		return
	}

	w.sweep(t)
	w.events[w.bucket(t)] = w.reduce(w.events[w.bucket(t)], e)
}

// Reduce runs the window's reducer over the valid values and returns the
// result.
func (w *Window) Reduce() interface{} {
	return w.reducedAt(time.Now())
}

func (w *Window) reducedAt(t time.Time) interface{} {
	w.mu.Lock()
	defer w.mu.Unlock()

	if w.forever {
		return w.e
	}

	w.sweep(t)
	var n interface{}
	for i := range w.events {
		n = w.reduce(n, w.events[i])
	}
	return n
}
blazer-0.6.1/x/window/window_test.go000066400000000000000000000054571451327606200174670ustar00rootroot00000000000000// Copyright 2018, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package window import ( "testing" "time" ) type epair struct { e interface{} t time.Time } func adder(i, j interface{}) interface{} { a, ok := i.(int) if !ok { a = 0 } b, ok := j.(int) if !ok { b = 0 } return a + b } func TestWindows(t *testing.T) { table := []struct { size, dur time.Duration incs []epair look time.Time reduce Reducer want interface{} }{ { size: time.Minute, dur: time.Second, incs: []epair{ // year, month, day, hour, min, sec, nano {t: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 1, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 2, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 3, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 4, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 5, 0, time.UTC), e: 1}, }, look: time.Date(2000, 1, 1, 0, 1, 0, 0, time.UTC), want: 5, reduce: adder, }, { incs: []epair{ // year, month, day, hour, min, sec, nano {t: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 1, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 2, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 3, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 4, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 5, 0, time.UTC), e: 1}, }, want: 6, reduce: adder, }, { // what happens if time goes backwards? size: time.Minute, dur: time.Second, incs: []epair{ {t: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 1, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 2, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 3, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 4, 0, time.UTC), e: 1}, {t: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), e: 1}, }, look: time.Date(2000, 1, 1, 0, 0, 30, 0, time.UTC), want: 1, reduce: adder, }, } for _, e := range table { w := New(e.size, e.dur, e.reduce) for _, inc := range e.incs { w.insertAt(inc.t, inc.e) } ct := w.reducedAt(e.look) if ct != e.want { t.Errorf("reducedAt(%v) got %v, want %v", e.look, ct, e.want) } } }