pax_global_header00006660000000000000000000000064136751564630014532gustar00rootroot0000000000000052 comment=48358e1de5110852097ebbc11c53581d64d47300 azure-storage-blob-go-0.10.0/000077500000000000000000000000001367515646300157175ustar00rootroot00000000000000azure-storage-blob-go-0.10.0/.github/000077500000000000000000000000001367515646300172575ustar00rootroot00000000000000azure-storage-blob-go-0.10.0/.github/ISSUE_TEMPLATE.md000066400000000000000000000003601367515646300217630ustar00rootroot00000000000000### Which version of the SDK was used? ### Which platform are you using? (ex: Windows, Linux, Debian) ### What problem was encountered? ### How can we reproduce the problem in the simplest way? ### Have you found a mitigation/solution? azure-storage-blob-go-0.10.0/.gitignore000066400000000000000000000114301367515646300177060ustar00rootroot00000000000000## ignore .DS_Store on macOS *.DS_Store ## Ignore Visual Studio temporary files, build results, and ## files generated by popular Visual Studio add-ons. 
## ## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore # User-specific files *.suo *.user *.userosscache *.sln.docstates # User-specific files (MonoDevelop/Xamarin Studio) *.userprefs # Build results [Dd]ebug/ [Dd]ebugPublic/ [Rr]elease/ [Rr]eleases/ x64/ x86/ bld/ [Bb]in/ [Oo]bj/ [Ll]og/ # Visual Studio 2015 cache/options directory .vs/ # Uncomment if you have tasks that create the project's static files in wwwroot #wwwroot/ # MSTest test Results [Tt]est[Rr]esult*/ [Bb]uild[Ll]og.* # NUNIT *.VisualState.xml TestResult.xml # Build Results of an ATL Project [Dd]ebugPS/ [Rr]eleasePS/ dlldata.c # .NET Core project.lock.json project.fragment.lock.json artifacts/ **/Properties/launchSettings.json *_i.c *_p.c *_i.h *.ilk *.meta *.obj *.pch *.pdb *.pgc *.pgd *.rsp *.sbr *.tlb *.tli *.tlh *.tmp *.tmp_proj *.log *.vspscc *.vssscc .builds *.pidb *.svclog *.scc # Chutzpah Test files _Chutzpah* # Visual C++ cache files ipch/ *.aps *.ncb *.opendb *.opensdf *.sdf *.cachefile *.VC.db *.VC.VC.opendb # Visual Studio profiler *.psess *.vsp *.vspx *.sap # TFS 2012 Local Workspace $tf/ # Guidance Automation Toolkit *.gpState # ReSharper is a .NET coding add-in _ReSharper*/ *.[Rr]e[Ss]harper *.DotSettings.user # JustCode is a .NET coding add-in .JustCode # TeamCity is a build add-in _TeamCity* # DotCover is a Code Coverage Tool *.dotCover # Visual Studio code coverage results *.coverage *.coveragexml # NCrunch _NCrunch_* .*crunch*.local.xml nCrunchTemp_* # MightyMoose *.mm.* AutoTest.Net/ # Web workbench (sass) .sass-cache/ # Installshield output folder [Ee]xpress/ # DocProject is a documentation generator add-in DocProject/buildhelp/ DocProject/Help/*.HxT DocProject/Help/*.HxC DocProject/Help/*.hhc DocProject/Help/*.hhk DocProject/Help/*.hhp DocProject/Help/Html2 DocProject/Help/html # Click-Once directory publish/ # Publish Web Output *.[Pp]ublish.xml *.azurePubxml # TODO: Comment the next line if you want to checkin your web deploy settings # 
but database connection strings (with potential passwords) will be unencrypted *.pubxml *.publishproj # Microsoft Azure Web App publish settings. Comment the next line if you want to # checkin your Azure Web App publish settings, but sensitive information contained # in these scripts will be unencrypted PublishScripts/ # NuGet Packages *.nupkg # The packages folder can be ignored because of Package Restore **/packages/* # except build/, which is used as an MSBuild target. !**/packages/build/ # Uncomment if necessary however generally it will be regenerated when needed #!**/packages/repositories.config # NuGet v3's project.json files produces more ignorable files *.nuget.props *.nuget.targets # Microsoft Azure Build Output csx/ *.build.csdef # Microsoft Azure Emulator ecf/ rcf/ # Windows Store app package directories and files AppPackages/ BundleArtifacts/ Package.StoreAssociation.xml _pkginfo.txt # Visual Studio cache files # files ending in .cache can be ignored *.[Cc]ache # but keep track of directories ending in .cache !*.[Cc]ache/ # Others ClientBin/ ~$* *~ *.dbmdl *.dbproj.schemaview *.jfm *.pfx *.publishsettings orleans.codegen.cs # Since there are multiple workflows, uncomment next line to ignore bower_components # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) #bower_components/ # RIA/Silverlight projects Generated_Code/ # Backup & report files from converting an old project file # to a newer Visual Studio version. 
Backup files are not needed, # because we have git ;-) _UpgradeReport_Files/ Backup*/ UpgradeLog*.XML UpgradeLog*.htm # SQL Server files *.mdf *.ldf *.ndf # Business Intelligence projects *.rdl.data *.bim.layout *.bim_*.settings # Microsoft Fakes FakesAssemblies/ # GhostDoc plugin setting file *.GhostDoc.xml # Node.js Tools for Visual Studio .ntvs_analysis.dat node_modules/ # Typescript v1 declaration files typings/ # Visual Studio 6 build log *.plg # Visual Studio 6 workspace options file *.opt # Visual Studio 6 auto-generated workspace file (contains which files were open etc.) *.vbw # Visual Studio LightSwitch build output **/*.HTMLClient/GeneratedArtifacts **/*.DesktopClient/GeneratedArtifacts **/*.DesktopClient/ModelManifest.xml **/*.Server/GeneratedArtifacts **/*.Server/ModelManifest.xml _Pvt_Extensions # Paket dependency manager .paket/paket.exe paket-files/ # FAKE - F# Make .fake/ # JetBrains Rider .idea/ *.sln.iml # CodeRush .cr/ # Python Tools for Visual Studio (PTVS) __pycache__/ *.pyc # Cake - Uncomment if you are using it # tools/** # !tools/packages.config # Telerik's JustMock configuration file *.jmconfig # BizTalk build output *.btp.cs *.btm.cs *.odx.cs *.xsd.cs vendor/ *.env azure-storage-blob-go-0.10.0/.travis.yml000066400000000000000000000003451367515646300200320ustar00rootroot00000000000000language: go go: - "1.13" script: - export GO111MODULE=on - GOOS=linux go build ./azblob - GOOS=darwin go build ./azblob - GOOS=windows go build ./azblob - GOOS=solaris go build ./azblob - go test -race -short -cover -v ./azblob azure-storage-blob-go-0.10.0/BreakingChanges.md000066400000000000000000000003561367515646300212600ustar00rootroot00000000000000# Breaking Changes > See the [Change Log](ChangeLog.md) for a summary of storage library changes. ## Version 0.3.0: - Removed most panics from the library. Several functions now return an error. 
- Removed 2016 and 2017 service versions.azure-storage-blob-go-0.10.0/ChangeLog.md000066400000000000000000000020671367515646300200750ustar00rootroot00000000000000# Change Log > See [BreakingChanges](BreakingChanges.md) for a detailed list of API breaks. ## Version 0.10.0: - Added support for CopyBlobFromURL (sync) and upgrade version to 2019-02-02. - Provided default values for UploadStreamToBlockBlobOptions and refactored UploadStreamToBlockBlob. - Added support for multiple start/expiry time formats. - Added Solaris support. - Enabled recovering from a unexpectedEOF error. ## Version 0.9.0: - Updated go.mod to fix dependency issues. ## Version 0.8.0: - Fixed error handling in high-level function DoBatchTransfer, and made it public for easy customization ## Version 0.7.0: - Added the ability to obtain User Delegation Keys (UDK) - Added the ability to create User Delegation SAS tokens from UDKs - Added support for generating and using blob snapshot SAS tokens - General secondary host improvements ## Version 0.3.0: - Removed most panics from the library. Several functions now return an error. - Removed 2016 and 2017 service versions. - Added support for module. - Fixed chunking bug in highlevel function uploadStream.azure-storage-blob-go-0.10.0/Gopkg.lock000066400000000000000000000156151367515646300176500ustar00rootroot00000000000000# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
[[projects]] digest = "1:6b1426cad7057b717351eacf5b6fe70f053f11aac1ce254bbf2fd72c031719eb" name = "contrib.go.opencensus.io/exporter/ocagent" packages = ["."] pruneopts = "UT" revision = "dcb33c7f3b7cfe67e8a2cea10207ede1b7c40764" version = "v0.4.12" [[projects]] digest = "1:602649ff074ccee9273e1d3b25c4069f13a70fa0c232957c7d68a6f02fb7a9ea" name = "github.com/Azure/azure-pipeline-go" packages = ["pipeline"] pruneopts = "UT" revision = "105d6349faa1dec531c0b932b5863540c1f6aafb" version = "v0.2.1" [[projects]] digest = "1:d5800d9f8f0d48f84a2a45adeca9eee0e129f7d80b5c3d9770e90a4e5162058b" name = "github.com/Azure/go-autorest" packages = [ "autorest/adal", "autorest/date", "tracing", ] pruneopts = "UT" revision = "09205e8f6711a776499a14cf8adc6bd380db5d81" version = "v12.2.0" [[projects]] digest = "1:fdb4ed936abeecb46a8c27dcac83f75c05c87a46d9ec7711411eb785c213fa02" name = "github.com/census-instrumentation/opencensus-proto" packages = [ "gen-go/agent/common/v1", "gen-go/agent/metrics/v1", "gen-go/agent/trace/v1", "gen-go/metrics/v1", "gen-go/resource/v1", "gen-go/trace/v1", ] pruneopts = "UT" revision = "a105b96453fe85139acc07b68de48f2cbdd71249" version = "v0.2.0" [[projects]] digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55" name = "github.com/dgrijalva/jwt-go" packages = ["."] pruneopts = "UT" revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" version = "v3.2.0" [[projects]] digest = "1:489a99067cd08971bd9c1ee0055119ba8febc1429f9200ab0bec68d35e8c4833" name = "github.com/golang/protobuf" packages = [ "jsonpb", "proto", "protoc-gen-go/descriptor", "protoc-gen-go/generator", "protoc-gen-go/generator/internal/remap", "protoc-gen-go/plugin", "ptypes", "ptypes/any", "ptypes/duration", "ptypes/struct", "ptypes/timestamp", "ptypes/wrappers", ] pruneopts = "UT" revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30" version = "v1.3.1" [[projects]] digest = "1:c20c9a82345346a19916a0086e61ea97425172036a32b8a8975490da6a129fda" name = 
"github.com/grpc-ecosystem/grpc-gateway" packages = [ "internal", "runtime", "utilities", ] pruneopts = "UT" revision = "cd0c8ef3533e9c04e6520cac37a81fe262fb0b34" version = "v1.9.2" [[projects]] digest = "1:67474f760e9ac3799f740db2c489e6423a4cde45520673ec123ac831ad849cb8" name = "github.com/hashicorp/golang-lru" packages = ["simplelru"] pruneopts = "UT" revision = "7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c" version = "v0.5.1" [[projects]] branch = "master" digest = "1:f1df16c368a97edecc18c8c061c278cb6a342450bb83d5da4738e5b330abd522" name = "github.com/mattn/go-ieproxy" packages = ["."] pruneopts = "UT" revision = "91bb50d981495aef1c208d31be3d77d904384f20" [[projects]] digest = "1:4c93890bbbb5016505e856cb06b5c5a2ff5b7217584d33f2a9071ebef4b5d473" name = "go.opencensus.io" packages = [ ".", "internal", "internal/tagencoding", "metric/metricdata", "metric/metricproducer", "plugin/ocgrpc", "plugin/ochttp", "plugin/ochttp/propagation/b3", "plugin/ochttp/propagation/tracecontext", "resource", "stats", "stats/internal", "stats/view", "tag", "trace", "trace/internal", "trace/propagation", "trace/tracestate", ] pruneopts = "UT" revision = "43463a80402d8447b7fce0d2c58edf1687ff0b58" version = "v0.19.3" [[projects]] branch = "master" digest = "1:8f690c88cafc94f162d91fb3eaa1d9826f24c2f86ee7ea46c16bc0a3d3846c19" name = "golang.org/x/net" packages = [ "context", "http/httpguts", "http/httpproxy", "http2", "http2/hpack", "idna", "internal/timeseries", "trace", ] pruneopts = "UT" revision = "da137c7871d730100384dbcf36e6f8fa493aef5b" [[projects]] branch = "master" digest = "1:382bb5a7fb4034db3b6a2d19e5a4a6bcf52f4750530603c01ca18a172fa3089b" name = "golang.org/x/sync" packages = ["semaphore"] pruneopts = "UT" revision = "112230192c580c3556b8cee6403af37a4fc5f28c" [[projects]] branch = "master" digest = "1:2c770d8251a8a2127b648f57602d75c8e40457ba070b57b38176013472f31326" name = "golang.org/x/sys" packages = [ "unix", "windows", "windows/registry", ] pruneopts = "UT" revision = 
"04f50cda93cbb67f2afa353c52f342100e80e625" [[projects]] digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405" name = "golang.org/x/text" packages = [ "collate", "collate/build", "internal/colltab", "internal/gen", "internal/language", "internal/language/compact", "internal/tag", "internal/triegen", "internal/ucd", "language", "secure/bidirule", "transform", "unicode/bidi", "unicode/cldr", "unicode/norm", "unicode/rangetable", ] pruneopts = "UT" revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" version = "v0.3.2" [[projects]] digest = "1:5f003878aabe31d7f6b842d4de32b41c46c214bb629bb485387dbcce1edf5643" name = "google.golang.org/api" packages = ["support/bundler"] pruneopts = "UT" revision = "02490b97dff7cfde1995bd77de808fd27053bc87" version = "v0.7.0" [[projects]] branch = "master" digest = "1:3565a93b7692277a5dea355bc47bd6315754f3246ed07a224be6aec28972a805" name = "google.golang.org/genproto" packages = [ "googleapis/api/httpbody", "googleapis/rpc/status", "protobuf/field_mask", ] pruneopts = "UT" revision = "eb59cef1c072c61ea4f7623910448d5e9c6a4455" [[projects]] digest = "1:e8800ddadd6bce3bc0c5ffd7bc55dbdddc6e750956c10cc10271cade542fccbe" name = "google.golang.org/grpc" packages = [ ".", "balancer", "balancer/base", "balancer/roundrobin", "binarylog/grpc_binarylog_v1", "codes", "connectivity", "credentials", "credentials/internal", "encoding", "encoding/proto", "grpclog", "internal", "internal/backoff", "internal/balancerload", "internal/binarylog", "internal/channelz", "internal/envconfig", "internal/grpcrand", "internal/grpcsync", "internal/syscall", "internal/transport", "keepalive", "metadata", "naming", "peer", "resolver", "resolver/dns", "resolver/passthrough", "stats", "status", "tap", ] pruneopts = "UT" revision = "501c41df7f472c740d0674ff27122f3f48c80ce7" version = "v1.21.1" [[projects]] branch = "v1" digest = "1:dcb51660fc1fd7bfa3f45305db912fa587c12c17658fd66b3ab55339b59ffbe6" name = "gopkg.in/check.v1" packages = ["."] 
pruneopts = "UT" revision = "20d25e2804050c1cd24a7eea1e7a6447dd0e74ec" [solve-meta] analyzer-name = "dep" analyzer-version = 1 input-imports = [ "github.com/Azure/azure-pipeline-go/pipeline", "github.com/Azure/go-autorest/autorest/adal", "gopkg.in/check.v1", ] solver-name = "gps-cdcl" solver-version = 1 azure-storage-blob-go-0.10.0/Gopkg.toml000077500000000000000000000015171367515646300176720ustar00rootroot00000000000000# Gopkg.toml example # # Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md # for detailed Gopkg.toml documentation. # # required = ["github.com/user/thing/cmd/thing"] # ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] # # [[constraint]] # name = "github.com/user/project" # version = "1.0.0" # # [[constraint]] # name = "github.com/user/project2" # branch = "dev" # source = "github.com/myfork/project2" # # [[override]] # name = "github.com/x/y" # version = "2.4.0" # # [prune] # non-go = false # go-tests = true # unused-packages = true [[constraint]] name = "github.com/Azure/azure-pipeline-go" version = "0.2.1" [[constraint]] branch = "v1" name = "gopkg.in/check.v1" [prune] go-tests = true unused-packages = true azure-storage-blob-go-0.10.0/LICENSE000066400000000000000000000022111367515646300167200ustar00rootroot00000000000000 MIT License Copyright (c) Microsoft Corporation. All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWAREazure-storage-blob-go-0.10.0/README.md000066400000000000000000000101151367515646300171740ustar00rootroot00000000000000# Azure Storage Blob SDK for Go [![GoDoc Widget]][GoDoc] [![Build Status][Travis Widget]][Travis] The Microsoft Azure Storage SDK for Go allows you to build applications that takes advantage of Azure's scalable cloud storage. This repository contains the open source Blob SDK for Go. The [File SDK][File SDK] and [Queue SDK][Queue SDK] are also available. ## Features * Blob Storage * Create/List/Delete Containers * Create/Read/List/Update/Delete Block Blobs * Create/Read/List/Update/Delete Page Blobs * Create/Read/List/Update/Delete Append Blobs ## Getting Started * If you don't already have it, install [the Go distribution](https://golang.org/dl/) * Get the SDK, with any method you prefer: * Go Get: ```go get github.com/Azure/azure-storage-blob-go/azblob``` * Dep: add ```github.com/Azure/azure-storage-blob-go``` to Gopkg.toml: ``` [[constraint]] version = "0.3.0" name = "github.com/Azure/azure-storage-blob-go" ``` * Module: simply import the SDK and Go will download it for you * Use the SDK: ```import "github.com/Azure/azure-storage-blob-go/azblob"``` ## Version Table * If you are looking to use a specific version of the Storage Service, please refer to the following table: | Service Version | Corresponding SDK Version | Import Path | |-----------------|---------------------------|----------------------------------------------------------| | 2016-05-31 | 0.2.0 | 
github.com/Azure/azure-storage-blob-go/2016-05-31/azblob | | 2017-07-29 | 0.2.0 | github.com/Azure/azure-storage-blob-go/2017-07-29/azblob | | 2018-03-28 | 0.3.0 - 0.5.0 | github.com/Azure/azure-storage-blob-go/azblob | | 2018-11-09 | 0.6.0 - 0.7.0 | github.com/Azure/azure-storage-blob-go/azblob | Note: the directory structure of the SDK has changed dramatically since 0.3.0. The different Service Versions are no longer sub-directories; the latest `azblob` is directly under the root directory. In the future, each new Service Version will be introduced with a new major semantic version. ## SDK Architecture * The Azure Storage SDK for Go provides low-level and high-level APIs. * ServiceURL, ContainerURL and BlobURL objects provide the low-level API functionality and map one-to-one to the [Azure Storage Blob REST APIs](https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-rest-api) * The high-level APIs provide convenience abstractions such as uploading a large stream to a block blob (using multiple PutBlock requests). ## Code Samples * [Blob Storage Examples](https://godoc.org/github.com/Azure/azure-storage-blob-go/azblob#pkg-examples) ## License This project is licensed under MIT. ## Contributing This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. [GoDoc]: https://godoc.org/github.com/Azure/azure-storage-blob-go/azblob [GoDoc Widget]: https://godoc.org/github.com/Azure/azure-storage-blob-go/azblob?status.svg [Travis Widget]: https://travis-ci.org/Azure/azure-storage-blob-go.svg?branch=master [Travis]: https://travis-ci.org/Azure/azure-storage-blob-go [File SDK]: https://github.com/Azure/azure-storage-file-go [Queue SDK]: https://github.com/Azure/azure-storage-queue-go azure-storage-blob-go-0.10.0/azblob/000077500000000000000000000000001367515646300171705ustar00rootroot00000000000000azure-storage-blob-go-0.10.0/azblob/access_conditions.go000066400000000000000000000036351367515646300232200ustar00rootroot00000000000000package azblob import ( "time" ) // ModifiedAccessConditions identifies standard HTTP access conditions which you optionally set. type ModifiedAccessConditions struct { IfModifiedSince time.Time IfUnmodifiedSince time.Time IfMatch ETag IfNoneMatch ETag } // pointers is for internal infrastructure. It returns the fields as pointers. func (ac ModifiedAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *ETag, inme *ETag) { if !ac.IfModifiedSince.IsZero() { ims = &ac.IfModifiedSince } if !ac.IfUnmodifiedSince.IsZero() { ius = &ac.IfUnmodifiedSince } if ac.IfMatch != ETagNone { ime = &ac.IfMatch } if ac.IfNoneMatch != ETagNone { inme = &ac.IfNoneMatch } return } // ContainerAccessConditions identifies container-specific access conditions which you optionally set. type ContainerAccessConditions struct { ModifiedAccessConditions LeaseAccessConditions } // BlobAccessConditions identifies blob-specific access conditions which you optionally set. 
type BlobAccessConditions struct { ModifiedAccessConditions LeaseAccessConditions } // LeaseAccessConditions identifies lease access conditions for a container or blob which you optionally set. type LeaseAccessConditions struct { LeaseID string } // pointers is for internal infrastructure. It returns the fields as pointers. func (ac LeaseAccessConditions) pointers() (leaseID *string) { if ac.LeaseID != "" { leaseID = &ac.LeaseID } return } /* // getInt32 is for internal infrastructure. It is used with access condition values where // 0 (the default setting) is meaningful. The library interprets 0 as do not send the header // and the privately-storage field in the access condition object is stored as +1 higher than desired. // THis method returns true, if the value is > 0 (explicitly set) and the stored value - 1 (the set desired value). func getInt32(value int32) (bool, int32) { return value > 0, value - 1 } */ azure-storage-blob-go-0.10.0/azblob/blob.json000066400000000000000000010603401367515646300210050ustar00rootroot00000000000000{ "swagger": "2.0", "info": { "title": "Azure Blob Storage", "version": "2018-11-09", "x-ms-code-generation-settings": { "header": "MIT", "strictSpecAdherence": false } }, "x-ms-parameterized-host": { "hostTemplate": "{url}", "useSchemePrefix": false, "positionInOperation": "first", "parameters": [ { "$ref": "#/parameters/Url" } ] }, "securityDefinitions": { "blob_shared_key": { "type": "apiKey", "name": "Authorization", "in": "header" } }, "schemes": [ "https" ], "consumes": [ "application/xml" ], "produces": [ "application/xml" ], "paths": {}, "x-ms-paths": { "/?restype=service&comp=properties": { "put": { "tags": [ "service" ], "operationId": "Service_SetProperties", "description": "Sets properties for a storage account's Blob service endpoint, including properties for Storage Analytics and CORS (Cross-Origin Resource Sharing) rules", "parameters": [ { "$ref": "#/parameters/StorageServiceProperties" }, { "$ref": 
"#/parameters/Timeout" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "202": { "description": "Success (Accepted)", "headers": { "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "get": { "tags": [ "service" ], "operationId": "Service_GetProperties", "description": "gets the properties of a storage account's Blob service, including properties for Storage Analytics and CORS (Cross-Origin Resource Sharing) rules.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Success.", "headers": { "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
} }, "schema": { "$ref": "#/definitions/StorageServiceProperties" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "service" ] }, { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "properties" ] } ] }, "/?restype=service&comp=stats": { "get": { "tags": [ "service" ], "operationId": "Service_GetStatistics", "description": "Retrieves statistics related to replication for the Blob service. It is only available on the secondary location endpoint when read-access geo-redundant replication is enabled for the storage account.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Success.", "headers": { "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } }, "schema": { "$ref": "#/definitions/StorageServiceStats" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "service" ] }, { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "stats" ] } ] }, "/?comp=list": { "get": { "tags": [ "service" ], "operationId": "Service_ListContainersSegment", "description": "The List Containers Segment operation returns a list of the containers under the specified account", "parameters": [ { "$ref": "#/parameters/Prefix" }, { "$ref": "#/parameters/Marker" }, { "$ref": "#/parameters/MaxResults" }, { "$ref": "#/parameters/ListContainersInclude" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Success.", "headers": { "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
} }, "schema": { "$ref": "#/definitions/ListContainersSegmentResponse" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } }, "x-ms-pageable": { "nextLinkName": "NextMarker" } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "list" ] } ] }, "/?restype=service&comp=userdelegationkey": { "post": { "tags": [ "service" ], "operationId": "Service_GetUserDelegationKey", "description": "Retrieves a user delgation key for the Blob service. This is only a valid operation when using bearer token authentication.", "parameters": [ { "$ref": "#/parameters/KeyInfo" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Success.", "headers": { "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } }, "schema": { "$ref": "#/definitions/UserDelegationKey" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "service" ] }, { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "userdelegationkey" ] } ] }, "/?restype=account&comp=properties": { "get": { "tags": [ "service" ], "operationId": "Service_GetAccountInfo", "description": "Returns the sku name and account kind ", "parameters": [ { "$ref": "#/parameters/ApiVersionParameter" } ], "responses": { "200": { "description": "Success (OK)", "headers": { "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-sku-name": { "x-ms-client-name": "SkuName", "type": "string", "enum": [ "Standard_LRS", "Standard_GRS", "Standard_RAGRS", "Standard_ZRS", "Premium_LRS" ], "x-ms-enum": { "name": "SkuName", "modelAsString": false }, "description": "Identifies the sku name of the account" }, "x-ms-account-kind": { "x-ms-client-name": "AccountKind", "type": "string", "enum": [ "Storage", "BlobStorage", "StorageV2" ], "x-ms-enum": { "name": "AccountKind", "modelAsString": false }, "description": "Identifies the account kind" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "account" ] }, { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "properties" ] } ] }, "/{containerName}?restype=container": { "put": { "tags": [ "container" ], "operationId": "Container_Create", "description": "creates a new container under the specified account. If the container with the same name already exists, the operation fails", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/Metadata" }, { "$ref": "#/parameters/BlobPublicAccess" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "Success, Container created.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." 
}, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "get": { "tags": [ "container" ], "operationId": "Container_GetProperties", "description": "returns all user-defined metadata and system properties for the specified container. The data returned does not include the container's list of blobs", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Success", "headers": { "x-ms-meta": { "type": "string", "x-ms-client-name": "Metadata", "x-ms-header-collection-prefix": "x-ms-meta-" }, "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." 
}, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-lease-duration": { "x-ms-client-name": "LeaseDuration", "description": "When a blob is leased, specifies whether the lease is of infinite or fixed duration.", "type": "string", "enum": [ "infinite", "fixed" ], "x-ms-enum": { "name": "LeaseDurationType", "modelAsString": false } }, "x-ms-lease-state": { "x-ms-client-name": "LeaseState", "description": "Lease state of the blob.", "type": "string", "enum": [ "available", "leased", "expired", "breaking", "broken" ], "x-ms-enum": { "name": "LeaseStateType", "modelAsString": false } }, "x-ms-lease-status": { "x-ms-client-name": "LeaseStatus", "description": "The current lease status of the blob.", "type": "string", "enum": [ "locked", "unlocked" ], "x-ms-enum": { "name": "LeaseStatusType", "modelAsString": false } }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-blob-public-access": { "x-ms-client-name": "BlobPublicAccess", "description": "Indicated whether data in the container may be accessed publicly and the level of access", "type": "string", "enum": [ "container", "blob" ], "x-ms-enum": { "name": "PublicAccessType", "modelAsString": true } }, "x-ms-has-immutability-policy": { "x-ms-client-name": "HasImmutabilityPolicy", "description": "Indicates whether the container has an immutability policy set on it.", "type": "boolean" }, "x-ms-has-legal-hold": { "x-ms-client-name": "HasLegalHold", "description": "Indicates whether the container has a legal hold.", "type": "boolean" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "delete": { "tags": [ "container" ], "operationId": "Container_Delete", "description": "operation marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "202": { "description": "Accepted", "headers": { "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. 
This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "container" ] } ] }, "/{containerName}?restype=container&comp=metadata": { "put": { "tags": [ "container" ], "operationId": "Container_SetMetadata", "description": "operation sets one or more user-defined name-value pairs for the specified container.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/Metadata" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Success", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." 
}, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "container" ] }, { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "metadata" ] } ] }, "/{containerName}?restype=container&comp=acl": { "get": { "tags": [ "container" ], "operationId": "Container_GetAccessPolicy", "description": "gets the permissions for the specified container. The permissions indicate whether container data may be accessed publicly.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Success", "headers": { "x-ms-blob-public-access": { "x-ms-client-name": "BlobPublicAccess", "description": "Indicated whether data in the container may be accessed publicly and the level of access", "type": "string", "enum": [ "container", "blob" ], "x-ms-enum": { "name": "PublicAccessType", "modelAsString": true } }, "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." 
}, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } }, "schema": { "$ref": "#/definitions/SignedIdentifiers" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "put": { "tags": [ "container" ], "operationId": "Container_SetAccessPolicy", "description": "sets the permissions for the specified container. The permissions indicate whether blobs in a container may be accessed publicly.", "parameters": [ { "$ref": "#/parameters/ContainerAcl" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/BlobPublicAccess" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Success.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. 
If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "container" ] }, { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "acl" ] } ] }, "/{containerName}?comp=lease&restype=container&acquire": { "put": { "tags": [ "container" ], "operationId": "Container_AcquireLease", "description": "[Update] establishes and manages a lock on a container for delete operations. 
The lock duration can be 15 to 60 seconds, or can be infinite", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseDuration" }, { "$ref": "#/parameters/ProposedLeaseIdOptional" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The Acquire operation completed successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-lease-id": { "x-ms-client-name": "LeaseId", "type": "string", "description": "Uniquely identifies a container's lease" }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "lease" ] }, { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "container" ] }, { "name": "x-ms-lease-action", "x-ms-client-name": "action", "in": "header", "required": true, "type": "string", "enum": [ "acquire" ], "x-ms-enum": { "name": "LeaseAction", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Describes what lease action to take." } ] }, "/{containerName}?comp=lease&restype=container&release": { "put": { "tags": [ "container" ], "operationId": "Container_ReleaseLease", "description": "[Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdRequired" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The Release operation completed successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. 
Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "lease" ] }, { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "container" ] }, { "name": "x-ms-lease-action", "x-ms-client-name": "action", "in": "header", "required": true, "type": "string", "enum": [ "release" ], "x-ms-enum": { "name": "LeaseAction", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Describes what lease action to take." } ] }, "/{containerName}?comp=lease&restype=container&renew": { "put": { "tags": [ "container" ], "operationId": "Container_RenewLease", "description": "[Update] establishes and manages a lock on a container for delete operations. 
The lock duration can be 15 to 60 seconds, or can be infinite", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdRequired" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The Renew operation completed successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-lease-id": { "x-ms-client-name": "LeaseId", "type": "string", "description": "Uniquely identifies a container's lease" }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "lease" ] }, { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "container" ] }, { "name": "x-ms-lease-action", "x-ms-client-name": "action", "in": "header", "required": true, "type": "string", "enum": [ "renew" ], "x-ms-enum": { "name": "LeaseAction", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Describes what lease action to take." } ] }, "/{containerName}?comp=lease&restype=container&break": { "put": { "tags": [ "container" ], "operationId": "Container_BreakLease", "description": "[Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseBreakPeriod" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "202": { "description": "The Break operation completed successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. 
Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-lease-time": { "x-ms-client-name": "LeaseTime", "type": "integer", "description": "Approximate time remaining in the lease period, in seconds." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "lease" ] }, { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "container" ] }, { "name": "x-ms-lease-action", "x-ms-client-name": "action", "in": "header", "required": true, "type": "string", "enum": [ "break" ], "x-ms-enum": { "name": "LeaseAction", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Describes what lease action to take." } ] }, "/{containerName}?comp=lease&restype=container&change": { "put": { "tags": [ "container" ], "operationId": "Container_ChangeLease", "description": "[Update] establishes and manages a lock on a container for delete operations. 
The lock duration can be 15 to 60 seconds, or can be infinite", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdRequired" }, { "$ref": "#/parameters/ProposedLeaseIdRequired" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The Change operation completed successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-lease-id": { "x-ms-client-name": "LeaseId", "type": "string", "description": "Uniquely identifies a container's lease" }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "lease" ] }, { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "container" ] }, { "name": "x-ms-lease-action", "x-ms-client-name": "action", "in": "header", "required": true, "type": "string", "enum": [ "change" ], "x-ms-enum": { "name": "LeaseAction", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Describes what lease action to take." } ] }, "/{containerName}?restype=container&comp=list&flat": { "get": { "tags": [ "containers" ], "operationId": "Container_ListBlobFlatSegment", "description": "[Update] The List Blobs operation returns a list of the blobs under the specified container", "parameters": [ { "$ref": "#/parameters/Prefix" }, { "$ref": "#/parameters/Marker" }, { "$ref": "#/parameters/MaxResults" }, { "$ref": "#/parameters/ListBlobsInclude" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Success.", "headers": { "Content-Type": { "type": "string", "description": "The media type of the body of the response. For List Blobs this is 'application/xml'" }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. 
This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } }, "schema": { "$ref": "#/definitions/ListBlobsFlatSegmentResponse" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } }, "x-ms-pageable": { "nextLinkName": "NextMarker" } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "container" ] }, { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "list" ] } ] }, "/{containerName}?restype=container&comp=list&hierarchy": { "get": { "tags": [ "containers" ], "operationId": "Container_ListBlobHierarchySegment", "description": "[Update] The List Blobs operation returns a list of the blobs under the specified container", "parameters": [ { "$ref": "#/parameters/Prefix" }, { "$ref": "#/parameters/Delimiter" }, { "$ref": "#/parameters/Marker" }, { "$ref": "#/parameters/MaxResults" }, { "$ref": "#/parameters/ListBlobsInclude" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Success.", "headers": { "Content-Type": { "type": "string", "description": "The media type of the body of the response. For List Blobs this is 'application/xml'" }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. 
This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } }, "schema": { "$ref": "#/definitions/ListBlobsHierarchySegmentResponse" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } }, "x-ms-pageable": { "nextLinkName": "NextMarker" } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "container" ] }, { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "list" ] } ] }, "/{containerName}?restype=account&comp=properties": { "get": { "tags": [ "container" ], "operationId": "Container_GetAccountInfo", "description": "Returns the sku name and account kind ", "parameters": [ { "$ref": "#/parameters/ApiVersionParameter" } ], "responses": { "200": { "description": "Success (OK)", "headers": { "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-sku-name": { "x-ms-client-name": "SkuName", "type": "string", "enum": [ "Standard_LRS", "Standard_GRS", "Standard_RAGRS", "Standard_ZRS", "Premium_LRS" ], "x-ms-enum": { "name": "SkuName", "modelAsString": false }, "description": "Identifies the sku name of the account" }, "x-ms-account-kind": { "x-ms-client-name": "AccountKind", "type": "string", "enum": [ "Storage", "BlobStorage", "StorageV2" ], "x-ms-enum": { "name": "AccountKind", "modelAsString": false }, "description": "Identifies the account kind" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "account" ] }, { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "properties" ] } ] }, "/{containerName}/{blob}": { "get": { "tags": [ "blob" ], "operationId": "Blob_Download", "description": "The Download operation reads or downloads a blob from the system, including its metadata and properties. 
You can also call Download to read a snapshot.", "parameters": [ { "$ref": "#/parameters/Snapshot" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/Range" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/GetRangeContentMD5" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Returns the content of the entire blob.", "headers": { "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-meta": { "type": "string", "x-ms-client-name": "Metadata", "x-ms-header-collection-prefix": "x-ms-meta-" }, "Content-Length": { "type": "integer", "format": "int64", "description": "The number of bytes present in the response body." }, "Content-Type": { "type": "string", "description": "The media type of the body of the response. For Download Blob this is 'application/octet-stream'" }, "Content-Range": { "type": "string", "description": "Indicates the range of bytes returned in the event that the client requested a subset of the blob by setting the 'Range' request header." }, "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." 
}, "Content-Encoding": { "type": "string", "description": "This header returns the value that was specified for the Content-Encoding request header" }, "Cache-Control": { "type": "string", "description": "This header is returned if it was previously specified for the blob." }, "Content-Disposition": { "type": "string", "description": "This header returns the value that was specified for the 'x-ms-blob-content-disposition' header. The Content-Disposition response header field conveys additional information about how to process the response payload, and also can be used to attach additional metadata. For example, if set to attachment, it indicates that the user-agent should not display the response, but instead show a Save As dialog with a filename other than the blob name specified." }, "Content-Language": { "type": "string", "description": "This header returns the value that was specified for the Content-Language request header." }, "x-ms-blob-sequence-number": { "x-ms-client-name": "BlobSequenceNumber", "type": "integer", "format": "int64", "description": "The current sequence number for a page blob. This header is not returned for block blobs or append blobs" }, "x-ms-blob-type": { "x-ms-client-name": "BlobType", "description": "The blob's type.", "type": "string", "enum": [ "BlockBlob", "PageBlob", "AppendBlob" ], "x-ms-enum": { "name": "BlobType", "modelAsString": false } }, "x-ms-copy-completion-time": { "x-ms-client-name": "CopyCompletionTime", "type": "string", "format": "date-time-rfc1123", "description": "Conclusion time of the last attempted Copy Blob operation where this blob was the destination blob. This value can specify the time of a completed, aborted, or failed copy attempt. This header does not appear if a copy is pending, if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List." 
}, "x-ms-copy-status-description": { "x-ms-client-name": "CopyStatusDescription", "type": "string", "description": "Only appears when x-ms-copy-status is failed or pending. Describes the cause of the last fatal or non-fatal copy operation failure. This header does not appear if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List" }, "x-ms-copy-id": { "x-ms-client-name": "CopyId", "type": "string", "description": "String identifier for this copy operation. Use with Get Blob Properties to check the status of this copy operation, or pass to Abort Copy Blob to abort a pending copy." }, "x-ms-copy-progress": { "x-ms-client-name": "CopyProgress", "type": "string", "description": "Contains the number of bytes copied and the total bytes in the source in the last attempted Copy Blob operation where this blob was the destination blob. Can show between 0 and Content-Length bytes copied. This header does not appear if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List" }, "x-ms-copy-source": { "x-ms-client-name": "CopySource", "type": "string", "description": "URL up to 2 KB in length that specifies the source blob or file used in the last attempted Copy Blob operation where this blob was the destination blob. This header does not appear if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List." 
}, "x-ms-copy-status": { "x-ms-client-name": "CopyStatus", "description": "State of the copy operation identified by x-ms-copy-id.", "type": "string", "enum": [ "pending", "success", "aborted", "failed" ], "x-ms-enum": { "name": "CopyStatusType", "modelAsString": false } }, "x-ms-lease-duration": { "x-ms-client-name": "LeaseDuration", "description": "When a blob is leased, specifies whether the lease is of infinite or fixed duration.", "type": "string", "enum": [ "infinite", "fixed" ], "x-ms-enum": { "name": "LeaseDurationType", "modelAsString": false } }, "x-ms-lease-state": { "x-ms-client-name": "LeaseState", "description": "Lease state of the blob.", "type": "string", "enum": [ "available", "leased", "expired", "breaking", "broken" ], "x-ms-enum": { "name": "LeaseStateType", "modelAsString": false } }, "x-ms-lease-status": { "x-ms-client-name": "LeaseStatus", "description": "The current lease status of the blob.", "type": "string", "enum": [ "locked", "unlocked" ], "x-ms-enum": { "name": "LeaseStatusType", "modelAsString": false } }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Accept-Ranges": { "type": "string", "description": "Indicates that the service supports requests for partial blob content." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-blob-committed-block-count": { "x-ms-client-name": "BlobCommittedBlockCount", "type": "integer", "description": "The number of committed blocks present in the blob. 
This header is returned only for append blobs." }, "x-ms-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the blob data and application metadata are completely encrypted using the specified algorithm. Otherwise, the value is set to false (when the blob is unencrypted, or if only parts of the blob/application metadata are encrypted)." }, "x-ms-blob-content-md5": { "x-ms-client-name": "BlobContentMD5", "type": "string", "format": "byte", "description": "If the blob has a MD5 hash, and if request contains range header (Range or x-ms-range), this response header is returned with the value of the whole blob's MD5 value. This value may or may not be equal to the value returned in Content-MD5 header, with the latter calculated from the requested range" } }, "schema": { "type": "object", "format": "file" } }, "206": { "description": "Returns the content of a specified range of the blob.", "headers": { "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-meta": { "type": "string", "x-ms-client-name": "Metadata", "x-ms-header-collection-prefix": "x-ms-meta-" }, "Content-Length": { "type": "integer", "format": "int64", "description": "The number of bytes present in the response body." }, "Content-Type": { "type": "string", "description": "The media type of the body of the response. For Download Blob this is 'application/octet-stream'" }, "Content-Range": { "type": "string", "description": "Indicates the range of bytes returned in the event that the client requested a subset of the blob by setting the 'Range' request header." 
}, "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." }, "Content-Encoding": { "type": "string", "description": "This header returns the value that was specified for the Content-Encoding request header" }, "Cache-Control": { "type": "string", "description": "This header is returned if it was previously specified for the blob." }, "Content-Disposition": { "type": "string", "description": "This header returns the value that was specified for the 'x-ms-blob-content-disposition' header. The Content-Disposition response header field conveys additional information about how to process the response payload, and also can be used to attach additional metadata. For example, if set to attachment, it indicates that the user-agent should not display the response, but instead show a Save As dialog with a filename other than the blob name specified." }, "Content-Language": { "type": "string", "description": "This header returns the value that was specified for the Content-Language request header." }, "x-ms-blob-sequence-number": { "x-ms-client-name": "BlobSequenceNumber", "type": "integer", "format": "int64", "description": "The current sequence number for a page blob. 
This header is not returned for block blobs or append blobs" }, "x-ms-blob-type": { "x-ms-client-name": "BlobType", "description": "The blob's type.", "type": "string", "enum": [ "BlockBlob", "PageBlob", "AppendBlob" ], "x-ms-enum": { "name": "BlobType", "modelAsString": false } }, "x-ms-copy-completion-time": { "x-ms-client-name": "CopyCompletionTime", "type": "string", "format": "date-time-rfc1123", "description": "Conclusion time of the last attempted Copy Blob operation where this blob was the destination blob. This value can specify the time of a completed, aborted, or failed copy attempt. This header does not appear if a copy is pending, if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List." }, "x-ms-copy-status-description": { "x-ms-client-name": "CopyStatusDescription", "type": "string", "description": "Only appears when x-ms-copy-status is failed or pending. Describes the cause of the last fatal or non-fatal copy operation failure. This header does not appear if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List" }, "x-ms-copy-id": { "x-ms-client-name": "CopyId", "type": "string", "description": "String identifier for this copy operation. Use with Get Blob Properties to check the status of this copy operation, or pass to Abort Copy Blob to abort a pending copy." }, "x-ms-copy-progress": { "x-ms-client-name": "CopyProgress", "type": "string", "description": "Contains the number of bytes copied and the total bytes in the source in the last attempted Copy Blob operation where this blob was the destination blob. Can show between 0 and Content-Length bytes copied. 
This header does not appear if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List" }, "x-ms-copy-source": { "x-ms-client-name": "CopySource", "type": "string", "description": "URL up to 2 KB in length that specifies the source blob or file used in the last attempted Copy Blob operation where this blob was the destination blob. This header does not appear if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List." }, "x-ms-copy-status": { "x-ms-client-name": "CopyStatus", "description": "State of the copy operation identified by x-ms-copy-id.", "type": "string", "enum": [ "pending", "success", "aborted", "failed" ], "x-ms-enum": { "name": "CopyStatusType", "modelAsString": false } }, "x-ms-lease-duration": { "x-ms-client-name": "LeaseDuration", "description": "When a blob is leased, specifies whether the lease is of infinite or fixed duration.", "type": "string", "enum": [ "infinite", "fixed" ], "x-ms-enum": { "name": "LeaseDurationType", "modelAsString": false } }, "x-ms-lease-state": { "x-ms-client-name": "LeaseState", "description": "Lease state of the blob.", "type": "string", "enum": [ "available", "leased", "expired", "breaking", "broken" ], "x-ms-enum": { "name": "LeaseStateType", "modelAsString": false } }, "x-ms-lease-status": { "x-ms-client-name": "LeaseStatus", "description": "The current lease status of the blob.", "type": "string", "enum": [ "locked", "unlocked" ], "x-ms-enum": { "name": "LeaseStatusType", "modelAsString": false } }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." 
}, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Accept-Ranges": { "type": "string", "description": "Indicates that the service supports requests for partial blob content." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-blob-committed-block-count": { "x-ms-client-name": "BlobCommittedBlockCount", "type": "integer", "description": "The number of committed blocks present in the blob. This header is returned only for append blobs." }, "x-ms-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the blob data and application metadata are completely encrypted using the specified algorithm. Otherwise, the value is set to false (when the blob is unencrypted, or if only parts of the blob/application metadata are encrypted)." }, "x-ms-blob-content-md5": { "x-ms-client-name": "BlobContentMD5", "type": "string", "format": "byte", "description": "If the blob has a MD5 hash, and if request contains range header (Range or x-ms-range), this response header is returned with the value of the whole blob's MD5 value. 
This value may or may not be equal to the value returned in Content-MD5 header, with the latter calculated from the requested range" } }, "schema": { "type": "object", "format": "file" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "head": { "tags": [ "blob" ], "operationId": "Blob_GetProperties", "description": "The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system properties for the blob. It does not return the content of the blob.", "parameters": [ { "$ref": "#/parameters/Snapshot" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Returns the properties of the blob.", "headers": { "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the blob was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-creation-time": { "x-ms-client-name": "CreationTime", "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the blob was created." 
}, "x-ms-meta": { "type": "string", "x-ms-client-name": "Metadata", "x-ms-header-collection-prefix": "x-ms-meta-" }, "x-ms-blob-type": { "x-ms-client-name": "BlobType", "description": "The blob's type.", "type": "string", "enum": [ "BlockBlob", "PageBlob", "AppendBlob" ], "x-ms-enum": { "name": "BlobType", "modelAsString": false } }, "x-ms-copy-completion-time": { "x-ms-client-name": "CopyCompletionTime", "type": "string", "format": "date-time-rfc1123", "description": "Conclusion time of the last attempted Copy Blob operation where this blob was the destination blob. This value can specify the time of a completed, aborted, or failed copy attempt. This header does not appear if a copy is pending, if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List." }, "x-ms-copy-status-description": { "x-ms-client-name": "CopyStatusDescription", "type": "string", "description": "Only appears when x-ms-copy-status is failed or pending. Describes the cause of the last fatal or non-fatal copy operation failure. This header does not appear if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List" }, "x-ms-copy-id": { "x-ms-client-name": "CopyId", "type": "string", "description": "String identifier for this copy operation. Use with Get Blob Properties to check the status of this copy operation, or pass to Abort Copy Blob to abort a pending copy." }, "x-ms-copy-progress": { "x-ms-client-name": "CopyProgress", "type": "string", "description": "Contains the number of bytes copied and the total bytes in the source in the last attempted Copy Blob operation where this blob was the destination blob. Can show between 0 and Content-Length bytes copied. 
This header does not appear if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List" }, "x-ms-copy-source": { "x-ms-client-name": "CopySource", "type": "string", "description": "URL up to 2 KB in length that specifies the source blob or file used in the last attempted Copy Blob operation where this blob was the destination blob. This header does not appear if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List." }, "x-ms-copy-status": { "x-ms-client-name": "CopyStatus", "description": "State of the copy operation identified by x-ms-copy-id.", "type": "string", "enum": [ "pending", "success", "aborted", "failed" ], "x-ms-enum": { "name": "CopyStatusType", "modelAsString": false } }, "x-ms-incremental-copy": { "x-ms-client-name": "IsIncrementalCopy", "type": "boolean", "description": "Included if the blob is incremental copy blob." }, "x-ms-copy-destination-snapshot": { "x-ms-client-name": "DestinationSnapshot", "type": "string", "description": "Included if the blob is incremental copy blob or incremental copy snapshot, if x-ms-copy-status is success. Snapshot time of the last successful incremental copy snapshot for this blob." 
}, "x-ms-lease-duration": { "x-ms-client-name": "LeaseDuration", "description": "When a blob is leased, specifies whether the lease is of infinite or fixed duration.", "type": "string", "enum": [ "infinite", "fixed" ], "x-ms-enum": { "name": "LeaseDurationType", "modelAsString": false } }, "x-ms-lease-state": { "x-ms-client-name": "LeaseState", "description": "Lease state of the blob.", "type": "string", "enum": [ "available", "leased", "expired", "breaking", "broken" ], "x-ms-enum": { "name": "LeaseStateType", "modelAsString": false } }, "x-ms-lease-status": { "x-ms-client-name": "LeaseStatus", "description": "The current lease status of the blob.", "type": "string", "enum": [ "locked", "unlocked" ], "x-ms-enum": { "name": "LeaseStatusType", "modelAsString": false } }, "Content-Length": { "type": "integer", "format": "int64", "description": "The number of bytes present in the response body." }, "Content-Type": { "type": "string", "description": "The content type specified for the blob. The default content type is 'application/octet-stream'" }, "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." }, "Content-Encoding": { "type": "string", "description": "This header returns the value that was specified for the Content-Encoding request header" }, "Content-Disposition": { "type": "string", "description": "This header returns the value that was specified for the 'x-ms-blob-content-disposition' header. 
The Content-Disposition response header field conveys additional information about how to process the response payload, and also can be used to attach additional metadata. For example, if set to attachment, it indicates that the user-agent should not display the response, but instead show a Save As dialog with a filename other than the blob name specified." }, "Content-Language": { "type": "string", "description": "This header returns the value that was specified for the Content-Language request header." }, "Cache-Control": { "type": "string", "description": "This header is returned if it was previously specified for the blob." }, "x-ms-blob-sequence-number": { "x-ms-client-name": "BlobSequenceNumber", "type": "integer", "format": "int64", "description": "The current sequence number for a page blob. This header is not returned for block blobs or append blobs" }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "Accept-Ranges": { "type": "string", "description": "Indicates that the service supports requests for partial blob content." }, "x-ms-blob-committed-block-count": { "x-ms-client-name": "BlobCommittedBlockCount", "type": "integer", "description": "The number of committed blocks present in the blob. This header is returned only for append blobs." 
}, "x-ms-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the blob data and application metadata are completely encrypted using the specified algorithm. Otherwise, the value is set to false (when the blob is unencrypted, or if only parts of the blob/application metadata are encrypted)." }, "x-ms-access-tier": { "x-ms-client-name": "AccessTier", "type": "string", "description": "The tier of page blob on a premium storage account or tier of block blob on blob storage LRS accounts. For a list of allowed premium page blob tiers, see https://docs.microsoft.com/en-us/azure/virtual-machines/windows/premium-storage#features. For blob storage LRS accounts, valid values are Hot/Cool/Archive." }, "x-ms-access-tier-inferred": { "x-ms-client-name": "AccessTierInferred", "type": "boolean", "description": "For page blobs on a premium storage account only. If the access tier is not explicitly set on the blob, the tier is inferred based on its content length and this header will be returned with true value." }, "x-ms-archive-status": { "x-ms-client-name": "ArchiveStatus", "type": "string", "description": "For blob storage LRS accounts, valid values are rehydrate-pending-to-hot/rehydrate-pending-to-cool. If the blob is being rehydrated and is not complete then this header is returned indicating that rehydrate is pending and also tells the destination tier." }, "x-ms-access-tier-change-time": { "x-ms-client-name": "AccessTierChangeTime", "type": "string", "format": "date-time-rfc1123", "description": "The time the tier was changed on the object. This is only returned if the tier on the block blob was ever set." 
} } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "delete": { "tags": [ "blob" ], "operationId": "Blob_Delete", "description": "If the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently removed from the storage account. If the storage account's soft delete feature is enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service retains the blob or snapshot for the number of days specified by the DeleteRetentionPolicy section of [Storage service properties] (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's data is permanently removed from the storage account. Note that you continue to be charged for the soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and specify the \"include=deleted\" query parameter to discover which blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. 
All other operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code of 404 (ResourceNotFound).", "parameters": [ { "$ref": "#/parameters/Snapshot" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/DeleteSnapshots" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "202": { "description": "The delete request was accepted and the blob will be deleted.", "headers": { "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } } }, "/{containerName}/{blob}?PageBlob": { "put": { "tags": [ "blob" ], "operationId": "PageBlob_Create", "description": "The Create operation creates a new page blob.", "consumes": [ "application/octet-stream" ], "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ContentLength" }, { "$ref": "#/parameters/BlobContentType" }, { "$ref": "#/parameters/BlobContentEncoding" }, { "$ref": "#/parameters/BlobContentLanguage" }, { "$ref": "#/parameters/BlobContentMD5" }, { "$ref": "#/parameters/BlobCacheControl" }, { "$ref": "#/parameters/Metadata" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/BlobContentDisposition" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/BlobContentLengthRequired" }, { "$ref": "#/parameters/BlobSequenceNumber" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The blob was created.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." 
}, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise." 
} } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "x-ms-blob-type", "x-ms-client-name": "blobType", "in": "header", "required": true, "x-ms-parameter-location": "method", "description": "Specifies the type of blob to create: block blob, page blob, or append blob.", "type": "string", "enum": [ "PageBlob" ], "x-ms-enum": { "name": "BlobType", "modelAsString": false } } ] }, "/{containerName}/{blob}?AppendBlob": { "put": { "tags": [ "blob" ], "operationId": "AppendBlob_Create", "description": "The Create Append Blob operation creates a new append blob.", "consumes": [ "application/octet-stream" ], "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ContentLength" }, { "$ref": "#/parameters/BlobContentType" }, { "$ref": "#/parameters/BlobContentEncoding" }, { "$ref": "#/parameters/BlobContentLanguage" }, { "$ref": "#/parameters/BlobContentMD5" }, { "$ref": "#/parameters/BlobCacheControl" }, { "$ref": "#/parameters/Metadata" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/BlobContentDisposition" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The blob was created.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. 
Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise." 
} } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "x-ms-blob-type", "x-ms-client-name": "blobType", "in": "header", "required": true, "x-ms-parameter-location": "method", "description": "Specifies the type of blob to create: block blob, page blob, or append blob.", "type": "string", "enum": [ "AppendBlob" ], "x-ms-enum": { "name": "BlobType", "modelAsString": false } } ] }, "/{containerName}/{blob}?BlockBlob": { "put": { "tags": [ "blob" ], "operationId": "BlockBlob_Upload", "description": "The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported with Put Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a partial update of the content of a block blob, use the Put Block List operation.", "consumes": [ "application/octet-stream" ], "parameters": [ { "$ref": "#/parameters/Body" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ContentLength" }, { "$ref": "#/parameters/BlobContentType" }, { "$ref": "#/parameters/BlobContentEncoding" }, { "$ref": "#/parameters/BlobContentLanguage" }, { "$ref": "#/parameters/BlobContentMD5" }, { "$ref": "#/parameters/BlobCacheControl" }, { "$ref": "#/parameters/Metadata" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/BlobContentDisposition" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The blob was updated.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a 
value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise." 
} } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "x-ms-blob-type", "x-ms-client-name": "blobType", "in": "header", "required": true, "x-ms-parameter-location": "method", "description": "Specifies the type of blob to create: block blob, page blob, or append blob.", "type": "string", "enum": [ "BlockBlob" ], "x-ms-enum": { "name": "BlobType", "modelAsString": false } } ] }, "/{containerName}/{blob}?comp=undelete": { "put": { "tags": [ "blob" ], "operationId": "Blob_Undelete", "description": "Undelete a blob that was previously soft deleted", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The blob was undeleted successfully.", "headers": { "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated." 
} } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "undelete" ] } ] }, "/{containerName}/{blob}?comp=properties&SetHTTPHeaders": { "put": { "tags": [ "blob" ], "operationId": "Blob_SetHTTPHeaders", "description": "The Set HTTP Headers operation sets system properties on the blob", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/BlobCacheControl" }, { "$ref": "#/parameters/BlobContentType" }, { "$ref": "#/parameters/BlobContentMD5" }, { "$ref": "#/parameters/BlobContentEncoding" }, { "$ref": "#/parameters/BlobContentLanguage" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/BlobContentDisposition" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The properties were set successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-blob-sequence-number": { "x-ms-client-name": "BlobSequenceNumber", "type": "integer", "format": "int64", "description": "The current sequence number for a page blob. 
This header is not returned for block blobs or append blobs" }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "properties" ] } ] }, "/{containerName}/{blob}?comp=metadata": { "put": { "tags": [ "blob" ], "operationId": "Blob_SetMetadata", "description": "The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more name-value pairs", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/Metadata" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The metadata was set successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." 
}, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise." 
} } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "metadata" ] } ] }, "/{containerName}/{blob}?comp=lease&acquire": { "put": { "tags": [ "blob" ], "operationId": "Blob_AcquireLease", "description": "[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseDuration" }, { "$ref": "#/parameters/ProposedLeaseIdOptional" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The Acquire operation completed successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the blob was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-lease-id": { "x-ms-client-name": "LeaseId", "type": "string", "description": "Uniquely identifies a blobs's lease" }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." 
}, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "lease" ] }, { "name": "x-ms-lease-action", "x-ms-client-name": "action", "in": "header", "required": true, "type": "string", "enum": [ "acquire" ], "x-ms-enum": { "name": "LeaseAction", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Describes what lease action to take." } ] }, "/{containerName}/{blob}?comp=lease&release": { "put": { "tags": [ "blob" ], "operationId": "Blob_ReleaseLease", "description": "[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdRequired" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The Release operation completed successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." 
}, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the blob was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "lease" ] }, { "name": "x-ms-lease-action", "x-ms-client-name": "action", "in": "header", "required": true, "type": "string", "enum": [ "release" ], "x-ms-enum": { "name": "LeaseAction", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Describes what lease action to take." 
} ] }, "/{containerName}/{blob}?comp=lease&renew": { "put": { "tags": [ "blob" ], "operationId": "Blob_RenewLease", "description": "[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdRequired" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The Renew operation completed successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the blob was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-lease-id": { "x-ms-client-name": "LeaseId", "type": "string", "description": "Uniquely identifies a blobs's lease" }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "lease" ] }, { "name": "x-ms-lease-action", "x-ms-client-name": "action", "in": "header", "required": true, "type": "string", "enum": [ "renew" ], "x-ms-enum": { "name": "LeaseAction", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Describes what lease action to take." } ] }, "/{containerName}/{blob}?comp=lease&change": { "put": { "tags": [ "blob" ], "operationId": "Blob_ChangeLease", "description": "[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdRequired" }, { "$ref": "#/parameters/ProposedLeaseIdRequired" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The Change operation completed successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the blob was last modified. 
Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-lease-id": { "x-ms-client-name": "LeaseId", "type": "string", "description": "Uniquely identifies a blobs's lease" }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "lease" ] }, { "name": "x-ms-lease-action", "x-ms-client-name": "action", "in": "header", "required": true, "type": "string", "enum": [ "change" ], "x-ms-enum": { "name": "LeaseAction", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Describes what lease action to take." 
} ] }, "/{containerName}/{blob}?comp=lease&break": { "put": { "tags": [ "blob" ], "operationId": "Blob_BreakLease", "description": "[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseBreakPeriod" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "202": { "description": "The Break operation completed successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the blob was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-lease-time": { "x-ms-client-name": "LeaseTime", "type": "integer", "description": "Approximate time remaining in the lease period, in seconds." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "lease" ] }, { "name": "x-ms-lease-action", "x-ms-client-name": "action", "in": "header", "required": true, "type": "string", "enum": [ "break" ], "x-ms-enum": { "name": "LeaseAction", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Describes what lease action to take." } ] }, "/{containerName}/{blob}?comp=snapshot": { "put": { "tags": [ "blob" ], "operationId": "Blob_CreateSnapshot", "description": "The Create Snapshot operation creates a read-only snapshot of a blob", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/Metadata" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The snaptshot was taken successfully.", "headers": { "x-ms-snapshot": { "x-ms-client-name": "Snapshot", "type": "string", "description": "Uniquely identifies the snapshot and indicates the snapshot version. It may be used in subsequent requests to access the snapshot" }, "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." 
}, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "snapshot" ] } ] }, "/{containerName}/{blob}?comp=copy": { "put": { "tags": [ "blob" ], "operationId": "Blob_StartCopyFromURL", "description": "The Start Copy From URL operation copies a blob or an internet resource to a new blob.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/Metadata" }, { "$ref": "#/parameters/SourceIfModifiedSince" }, { "$ref": "#/parameters/SourceIfUnmodifiedSince" }, { "$ref": "#/parameters/SourceIfMatch" }, { "$ref": "#/parameters/SourceIfNoneMatch" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/CopySource" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": 
"#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "202": { "description": "The copy blob has been accepted with the specified copy status.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-copy-id": { "x-ms-client-name": "CopyId", "type": "string", "description": "String identifier for this copy operation. Use with Get Blob Properties to check the status of this copy operation, or pass to Abort Copy Blob to abort a pending copy." 
}, "x-ms-copy-status": { "x-ms-client-name": "CopyStatus", "description": "State of the copy operation identified by x-ms-copy-id.", "type": "string", "enum": [ "pending", "success", "aborted", "failed" ], "x-ms-enum": { "name": "CopyStatusType", "modelAsString": false } } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [] }, "/{containerName}/{blob}?comp=copy&sync": { "put": { "tags": [ "blob" ], "operationId": "Blob_CopyFromURL", "description": "The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response until the copy is complete.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/Metadata" }, { "$ref": "#/parameters/SourceIfModifiedSince" }, { "$ref": "#/parameters/SourceIfUnmodifiedSince" }, { "$ref": "#/parameters/SourceIfMatch" }, { "$ref": "#/parameters/SourceIfNoneMatch" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/CopySource" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "202": { "description": "The copy has completed.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." 
}, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-copy-id": { "x-ms-client-name": "CopyId", "type": "string", "description": "String identifier for this copy operation." }, "x-ms-copy-status": { "x-ms-client-name": "CopyStatus", "description": "State of the copy operation identified by x-ms-copy-id.", "type": "string", "enum": [ "success" ], "x-ms-enum": { "name": "SyncCopyStatusType", "modelAsString": false } } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "x-ms-requires-sync", "in": "header", "required": true, "type": "string", "enum": [ "true" ] } ] }, "/{containerName}/{blob}?comp=copy©id={CopyId}": { "put": { "tags": [ "blob" ], "operationId": "Blob_AbortCopyFromURL", "description": "The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination blob with zero length and full metadata.", "parameters": [ { "$ref": "#/parameters/CopyId" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "204": { "description": "The delete request was accepted and the blob will be deleted.", "headers": { "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": 
"string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "copy" ] }, { "name": "x-ms-copy-action", "x-ms-client-name": "copyActionAbortConstant", "in": "header", "required": true, "type": "string", "enum": [ "abort" ], "x-ms-parameter-location": "method" } ] }, "/{containerName}/{blob}?comp=tier": { "put": { "tags": [ "blobs" ], "operationId": "Blob_SetTier", "description": "The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage account and on a block blob in a blob storage account (locally redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. 
This operation does not update the blob's ETag.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/AccessTier" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" }, { "$ref": "#/parameters/LeaseIdOptional" } ], "responses": { "200": { "description": "The new tier will take effect immediately.", "headers": { "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and newer." } } }, "202": { "description": "The transition to the new tier is pending.", "headers": { "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and newer." 
} } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "tier" ] } ] }, "/{containerName}/{blob}?restype=account&comp=properties": { "get": { "tags": [ "blob" ], "operationId": "Blob_GetAccountInfo", "description": "Returns the sku name and account kind ", "parameters": [ { "$ref": "#/parameters/ApiVersionParameter" } ], "responses": { "200": { "description": "Success (OK)", "headers": { "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-sku-name": { "x-ms-client-name": "SkuName", "type": "string", "enum": [ "Standard_LRS", "Standard_GRS", "Standard_RAGRS", "Standard_ZRS", "Premium_LRS" ], "x-ms-enum": { "name": "SkuName", "modelAsString": false }, "description": "Identifies the sku name of the account" }, "x-ms-account-kind": { "x-ms-client-name": "AccountKind", "type": "string", "enum": [ "Storage", "BlobStorage", "StorageV2" ], "x-ms-enum": { "name": "AccountKind", "modelAsString": false }, "description": "Identifies the account kind" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "account" ] }, { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "properties" ] } ] }, "/{containerName}/{blob}?comp=block": { "put": { "tags": [ "blockblob" ], "operationId": "BlockBlob_StageBlock", "description": "The Stage Block operation creates a new block to be committed as part of a blob", "consumes": [ "application/octet-stream" ], "parameters": [ { "$ref": "#/parameters/BlockId" }, { "$ref": "#/parameters/ContentLength" }, { "$ref": "#/parameters/ContentMD5" }, { "$ref": "#/parameters/Body" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The block was created.", "headers": { "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message 
content integrity." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise." } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "block" ] } ] }, "/{containerName}/{blob}?comp=block&fromURL": { "put": { "tags": [ "blockblob" ], "operationId": "BlockBlob_StageBlockFromURL", "description": "The Stage Block operation creates a new block to be committed as part of a blob where the contents are read from a URL.", "parameters": [ { "$ref": "#/parameters/BlockId" }, { "$ref": "#/parameters/ContentLength" }, { "$ref": "#/parameters/SourceUrl" }, { "$ref": "#/parameters/SourceRange" }, { "$ref": "#/parameters/SourceContentMD5" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/SourceIfModifiedSince" }, { "$ref": "#/parameters/SourceIfUnmodifiedSince" }, { "$ref": "#/parameters/SourceIfMatch" }, { "$ref": "#/parameters/SourceIfNoneMatch" }, { "$ref": 
"#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The block was created.", "headers": { "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise." } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "block" ] } ] }, "/{containerName}/{blob}?comp=blocklist": { "put": { "tags": [ "blockblob" ], "operationId": "BlockBlob_CommitBlockList", "description": "The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. In order to be written as part of a blob, a block must have been successfully written to the server in a prior Put Block operation. 
You can call Put Block List to update a blob by uploading only those blocks that have changed, then committing the new and existing blocks together. You can do this by specifying whether to commit a block from the committed block list or from the uncommitted block list, or to commit the most recently uploaded version of the block, whichever list it may belong to.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/BlobCacheControl" }, { "$ref": "#/parameters/BlobContentType" }, { "$ref": "#/parameters/BlobContentEncoding" }, { "$ref": "#/parameters/BlobContentLanguage" }, { "$ref": "#/parameters/BlobContentMD5" }, { "$ref": "#/parameters/Metadata" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/BlobContentDisposition" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "name": "blocks", "in": "body", "required": true, "schema": { "$ref": "#/definitions/BlockLookupList" } }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The block list was recorded.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." 
}, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise." } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "get": { "tags": [ "blockblob" ], "operationId": "BlockBlob_GetBlockList", "description": "The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob", "parameters": [ { "$ref": "#/parameters/Snapshot" }, { "$ref": "#/parameters/BlockListType" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The page range was written.", "headers": { "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." 
}, "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Content-Type": { "type": "string", "description": "The media type of the body of the response. For Get Block List this is 'application/xml'" }, "x-ms-blob-content-length": { "x-ms-client-name": "BlobContentLength", "type": "integer", "format": "int64", "description": "The size of the blob in bytes." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } }, "schema": { "$ref": "#/definitions/BlockList" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "blocklist" ] } ] }, "/{containerName}/{blob}?comp=page&update": { "put": { "tags": [ "pageblob" ], "operationId": "PageBlob_UploadPages", "description": "The Upload Pages operation writes a range of pages to a page blob", "consumes": [ "application/octet-stream" ], "parameters": [ { "$ref": "#/parameters/Body" }, { "$ref": "#/parameters/ContentLength" }, { "$ref": "#/parameters/ContentMD5" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/Range" }, { "$ref": "#/parameters/LeaseIdOptional" 
}, { "$ref": "#/parameters/IfSequenceNumberLessThanOrEqualTo" }, { "$ref": "#/parameters/IfSequenceNumberLessThan" }, { "$ref": "#/parameters/IfSequenceNumberEqualTo" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The page range was written.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." }, "x-ms-blob-sequence-number": { "x-ms-client-name": "BlobSequenceNumber", "type": "integer", "format": "int64", "description": "The current sequence number for the page blob." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise." } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "page" ] }, { "name": "x-ms-page-write", "x-ms-client-name": "pageWrite", "in": "header", "required": true, "x-ms-parameter-location": "method", "description": "Required. You may specify one of the following options:\n - Update: Writes the bytes specified by the request body into the specified range. The Range and Content-Length headers must match to perform the update.\n - Clear: Clears the specified range and releases the space used in storage for that range. 
To clear a range, set the Content-Length header to zero, and the Range header to a value that indicates the range to clear, up to maximum blob size.", "type": "string", "enum": [ "update" ], "x-ms-enum": { "name": "PageWriteType", "modelAsString": false } } ] }, "/{containerName}/{blob}?comp=page&clear": { "put": { "tags": [ "pageblob" ], "operationId": "PageBlob_ClearPages", "description": "The Clear Pages operation clears a set of pages from a page blob", "consumes": [ "application/octet-stream" ], "parameters": [ { "$ref": "#/parameters/ContentLength" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/Range" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/IfSequenceNumberLessThanOrEqualTo" }, { "$ref": "#/parameters/IfSequenceNumberLessThan" }, { "$ref": "#/parameters/IfSequenceNumberEqualTo" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The page range was cleared.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." 
}, "x-ms-blob-sequence-number": { "x-ms-client-name": "BlobSequenceNumber", "type": "integer", "format": "int64", "description": "The current sequence number for the page blob." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "page" ] }, { "name": "x-ms-page-write", "x-ms-client-name": "pageWrite", "in": "header", "required": true, "x-ms-parameter-location": "method", "description": "Required. You may specify one of the following options:\n - Update: Writes the bytes specified by the request body into the specified range. The Range and Content-Length headers must match to perform the update.\n - Clear: Clears the specified range and releases the space used in storage for that range. 
To clear a range, set the Content-Length header to zero, and the Range header to a value that indicates the range to clear, up to maximum blob size.", "type": "string", "enum": [ "clear" ], "x-ms-enum": { "name": "PageWriteType", "modelAsString": false } } ] }, "/{containerName}/{blob}?comp=page&update&fromUrl": { "put": { "tags": [ "pageblob" ], "operationId": "PageBlob_UploadPagesFromURL", "description": "The Upload Pages operation writes a range of pages to a page blob where the contents are read from a URL", "consumes": [ "application/octet-stream" ], "parameters": [ { "$ref": "#/parameters/SourceUrl" }, { "$ref": "#/parameters/SourceRangeRequiredPutPageFromUrl" }, { "$ref": "#/parameters/SourceContentMD5" }, { "$ref": "#/parameters/ContentLength" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/RangeRequiredPutPageFromUrl" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/IfSequenceNumberLessThanOrEqualTo" }, { "$ref": "#/parameters/IfSequenceNumberLessThan" }, { "$ref": "#/parameters/IfSequenceNumberEqualTo" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/SourceIfModifiedSince" }, { "$ref": "#/parameters/SourceIfUnmodifiedSince" }, { "$ref": "#/parameters/SourceIfMatch" }, { "$ref": "#/parameters/SourceIfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The page range was written.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. 
Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." }, "x-ms-blob-sequence-number": { "x-ms-client-name": "BlobSequenceNumber", "type": "integer", "format": "int64", "description": "The current sequence number for the page blob." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise." } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "page" ] }, { "name": "x-ms-page-write", "x-ms-client-name": "pageWrite", "in": "header", "required": true, "x-ms-parameter-location": "method", "description": "Required. 
You may specify one of the following options:\n - Update: Writes the bytes specified by the request body into the specified range. The Range and Content-Length headers must match to perform the update.\n - Clear: Clears the specified range and releases the space used in storage for that range. To clear a range, set the Content-Length header to zero, and the Range header to a value that indicates the range to clear, up to maximum blob size.", "type": "string", "enum": [ "update" ], "x-ms-enum": { "name": "PageWriteType", "modelAsString": false } } ] }, "/{containerName}/{blob}?comp=pagelist": { "get": { "tags": [ "pageblob" ], "operationId": "PageBlob_GetPageRanges", "description": "The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a page blob", "parameters": [ { "$ref": "#/parameters/Snapshot" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/Range" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Information on the page blob was found.", "headers": { "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "x-ms-blob-content-length": { "x-ms-client-name": "BlobContentLength", "type": "integer", "format": "int64", "description": "The size of the blob in bytes." 
}, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } }, "schema": { "$ref": "#/definitions/PageList" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "pagelist" ] } ] }, "/{containerName}/{blob}?comp=pagelist&diff": { "get": { "tags": [ "pageblob" ], "operationId": "PageBlob_GetPageRangesDiff", "description": "The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were changed between target blob and previous snapshot.", "parameters": [ { "$ref": "#/parameters/Snapshot" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/PrevSnapshot" }, { "$ref": "#/parameters/Range" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Information on the page blob was found.", "headers": { "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. 
Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "x-ms-blob-content-length": { "x-ms-client-name": "BlobContentLength", "type": "integer", "format": "int64", "description": "The size of the blob in bytes." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } }, "schema": { "$ref": "#/definitions/PageList" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "pagelist" ] } ] }, "/{containerName}/{blob}?comp=properties&Resize": { "put": { "tags": [ "pageblob" ], "operationId": "PageBlob_Resize", "description": "Resize the Blob", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/BlobContentLengthRequired" }, { "$ref": 
"#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The Blob was resized successfully", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-blob-sequence-number": { "x-ms-client-name": "BlobSequenceNumber", "type": "integer", "format": "int64", "description": "The current sequence number for a page blob. This header is not returned for block blobs or append blobs" }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "properties" ] } ] }, "/{containerName}/{blob}?comp=properties&UpdateSequenceNumber": { "put": { "tags": [ "pageblob" ], "operationId": "PageBlob_UpdateSequenceNumber", "description": "Update the sequence number of the blob", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/SequenceNumberAction" }, { "$ref": "#/parameters/BlobSequenceNumber" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The sequence numbers were updated successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-blob-sequence-number": { "x-ms-client-name": "BlobSequenceNumber", "type": "integer", "format": "int64", "description": "The current sequence number for a page blob. 
This header is not returned for block blobs or append blobs" }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "properties" ] } ] }, "/{containerName}/{blob}?comp=incrementalcopy": { "put": { "tags": [ "pageblob" ], "operationId": "PageBlob_CopyIncremental", "description": "The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. The snapshot is copied such that only the differential changes between the previously copied snapshot are transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual. 
This API is supported since REST version 2016-05-31.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/CopySource" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "202": { "description": "The blob was copied.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-copy-id": { "x-ms-client-name": "CopyId", "type": "string", "description": "String identifier for this copy operation. Use with Get Blob Properties to check the status of this copy operation, or pass to Abort Copy Blob to abort a pending copy." 
}, "x-ms-copy-status": { "x-ms-client-name": "CopyStatus", "description": "State of the copy operation identified by x-ms-copy-id.", "type": "string", "enum": [ "pending", "success", "aborted", "failed" ], "x-ms-enum": { "name": "CopyStatusType", "modelAsString": false } } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "incrementalcopy" ] } ] }, "/{containerName}/{blob}?comp=appendblock": { "put": { "tags": [ "appendblob" ], "consumes": [ "application/octet-stream" ], "operationId": "AppendBlob_AppendBlock", "description": "The Append Block operation commits a new block of data to the end of an existing append blob. The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later.", "parameters": [ { "$ref": "#/parameters/Body" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ContentLength" }, { "$ref": "#/parameters/ContentMD5" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/BlobConditionMaxSize" }, { "$ref": "#/parameters/BlobConditionAppendPos" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The block was created.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." 
}, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-blob-append-offset": { "x-ms-client-name": "BlobAppendOffset", "type": "string", "description": "This response header is returned only for append operations. It returns the offset at which the block was committed, in bytes." }, "x-ms-blob-committed-block-count": { "x-ms-client-name": "BlobCommittedBlockCount", "type": "integer", "description": "The number of committed blocks present in the blob. This header is returned only for append blobs." }, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise." 
} } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "appendblock" ] } ] }, "/{containerName}/{blob}?comp=appendblock&fromUrl": { "put": { "tags": [ "appendblob" ], "operationId": "AppendBlob_AppendBlockFromUrl", "description": "The Append Block operation commits a new block of data to the end of an existing append blob where the contents are read from a source url. The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later.", "parameters": [ { "$ref": "#/parameters/SourceUrl" }, { "$ref": "#/parameters/SourceRange" }, { "$ref": "#/parameters/SourceContentMD5" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ContentLength" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/BlobConditionMaxSize" }, { "$ref": "#/parameters/BlobConditionAppendPos" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/SourceIfModifiedSince" }, { "$ref": "#/parameters/SourceIfUnmodifiedSince" }, { "$ref": "#/parameters/SourceIfMatch" }, { "$ref": "#/parameters/SourceIfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The block was created.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." 
}, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-blob-append-offset": { "x-ms-client-name": "BlobAppendOffset", "type": "string", "description": "This response header is returned only for append operations. It returns the offset at which the block was committed, in bytes." }, "x-ms-blob-committed-block-count": { "x-ms-client-name": "BlobCommittedBlockCount", "type": "integer", "description": "The number of committed blocks present in the blob. This header is returned only for append blobs." 
} } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "appendblock" ] } ] } }, "definitions": { "KeyInfo": { "type": "object", "required": [ "Start", "Expiry" ], "description": "Key information", "properties": { "Start": { "description": "The date-time the key is active in ISO 8601 UTC time", "type": "string" }, "Expiry": { "description": "The date-time the key expires in ISO 8601 UTC time", "type": "string" } } }, "UserDelegationKey": { "type": "object", "required": [ "SignedOid", "SignedTid", "SignedStart", "SignedExpiry", "SignedService", "SignedVersion", "Value" ], "description": "A user delegation key", "properties": { "SignedOid": { "description": "The Azure Active Directory object ID in GUID format.", "type": "string" }, "SignedTid": { "description": "The Azure Active Directory tenant ID in GUID format", "type": "string" }, "SignedStart": { "description": "The date-time the key is active", "type": "string", "format": "date-time" }, "SignedExpiry": { "description": "The date-time the key expires", "type": "string", "format": "date-time" }, "SignedService": { "description": "Abbreviation of the Azure Storage service that accepts the key", "type": "string" }, "SignedVersion": { "description": "The service version that created the key", "type": "string" }, "Value": { "description": "The key as a base64 string", "type": "string" } } }, "PublicAccessType": { "type": "string", "enum": [ "container", "blob" ], "x-ms-enum": { "name": "PublicAccessType", "modelAsString": true } }, "CopyStatus": { "type": "string", "enum": [ "pending", "success", "aborted", "failed" ], "x-ms-enum": { "name": "CopyStatusType", "modelAsString": false } }, "LeaseDuration": { "type": "string", "enum": [ "infinite", "fixed" ], "x-ms-enum": { "name": 
"LeaseDurationType", "modelAsString": false } }, "LeaseState": { "type": "string", "enum": [ "available", "leased", "expired", "breaking", "broken" ], "x-ms-enum": { "name": "LeaseStateType", "modelAsString": false } }, "LeaseStatus": { "type": "string", "enum": [ "locked", "unlocked" ], "x-ms-enum": { "name": "LeaseStatusType", "modelAsString": false } }, "StorageError": { "type": "object", "properties": { "Code": { "type": "string" }, "Message": { "type": "string" } } }, "AccessPolicy": { "type": "object", "required": [ "Start", "Expiry", "Permission" ], "description": "An Access policy", "properties": { "Start": { "description": "the date-time the policy is active", "type": "string", "format": "date-time" }, "Expiry": { "description": "the date-time the policy expires", "type": "string", "format": "date-time" }, "Permission": { "description": "the permissions for the acl policy", "type": "string" } } }, "AccessTier": { "type": "string", "enum": [ "P4", "P6", "P10", "P20", "P30", "P40", "P50", "Hot", "Cool", "Archive" ], "x-ms-enum": { "name": "AccessTier", "modelAsString": true } }, "ArchiveStatus": { "type": "string", "enum": [ "rehydrate-pending-to-hot", "rehydrate-pending-to-cool" ], "x-ms-enum": { "name": "ArchiveStatus", "modelAsString": true } }, "BlobItem": { "xml": { "name": "Blob" }, "description": "An Azure Storage blob", "type": "object", "required": [ "Name", "Deleted", "Snapshot", "Properties" ], "properties": { "Name": { "type": "string" }, "Deleted": { "type": "boolean" }, "Snapshot": { "type": "string" }, "Properties": { "$ref": "#/definitions/BlobProperties" }, "Metadata": { "$ref": "#/definitions/Metadata" } } }, "BlobProperties": { "xml": { "name": "Properties" }, "description": "Properties of a blob", "type": "object", "required": [ "Etag", "Last-Modified" ], "properties": { "Creation-Time": { "type": "string", "format": "date-time-rfc1123" }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123" }, "Etag": { "type": "string", 
"format": "etag" }, "Content-Length": { "type": "integer", "format": "int64", "description": "Size in bytes" }, "Content-Type": { "type": "string" }, "Content-Encoding": { "type": "string" }, "Content-Language": { "type": "string" }, "Content-MD5": { "type": "string", "format": "byte" }, "Content-Disposition": { "type": "string" }, "Cache-Control": { "type": "string" }, "x-ms-blob-sequence-number": { "x-ms-client-name": "blobSequenceNumber", "type": "integer", "format": "int64" }, "BlobType": { "type": "string", "enum": [ "BlockBlob", "PageBlob", "AppendBlob" ], "x-ms-enum": { "name": "BlobType", "modelAsString": false } }, "LeaseStatus": { "$ref": "#/definitions/LeaseStatus" }, "LeaseState": { "$ref": "#/definitions/LeaseState" }, "LeaseDuration": { "$ref": "#/definitions/LeaseDuration" }, "CopyId": { "type": "string" }, "CopyStatus": { "$ref": "#/definitions/CopyStatus" }, "CopySource": { "type": "string" }, "CopyProgress": { "type": "string" }, "CopyCompletionTime": { "type": "string", "format": "date-time-rfc1123" }, "CopyStatusDescription": { "type": "string" }, "ServerEncrypted": { "type": "boolean" }, "IncrementalCopy": { "type": "boolean" }, "DestinationSnapshot": { "type": "string" }, "DeletedTime": { "type": "string", "format": "date-time-rfc1123" }, "RemainingRetentionDays": { "type": "integer" }, "AccessTier": { "$ref": "#/definitions/AccessTier" }, "AccessTierInferred": { "type": "boolean" }, "ArchiveStatus": { "$ref": "#/definitions/ArchiveStatus" }, "AccessTierChangeTime": { "type": "string", "format": "date-time-rfc1123" } } }, "ListBlobsFlatSegmentResponse": { "xml": { "name": "EnumerationResults" }, "description": "An enumeration of blobs", "type": "object", "required": [ "ServiceEndpoint", "ContainerName", "Segment" ], "properties": { "ServiceEndpoint": { "type": "string", "xml": { "attribute": true } }, "ContainerName": { "type": "string", "xml": { "attribute": true } }, "Prefix": { "type": "string" }, "Marker": { "type": "string" }, 
"MaxResults": { "type": "integer" }, "Delimiter": { "type": "string" }, "Segment": { "$ref": "#/definitions/BlobFlatListSegment" }, "NextMarker": { "type": "string" } } }, "ListBlobsHierarchySegmentResponse": { "xml": { "name": "EnumerationResults" }, "description": "An enumeration of blobs", "type": "object", "required": [ "ServiceEndpoint", "ContainerName", "Segment" ], "properties": { "ServiceEndpoint": { "type": "string", "xml": { "attribute": true } }, "ContainerName": { "type": "string", "xml": { "attribute": true } }, "Prefix": { "type": "string" }, "Marker": { "type": "string" }, "MaxResults": { "type": "integer" }, "Delimiter": { "type": "string" }, "Segment": { "$ref": "#/definitions/BlobHierarchyListSegment" }, "NextMarker": { "type": "string" } } }, "BlobFlatListSegment": { "xml": { "name": "Blobs" }, "required": [ "BlobItems" ], "type": "object", "properties": { "BlobItems": { "type": "array", "items": { "$ref": "#/definitions/BlobItem" } } } }, "BlobHierarchyListSegment": { "xml": { "name": "Blobs" }, "type": "object", "required": [ "BlobItems" ], "properties": { "BlobPrefixes": { "type": "array", "items": { "$ref": "#/definitions/BlobPrefix" } }, "BlobItems": { "type": "array", "items": { "$ref": "#/definitions/BlobItem" } } } }, "BlobPrefix": { "type": "object", "required": [ "Name" ], "properties": { "Name": { "type": "string" } } }, "Block": { "type": "object", "required": [ "Name", "Size" ], "description": "Represents a single block in a block blob. 
It describes the block's ID and size.", "properties": { "Name": { "description": "The base64 encoded block ID.", "type": "string" }, "Size": { "description": "The block size in bytes.", "type": "integer" } } }, "BlockList": { "type": "object", "properties": { "CommittedBlocks": { "xml": { "wrapped": true }, "type": "array", "items": { "$ref": "#/definitions/Block" } }, "UncommittedBlocks": { "xml": { "wrapped": true }, "type": "array", "items": { "$ref": "#/definitions/Block" } } } }, "BlockLookupList": { "type": "object", "properties": { "Committed": { "type": "array", "items": { "type": "string", "xml": { "name": "Committed" } } }, "Uncommitted": { "type": "array", "items": { "type": "string", "xml": { "name": "Uncommitted" } } }, "Latest": { "type": "array", "items": { "type": "string", "xml": { "name": "Latest" } } } }, "xml": { "name": "BlockList" } }, "ContainerItem": { "xml": { "name": "Container" }, "type": "object", "required": [ "Name", "Properties" ], "description": "An Azure Storage container", "properties": { "Name": { "type": "string" }, "Properties": { "$ref": "#/definitions/ContainerProperties" }, "Metadata": { "$ref": "#/definitions/Metadata" } } }, "ContainerProperties": { "type": "object", "required": [ "Last-Modified", "Etag" ], "description": "Properties of a container", "properties": { "Last-Modified": { "type": "string", "format": "date-time-rfc1123" }, "Etag": { "type": "string", "format": "etag" }, "LeaseStatus": { "$ref": "#/definitions/LeaseStatus" }, "LeaseState": { "$ref": "#/definitions/LeaseState" }, "LeaseDuration": { "$ref": "#/definitions/LeaseDuration" }, "PublicAccess": { "$ref": "#/definitions/PublicAccessType" }, "HasImmutabilityPolicy": { "type": "boolean" }, "HasLegalHold": { "type": "boolean" } } }, "ListContainersSegmentResponse": { "xml": { "name": "EnumerationResults" }, "description": "An enumeration of containers", "type": "object", "required": [ "ServiceEndpoint", "ContainerItems" ], "properties": { "ServiceEndpoint": 
{ "type": "string", "xml": { "attribute": true } }, "Prefix": { "type": "string" }, "Marker": { "type": "string" }, "MaxResults": { "type": "integer" }, "ContainerItems": { "xml": { "wrapped": true, "name": "Containers" }, "type": "array", "items": { "$ref": "#/definitions/ContainerItem" } }, "NextMarker": { "type": "string" } } }, "CorsRule": { "description": "CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain", "type": "object", "required": [ "AllowedOrigins", "AllowedMethods", "AllowedHeaders", "ExposedHeaders", "MaxAgeInSeconds" ], "properties": { "AllowedOrigins": { "description": "The origin domains that are permitted to make a request against the storage service via CORS. The origin domain is the domain from which the request originates. Note that the origin must be an exact case-sensitive match with the origin that the user age sends to the service. You can also use the wildcard character '*' to allow all origin domains to make requests via CORS.", "type": "string" }, "AllowedMethods": { "description": "The methods (HTTP request verbs) that the origin domain may use for a CORS request. 
(comma separated)", "type": "string" }, "AllowedHeaders": { "description": "the request headers that the origin domain may specify on the CORS request.", "type": "string" }, "ExposedHeaders": { "description": "The response headers that may be sent in the response to the CORS request and exposed by the browser to the request issuer", "type": "string" }, "MaxAgeInSeconds": { "description": "The maximum amount time that a browser should cache the preflight OPTIONS request.", "type": "integer", "minimum": 0 } } }, "ErrorCode": { "description": "Error codes returned by the service", "type": "string", "enum": [ "AccountAlreadyExists", "AccountBeingCreated", "AccountIsDisabled", "AuthenticationFailed", "AuthorizationFailure", "ConditionHeadersNotSupported", "ConditionNotMet", "EmptyMetadataKey", "InsufficientAccountPermissions", "InternalError", "InvalidAuthenticationInfo", "InvalidHeaderValue", "InvalidHttpVerb", "InvalidInput", "InvalidMd5", "InvalidMetadata", "InvalidQueryParameterValue", "InvalidRange", "InvalidResourceName", "InvalidUri", "InvalidXmlDocument", "InvalidXmlNodeValue", "Md5Mismatch", "MetadataTooLarge", "MissingContentLengthHeader", "MissingRequiredQueryParameter", "MissingRequiredHeader", "MissingRequiredXmlNode", "MultipleConditionHeadersNotSupported", "OperationTimedOut", "OutOfRangeInput", "OutOfRangeQueryParameterValue", "RequestBodyTooLarge", "ResourceTypeMismatch", "RequestUrlFailedToParse", "ResourceAlreadyExists", "ResourceNotFound", "ServerBusy", "UnsupportedHeader", "UnsupportedXmlNode", "UnsupportedQueryParameter", "UnsupportedHttpVerb", "AppendPositionConditionNotMet", "BlobAlreadyExists", "BlobNotFound", "BlobOverwritten", "BlobTierInadequateForContentLength", "BlockCountExceedsLimit", "BlockListTooLong", "CannotChangeToLowerTier", "CannotVerifyCopySource", "ContainerAlreadyExists", "ContainerBeingDeleted", "ContainerDisabled", "ContainerNotFound", "ContentLengthLargerThanTierLimit", "CopyAcrossAccountsNotSupported", "CopyIdMismatch", 
"FeatureVersionMismatch", "IncrementalCopyBlobMismatch", "IncrementalCopyOfEralierVersionSnapshotNotAllowed", "IncrementalCopySourceMustBeSnapshot", "InfiniteLeaseDurationRequired", "InvalidBlobOrBlock", "InvalidBlobTier", "InvalidBlobType", "InvalidBlockId", "InvalidBlockList", "InvalidOperation", "InvalidPageRange", "InvalidSourceBlobType", "InvalidSourceBlobUrl", "InvalidVersionForPageBlobOperation", "LeaseAlreadyPresent", "LeaseAlreadyBroken", "LeaseIdMismatchWithBlobOperation", "LeaseIdMismatchWithContainerOperation", "LeaseIdMismatchWithLeaseOperation", "LeaseIdMissing", "LeaseIsBreakingAndCannotBeAcquired", "LeaseIsBreakingAndCannotBeChanged", "LeaseIsBrokenAndCannotBeRenewed", "LeaseLost", "LeaseNotPresentWithBlobOperation", "LeaseNotPresentWithContainerOperation", "LeaseNotPresentWithLeaseOperation", "MaxBlobSizeConditionNotMet", "NoPendingCopyOperation", "OperationNotAllowedOnIncrementalCopyBlob", "PendingCopyOperation", "PreviousSnapshotCannotBeNewer", "PreviousSnapshotNotFound", "PreviousSnapshotOperationNotSupported", "SequenceNumberConditionNotMet", "SequenceNumberIncrementTooLarge", "SnapshotCountExceeded", "SnaphotOperationRateExceeded", "SnapshotsPresent", "SourceConditionNotMet", "SystemInUse", "TargetConditionNotMet", "UnauthorizedBlobOverwrite", "BlobBeingRehydrated", "BlobArchived", "BlobNotArchived" ], "x-ms-enum": { "name": "StorageErrorCode", "modelAsString": true } }, "GeoReplication": { "description": "Geo-Replication information for the Secondary Storage Service", "type": "object", "required": [ "Status", "LastSyncTime" ], "properties": { "Status": { "description": "The status of the secondary location", "type": "string", "enum": [ "live", "bootstrap", "unavailable" ], "x-ms-enum": { "name": "GeoReplicationStatusType", "modelAsString": true } }, "LastSyncTime": { "description": "A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available for read operations at the secondary. 
Primary writes after this point in time may or may not be available for reads.", "type": "string", "format": "date-time-rfc1123" } } }, "Logging": { "description": "Azure Analytics Logging settings.", "type": "object", "required": [ "Version", "Delete", "Read", "Write", "RetentionPolicy" ], "properties": { "Version": { "description": "The version of Storage Analytics to configure.", "type": "string" }, "Delete": { "description": "Indicates whether all delete requests should be logged.", "type": "boolean" }, "Read": { "description": "Indicates whether all read requests should be logged.", "type": "boolean" }, "Write": { "description": "Indicates whether all write requests should be logged.", "type": "boolean" }, "RetentionPolicy": { "$ref": "#/definitions/RetentionPolicy" } } }, "Metadata": { "type": "object", "additionalProperties": { "type": "string" } }, "Metrics": { "description": "a summary of request statistics grouped by API in hour or minute aggregates for blobs", "required": [ "Enabled" ], "properties": { "Version": { "description": "The version of Storage Analytics to configure.", "type": "string" }, "Enabled": { "description": "Indicates whether metrics are enabled for the Blob service.", "type": "boolean" }, "IncludeAPIs": { "description": "Indicates whether metrics should generate summary statistics for called API operations.", "type": "boolean" }, "RetentionPolicy": { "$ref": "#/definitions/RetentionPolicy" } } }, "PageList": { "description": "the list of pages", "type": "object", "properties": { "PageRange": { "type": "array", "items": { "$ref": "#/definitions/PageRange" } }, "ClearRange": { "type": "array", "items": { "$ref": "#/definitions/ClearRange" } } } }, "PageRange": { "type": "object", "required": [ "Start", "End" ], "properties": { "Start": { "type": "integer", "format": "int64", "xml": { "name": "Start" } }, "End": { "type": "integer", "format": "int64", "xml": { "name": "End" } } }, "xml": { "name": "PageRange" } }, "ClearRange": { "type": 
"object", "required": [ "Start", "End" ], "properties": { "Start": { "type": "integer", "format": "int64", "xml": { "name": "Start" } }, "End": { "type": "integer", "format": "int64", "xml": { "name": "End" } } }, "xml": { "name": "ClearRange" } }, "RetentionPolicy": { "description": "the retention policy which determines how long the associated data should persist", "type": "object", "required": [ "Enabled" ], "properties": { "Enabled": { "description": "Indicates whether a retention policy is enabled for the storage service", "type": "boolean" }, "Days": { "description": "Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this value will be deleted", "type": "integer", "minimum": 1 } } }, "SignedIdentifier": { "xml": { "name": "SignedIdentifier" }, "description": "signed identifier", "type": "object", "required": [ "Id", "AccessPolicy" ], "properties": { "Id": { "type": "string", "description": "a unique id" }, "AccessPolicy": { "$ref": "#/definitions/AccessPolicy" } } }, "SignedIdentifiers": { "description": "a collection of signed identifiers", "type": "array", "items": { "$ref": "#/definitions/SignedIdentifier" }, "xml": { "wrapped": true, "name": "SignedIdentifiers" } }, "StaticWebsite": { "description": "The properties that enable an account to host a static website", "type": "object", "required": [ "Enabled" ], "properties": { "Enabled": { "description": "Indicates whether this account is hosting a static website", "type": "boolean" }, "IndexDocument": { "description": "The default name of the index page under each directory", "type": "string" }, "ErrorDocument404Path": { "description": "The absolute path of the custom 404 page", "type": "string" } } }, "StorageServiceProperties": { "description": "Storage Service Properties.", "type": "object", "properties": { "Logging": { "$ref": "#/definitions/Logging" }, "HourMetrics": { "$ref": "#/definitions/Metrics" }, "MinuteMetrics": { "$ref": 
"#/definitions/Metrics" }, "Cors": { "description": "The set of CORS rules.", "type": "array", "items": { "$ref": "#/definitions/CorsRule" }, "xml": { "wrapped": true } }, "DefaultServiceVersion": { "description": "The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible values include version 2008-10-27 and all more recent versions", "type": "string" }, "DeleteRetentionPolicy": { "$ref": "#/definitions/RetentionPolicy" }, "StaticWebsite": { "$ref": "#/definitions/StaticWebsite" } } }, "StorageServiceStats": { "description": "Stats for the storage service.", "type": "object", "properties": { "GeoReplication": { "$ref": "#/definitions/GeoReplication" } } } }, "parameters": { "Url": { "name": "url", "description": "The URL of the service account, container, or blob that is the targe of the desired operation.", "required": true, "type": "string", "in": "path", "x-ms-skip-url-encoding": true }, "ApiVersionParameter": { "name": "x-ms-version", "x-ms-client-name": "version", "in": "header", "required": true, "type": "string", "description": "Specifies the version of the operation to use for this request.", "enum": [ "2018-11-09" ] }, "Blob": { "name": "blob", "in": "path", "required": true, "type": "string", "pattern": "^[a-zA-Z0-9]+(?:/[a-zA-Z0-9]+)*(?:\\.[a-zA-Z0-9]+){0,1}$", "minLength": 1, "maxLength": 1024, "x-ms-parameter-location": "method", "description": "The blob name." }, "BlobCacheControl": { "name": "x-ms-blob-cache-control", "x-ms-client-name": "blobCacheControl", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "blob-HTTP-headers" }, "description": "Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request." 
}, "BlobConditionAppendPos": { "name": "x-ms-blob-condition-appendpos", "x-ms-client-name": "appendPosition", "in": "header", "required": false, "type": "integer", "format": "int64", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "append-position-access-conditions" }, "description": "Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to this number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed)." }, "BlobConditionMaxSize": { "name": "x-ms-blob-condition-maxsize", "x-ms-client-name": "maxSize", "in": "header", "required": false, "type": "integer", "format": "int64", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "append-position-access-conditions" }, "description": "Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed)." 
}, "BlobPublicAccess": { "name": "x-ms-blob-public-access", "x-ms-client-name": "access", "in": "header", "required": false, "x-ms-parameter-location": "method", "description": "Specifies whether data in the container may be accessed publicly and the level of access", "type": "string", "enum": [ "container", "blob" ], "x-ms-enum": { "name": "PublicAccessType", "modelAsString": true } }, "AccessTier": { "name": "x-ms-access-tier", "x-ms-client-name": "tier", "in": "header", "required": true, "type": "string", "enum": [ "P4", "P6", "P10", "P20", "P30", "P40", "P50", "Hot", "Cool", "Archive" ], "x-ms-enum": { "name": "AccessTier", "modelAsString": true }, "x-ms-parameter-location": "method", "description": "Indicates the tier to be set on the blob." }, "BlobContentDisposition": { "name": "x-ms-blob-content-disposition", "x-ms-client-name": "blobContentDisposition", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "blob-HTTP-headers" }, "description": "Optional. Sets the blob's Content-Disposition header." }, "BlobContentEncoding": { "name": "x-ms-blob-content-encoding", "x-ms-client-name": "blobContentEncoding", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "blob-HTTP-headers" }, "description": "Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read request." }, "BlobContentLanguage": { "name": "x-ms-blob-content-language", "x-ms-client-name": "blobContentLanguage", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "blob-HTTP-headers" }, "description": "Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read request." 
}, "BlobContentLengthOptional": { "name": "x-ms-blob-content-length", "x-ms-client-name": "blobContentLength", "in": "header", "required": false, "type": "integer", "format": "int64", "x-ms-parameter-location": "method", "description": "This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary." }, "BlobContentLengthRequired": { "name": "x-ms-blob-content-length", "x-ms-client-name": "blobContentLength", "in": "header", "required": true, "type": "integer", "format": "int64", "x-ms-parameter-location": "method", "description": "This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary." }, "BlobContentMD5": { "name": "x-ms-blob-content-md5", "x-ms-client-name": "blobContentMD5", "in": "header", "required": false, "type": "string", "format": "byte", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "blob-HTTP-headers" }, "description": "Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks were validated when each was uploaded." }, "BlobContentType": { "name": "x-ms-blob-content-type", "x-ms-client-name": "blobContentType", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "blob-HTTP-headers" }, "description": "Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request." }, "BlobSequenceNumber": { "name": "x-ms-blob-sequence-number", "x-ms-client-name": "blobSequenceNumber", "in": "header", "required": false, "type": "integer", "format": "int64", "default": 0, "x-ms-parameter-location": "method", "description": "Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of the sequence number must be between 0 and 2^63 - 1." 
}, "BlockId": { "name": "blockid", "x-ms-client-name": "blockId", "in": "query", "type": "string", "required": true, "x-ms-parameter-location": "method", "description": "A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the same size for each block." }, "BlockListType": { "name": "blocklisttype", "x-ms-client-name": "listType", "in": "query", "required": true, "default": "committed", "x-ms-parameter-location": "method", "description": "Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together.", "type": "string", "enum": [ "committed", "uncommitted", "all" ], "x-ms-enum": { "name": "BlockListType", "modelAsString": false } }, "Body": { "name": "body", "in": "body", "required": true, "schema": { "type": "object", "format": "file" }, "x-ms-parameter-location": "method", "description": "Initial data" }, "ContainerAcl": { "name": "containerAcl", "in": "body", "schema": { "$ref": "#/definitions/SignedIdentifiers" }, "x-ms-parameter-location": "method", "description": "the acls for the container" }, "CopyId": { "name": "copyid", "x-ms-client-name": "copyId", "in": "query", "required": true, "type": "string", "x-ms-parameter-location": "method", "description": "The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation." }, "ClientRequestId": { "name": "x-ms-client-request-id", "x-ms-client-name": "requestId", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "description": "Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled." 
}, "ContainerName": { "name": "containerName", "in": "path", "required": true, "type": "string", "x-ms-parameter-location": "method", "description": "The container name." }, "ContentLength": { "name": "Content-Length", "in": "header", "required": true, "type": "integer", "format": "int64", "x-ms-parameter-location": "method", "description": "The length of the request." }, "ContentMD5": { "name": "Content-MD5", "x-ms-client-name": "transactionalContentMD5", "in": "header", "required": false, "type": "string", "format": "byte", "x-ms-parameter-location": "method", "description": "Specify the transactional md5 for the body, to be validated by the service." }, "CopySource": { "name": "x-ms-copy-source", "x-ms-client-name": "copySource", "in": "header", "required": true, "type": "string", "format": "url", "x-ms-parameter-location": "method", "description": "Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob must either be public or must be authenticated via a shared access signature." }, "DeleteSnapshots": { "name": "x-ms-delete-snapshots", "x-ms-client-name": "deleteSnapshots", "description": "Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself", "x-ms-parameter-location": "method", "in": "header", "required": false, "type": "string", "enum": [ "include", "only" ], "x-ms-enum": { "name": "DeleteSnapshotsOptionType", "modelAsString": false } }, "Delimiter": { "name": "delimiter", "description": "When the request includes this parameter, the operation returns a BlobPrefix element in the response body that acts as a placeholder for all blobs whose names begin with the same substring up to the appearance of the delimiter character. 
The delimiter may be a single character or a string.", "type": "string", "x-ms-parameter-location": "method", "in": "query", "required": true }, "GetRangeContentMD5": { "name": "x-ms-range-get-content-md5", "x-ms-client-name": "rangeGetContentMD5", "in": "header", "required": false, "type": "boolean", "x-ms-parameter-location": "method", "description": "When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size." }, "IfMatch": { "name": "If-Match", "x-ms-client-name": "ifMatch", "in": "header", "required": false, "type": "string", "format": "etag", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "modified-access-conditions" }, "description": "Specify an ETag value to operate only on blobs with a matching value." }, "IfModifiedSince": { "name": "If-Modified-Since", "x-ms-client-name": "ifModifiedSince", "in": "header", "required": false, "type": "string", "format": "date-time-rfc1123", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "modified-access-conditions" }, "description": "Specify this header value to operate only on a blob if it has been modified since the specified date/time." }, "IfNoneMatch": { "name": "If-None-Match", "x-ms-client-name": "ifNoneMatch", "in": "header", "required": false, "type": "string", "format": "etag", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "modified-access-conditions" }, "description": "Specify an ETag value to operate only on blobs without a matching value." 
}, "IfUnmodifiedSince": { "name": "If-Unmodified-Since", "x-ms-client-name": "ifUnmodifiedSince", "in": "header", "required": false, "type": "string", "format": "date-time-rfc1123", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "modified-access-conditions" }, "description": "Specify this header value to operate only on a blob if it has not been modified since the specified date/time." }, "IfSequenceNumberEqualTo": { "name": "x-ms-if-sequence-number-eq", "x-ms-client-name": "ifSequenceNumberEqualTo", "in": "header", "required": false, "type": "integer", "format": "int64", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "sequence-number-access-conditions" }, "description": "Specify this header value to operate only on a blob if it has the specified sequence number." }, "IfSequenceNumberLessThan": { "name": "x-ms-if-sequence-number-lt", "x-ms-client-name": "ifSequenceNumberLessThan", "in": "header", "required": false, "type": "integer", "format": "int64", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "sequence-number-access-conditions" }, "description": "Specify this header value to operate only on a blob if it has a sequence number less than the specified." }, "IfSequenceNumberLessThanOrEqualTo": { "name": "x-ms-if-sequence-number-le", "x-ms-client-name": "ifSequenceNumberLessThanOrEqualTo", "in": "header", "required": false, "type": "integer", "format": "int64", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "sequence-number-access-conditions" }, "description": "Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified." 
}, "KeyInfo": { "name": "KeyInfo", "in": "body", "x-ms-parameter-location": "method", "required": true, "schema": { "$ref": "#/definitions/KeyInfo" } }, "ListBlobsInclude": { "name": "include", "in": "query", "required": false, "type": "array", "collectionFormat": "csv", "items": { "type": "string", "enum": [ "copy", "deleted", "metadata", "snapshots", "uncommittedblobs" ], "x-ms-enum": { "name": "ListBlobsIncludeItem", "modelAsString": false } }, "x-ms-parameter-location": "method", "description": "Include this parameter to specify one or more datasets to include in the response." }, "ListContainersInclude": { "name": "include", "in": "query", "required": false, "type": "string", "enum": [ "metadata" ], "x-ms-enum": { "name": "ListContainersIncludeType", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Include this parameter to specify that the container's metadata be returned as part of the response body." }, "LeaseBreakPeriod": { "name": "x-ms-lease-break-period", "x-ms-client-name": "breakPeriod", "in": "header", "required": false, "type": "integer", "x-ms-parameter-location": "method", "description": "For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has expired, but the lease may be held for longer than the break period. If this header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease breaks immediately." 
}, "LeaseDuration": { "name": "x-ms-lease-duration", "x-ms-client-name": "duration", "in": "header", "required": false, "type": "integer", "x-ms-parameter-location": "method", "description": "Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration cannot be changed using renew or change." }, "LeaseIdOptional": { "name": "x-ms-lease-id", "x-ms-client-name": "leaseId", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "lease-access-conditions" }, "description": "If specified, the operation only succeeds if the resource's lease is active and matches this ID." }, "LeaseIdRequired": { "name": "x-ms-lease-id", "x-ms-client-name": "leaseId", "in": "header", "required": true, "type": "string", "x-ms-parameter-location": "method", "description": "Specifies the current lease ID on the resource." }, "Marker": { "name": "marker", "in": "query", "required": false, "type": "string", "description": "A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker value within the response body if the listing operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client.", "x-ms-parameter-location": "method" }, "MaxResults": { "name": "maxresults", "in": "query", "required": false, "type": "integer", "minimum": 1, "x-ms-parameter-location": "method", "description": "Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. 
Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the default of 5000." }, "Metadata": { "name": "x-ms-meta", "x-ms-client-name": "metadata", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "description": "Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information.", "x-ms-header-collection-prefix": "x-ms-meta-" }, "Prefix": { "name": "prefix", "in": "query", "required": false, "type": "string", "description": "Filters the results to return only containers whose name begins with the specified prefix.", "x-ms-parameter-location": "method" }, "PrevSnapshot": { "name": "prevsnapshot", "in": "query", "required": false, "type": "string", "x-ms-parameter-location": "method", "description": "Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response will contain only pages that were changed between target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots are currently supported only for blobs created on or after January 1, 2016." 
}, "ProposedLeaseIdOptional": { "name": "x-ms-proposed-lease-id", "x-ms-client-name": "proposedLeaseId", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "description": "Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats." }, "ProposedLeaseIdRequired": { "name": "x-ms-proposed-lease-id", "x-ms-client-name": "proposedLeaseId", "in": "header", "required": true, "type": "string", "x-ms-parameter-location": "method", "description": "Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats." }, "Range": { "name": "x-ms-range", "x-ms-client-name": "range", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "description": "Return only the bytes of the blob in the specified range." }, "RangeRequiredPutPageFromUrl": { "name": "x-ms-range", "x-ms-client-name": "range", "in": "header", "required": true, "type": "string", "x-ms-parameter-location": "method", "description": "The range of bytes to which the source range would be written. The range should be 512 aligned and range-end is required." }, "SequenceNumberAction": { "name": "x-ms-sequence-number-action", "x-ms-client-name": "sequenceNumberAction", "in": "header", "required": true, "x-ms-parameter-location": "method", "description": "Required if the x-ms-blob-sequence-number header is set for the request. This property applies to page blobs only. 
This property indicates how the service should modify the blob's sequence number", "type": "string", "enum": [ "max", "update", "increment" ], "x-ms-enum": { "name": "SequenceNumberActionType", "modelAsString": false } }, "Snapshot": { "name": "snapshot", "in": "query", "required": false, "type": "string", "x-ms-parameter-location": "method", "description": "The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see Creating a Snapshot of a Blob." }, "SourceContentMD5": { "name": "x-ms-source-content-md5", "x-ms-client-name": "sourceContentMD5", "in": "header", "required": false, "type": "string", "format": "byte", "x-ms-parameter-location": "method", "description": "Specify the md5 calculated for the range of bytes that must be read from the copy source." }, "SourceRange": { "name": "x-ms-source-range", "x-ms-client-name": "sourceRange", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "description": "Bytes of source data in the specified range." }, "SourceRangeRequiredPutPageFromUrl": { "name": "x-ms-source-range", "x-ms-client-name": "sourceRange", "in": "header", "required": true, "type": "string", "x-ms-parameter-location": "method", "description": "Bytes of source data in the specified range. The length of this range should match the ContentLength header and x-ms-range/Range destination range header." }, "SourceIfMatch": { "name": "x-ms-source-if-match", "x-ms-client-name": "sourceIfMatch", "in": "header", "required": false, "type": "string", "format": "etag", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "source-modified-access-conditions" }, "description": "Specify an ETag value to operate only on blobs with a matching value." 
}, "SourceIfModifiedSince": { "name": "x-ms-source-if-modified-since", "x-ms-client-name": "sourceIfModifiedSince", "in": "header", "required": false, "type": "string", "format": "date-time-rfc1123", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "source-modified-access-conditions" }, "description": "Specify this header value to operate only on a blob if it has been modified since the specified date/time." }, "SourceIfNoneMatch": { "name": "x-ms-source-if-none-match", "x-ms-client-name": "sourceIfNoneMatch", "in": "header", "required": false, "type": "string", "format": "etag", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "source-modified-access-conditions" }, "description": "Specify an ETag value to operate only on blobs without a matching value." }, "SourceIfUnmodifiedSince": { "name": "x-ms-source-if-unmodified-since", "x-ms-client-name": "sourceIfUnmodifiedSince", "in": "header", "required": false, "type": "string", "format": "date-time-rfc1123", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "source-modified-access-conditions" }, "description": "Specify this header value to operate only on a blob if it has not been modified since the specified date/time." }, "SourceLeaseId": { "name": "x-ms-source-lease-id", "x-ms-client-name": "sourceLeaseId", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "description": "A lease ID for the source path. If specified, the source path must have an active lease and the leaase ID must match." }, "SourceUrl": { "name": "x-ms-copy-source", "x-ms-client-name": "sourceUrl", "in": "header", "required": true, "type": "string", "format": "url", "x-ms-parameter-location": "method", "description": "Specify a URL to the copy source." 
}, "StorageServiceProperties": { "name": "StorageServiceProperties", "in": "body", "required": true, "schema": { "$ref": "#/definitions/StorageServiceProperties" }, "x-ms-parameter-location": "method", "description": "The StorageService properties." }, "Timeout": { "name": "timeout", "in": "query", "required": false, "type": "integer", "minimum": 0, "x-ms-parameter-location": "method", "description": "The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations." } } }azure-storage-blob-go-0.10.0/azblob/chunkwriting.go000066400000000000000000000151401367515646300222340ustar00rootroot00000000000000package azblob import ( "bytes" "context" "encoding/base64" "encoding/binary" "errors" "fmt" "io" "sync" guuid "github.com/google/uuid" ) // blockWriter provides methods to upload blocks that represent a file to a server and commit them. // This allows us to provide a local implementation that fakes the server for hermetic testing. type blockWriter interface { StageBlock(context.Context, string, io.ReadSeeker, LeaseAccessConditions, []byte) (*BlockBlobStageBlockResponse, error) CommitBlockList(context.Context, []string, BlobHTTPHeaders, Metadata, BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error) } // copyFromReader copies a source io.Reader to blob storage using concurrent uploads. // TODO(someone): The existing model provides a buffer size and buffer limit as limiting factors. The buffer size is probably // useless other than needing to be above some number, as the network stack is going to hack up the buffer over some size. The // max buffers is providing a cap on how much memory we use (by multiplying it times the buffer size) and how many go routines can upload // at a time. I think having a single max memory dial would be more efficient. We can choose an internal buffer size that works // well, 4 MiB or 8 MiB, and autoscale to as many goroutines within the memory limit. 
This gives a single dial to tweak and we can
// choose a max value for the memory setting based on internal transfers within Azure (which will give us the maximum throughput model).
// We can even provide a utility to dial this number in for customer networks to optimize their copies.
func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamToBlockBlobOptions) (*BlockBlobCommitBlockListResponse, error) {
	o.defaults()

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	cp := &copier{
		ctx:    ctx,
		cancel: cancel,
		reader: from,
		to:     to,
		id:     newID(),
		o:      o,
		// ch has capacity 1 so sendChunk can stay one chunk ahead of the writers;
		// errCh has capacity 1 so only the first writer error is recorded (see writer()).
		ch:    make(chan copierChunk, 1),
		errCh: make(chan error, 1),
		buffers: sync.Pool{
			New: func() interface{} {
				return make([]byte, o.BufferSize)
			},
		},
	}

	// Starts the pools of concurrent writers.
	cp.wg.Add(o.MaxBuffers)
	for i := 0; i < o.MaxBuffers; i++ {
		go cp.writer()
	}

	// Send all our chunks until we get an error.
	var err error
	for {
		if err = cp.sendChunk(); err != nil {
			break
		}
	}
	// If the error is not EOF, then we have a problem.
	if err != nil && !errors.Is(err, io.EOF) {
		return nil, err
	}

	// Close out our upload.
	if err := cp.close(); err != nil {
		return nil, err
	}

	return cp.result, nil
}

// copier streams a file via chunks in parallel from a reader representing a file.
// Do not use directly, instead use copyFromReader().
type copier struct {
	// ctx holds the context of a copier. This is normally a faux pas to store a Context in a struct. In this case,
	// the copier has the lifetime of a function call, so its fine.
	ctx    context.Context
	cancel context.CancelFunc

	// reader is the source to be written to storage.
	reader io.Reader
	// to is the location we are writing our chunks to.
	to blockWriter

	// id generates the Base64 block IDs handed to StageBlock and remembers every ID issued.
	id *id
	o  UploadStreamToBlockBlobOptions

	// num is the current chunk we are on.
	num int32
	// ch is used to pass the next chunk of data from our reader to one of the writers.
	ch chan copierChunk
	// errCh is used to hold the first error from our concurrent writers.
	errCh chan error
	// wg provides a count of how many writers we are waiting to finish.
	wg sync.WaitGroup
	// buffers provides a pool of chunks that can be reused.
	buffers sync.Pool

	// result holds the final result from blob storage after we have submitted all chunks.
	result *BlockBlobCommitBlockListResponse
}

// copierChunk is a buffer of file data paired with the block ID it will be staged under.
type copierChunk struct {
	buffer []byte
	id     string
}

// getErr returns an error by priority. First, if a function set an error, it returns that error. Next, if the Context has an error
// it returns that error. Otherwise it is nil. getErr supports only returning an error once per copier.
func (c *copier) getErr() error {
	select {
	case err := <-c.errCh:
		return err
	default:
	}
	return c.ctx.Err()
}

// sendChunk reads data from out internal reader, creates a chunk, and sends it to be written via a channel.
// sendChunk returns io.EOF when the reader returns an io.EOF or io.ErrUnexpectedEOF.
func (c *copier) sendChunk() error {
	// Bail out early if a writer already failed or the context was canceled.
	if err := c.getErr(); err != nil {
		return err
	}

	buffer := c.buffers.Get().([]byte)
	n, err := io.ReadFull(c.reader, buffer)
	switch {
	case err == nil && n == 0:
		// Nothing read and no error: nothing to send this round.
		// NOTE(review): this path does not return buffer to the pool.
		return nil
	case err == nil:
		// A full buffer was read; hand it (trimmed to n) to a writer with a fresh block ID.
		c.ch <- copierChunk{
			buffer: buffer[0:n],
			id:     c.id.next(),
		}
		return nil
	case err != nil && (err == io.EOF || err == io.ErrUnexpectedEOF) && n == 0:
		// Clean end of stream with no data: signal completion.
		// NOTE(review): this path does not return buffer to the pool.
		return io.EOF
	}
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		// Final partial read: send the remaining bytes, then signal completion.
		c.ch <- copierChunk{
			buffer: buffer[0:n],
			id:     c.id.next(),
		}
		return io.EOF
	}
	// A real read error: prefer a previously recorded writer/context error if one exists.
	if err := c.getErr(); err != nil {
		return err
	}
	return err
}

// writer writes chunks sent on a channel. It runs until the channel is closed,
// recording only the first non-cancellation error into errCh (later errors are dropped
// by the select's default case) and canceling the shared context so the other
// goroutines stop.
func (c *copier) writer() {
	defer c.wg.Done()

	for chunk := range c.ch {
		if err := c.write(chunk); err != nil {
			// context.Canceled is ignored here: the loop keeps draining the channel
			// so sendChunk never blocks on a full channel during shutdown.
			if !errors.Is(err, context.Canceled) {
				select {
				case c.errCh <- err:
					c.cancel()
				default:
				}
				return
			}
		}
	}
}

// write uploads a chunk to blob storage. The chunk's buffer is always returned
// to the pool, whether or not the upload succeeds.
func (c *copier) write(chunk copierChunk) error {
	defer c.buffers.Put(chunk.buffer)

	if err := c.ctx.Err(); err != nil {
		return err
	}
	_, err := c.to.StageBlock(c.ctx, chunk.id, bytes.NewReader(chunk.buffer), LeaseAccessConditions{}, nil)
	if err != nil {
		return fmt.Errorf("write error: %w", err)
	}
	return nil
}

// close commits our blocks to blob storage and closes our writer.
// Ordering matters: closing ch lets the writers drain and exit, wg.Wait ensures all
// uploads finished, and only then is the block list committed (skipped if any writer failed).
func (c *copier) close() error {
	close(c.ch)
	c.wg.Wait()

	if err := c.getErr(); err != nil {
		return err
	}

	var err error
	c.result, err = c.to.CommitBlockList(c.ctx, c.id.issued(), c.o.BlobHTTPHeaders, c.o.Metadata, c.o.AccessConditions)
	return err
}

// id allows the creation of unique IDs based on UUID4 + an int32. This autoincrements.
type id struct {
	// u holds the UUID bytes at the front; the counter is encoded just past them by next().
	u   [64]byte
	num uint32
	all []string
}

// newID constructs a new id.
func newID() *id {
	uu := guuid.New()
	u := [64]byte{}
	copy(u[:], uu[:])
	return &id{u: u}
}

// next returns the next ID. This is not thread-safe.
// It encodes the incrementing counter big-endian immediately after the UUID bytes,
// Base64-encodes the whole 64-byte buffer, and records the result in id.all.
func (id *id) next() string {
	defer func() { id.num++ }()

	binary.BigEndian.PutUint32((id.u[len(guuid.UUID{}):]), id.num)
	str := base64.StdEncoding.EncodeToString(id.u[:])
	id.all = append(id.all, str)

	return str
}

// issued returns all ids that have been issued. This returned value shares the internal slice so it is not safe to modify the return.
// The value is only valid until the next time next() is called.
func (id *id) issued() []string { return id.all } azure-storage-blob-go-0.10.0/azblob/chunkwriting_test.go000066400000000000000000000153461367515646300233030ustar00rootroot00000000000000package azblob import ( "context" "crypto/md5" "errors" "fmt" "io" "math/rand" "os" "path/filepath" "strings" "sync/atomic" "testing" "time" ) const finalFileName = "final" type fakeBlockWriter struct { path string block int32 errOnBlock int32 } func newFakeBlockWriter() *fakeBlockWriter { f := &fakeBlockWriter{ path: filepath.Join(os.TempDir(), newUUID().String()), block: -1, errOnBlock: -1, } if err := os.MkdirAll(f.path, 0700); err != nil { panic(err) } return f } func (f *fakeBlockWriter) StageBlock(ctx context.Context, blockID string, r io.ReadSeeker, cond LeaseAccessConditions, md5 []byte) (*BlockBlobStageBlockResponse, error) { n := atomic.AddInt32(&f.block, 1) if n == f.errOnBlock { return nil, io.ErrNoProgress } blockID = strings.Replace(blockID, "/", "slash", -1) fp, err := os.OpenFile(filepath.Join(f.path, blockID), os.O_CREATE+os.O_WRONLY, 0600) if err != nil { return nil, fmt.Errorf("could not create a stage block file: %s", err) } defer fp.Close() if _, err := io.Copy(fp, r); err != nil { return nil, err } return &BlockBlobStageBlockResponse{}, nil } func (f *fakeBlockWriter) CommitBlockList(ctx context.Context, blockIDs []string, headers BlobHTTPHeaders, meta Metadata, access BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error) { dst, err := os.OpenFile(filepath.Join(f.path, finalFileName), os.O_CREATE+os.O_WRONLY, 0600) if err != nil { return nil, err } defer dst.Close() for _, id := range blockIDs { id = strings.Replace(id, "/", "slash", -1) src, err := os.Open(filepath.Join(f.path, id)) if err != nil { return nil, fmt.Errorf("could not combine chunk %s: %s", id, err) } _, err = io.Copy(dst, src) src.Close() if err != nil { return nil, fmt.Errorf("problem writing final file from chunks: %s", err) } } return &BlockBlobCommitBlockListResponse{}, nil } func 
(f *fakeBlockWriter) cleanup() { os.RemoveAll(f.path) } func (f *fakeBlockWriter) final() string { return filepath.Join(f.path, finalFileName) } func createSrcFile(size int) (string, error) { p := filepath.Join(os.TempDir(), newUUID().String()) fp, err := os.OpenFile(p, os.O_CREATE+os.O_WRONLY, 0600) if err != nil { return "", fmt.Errorf("could not create source file: %s", err) } defer fp.Close() lr := &io.LimitedReader{R: rand.New(rand.NewSource(time.Now().UnixNano())), N: int64(size)} copied, err := io.Copy(fp, lr) switch { case err != nil && err != io.EOF: return "", fmt.Errorf("copying %v: %s", size, err) case copied != int64(size): return "", fmt.Errorf("copying %v: copied %d bytes, expected %d", size, copied, size) } return p, nil } func fileMD5(p string) string { f, err := os.Open(p) if err != nil { panic(err) } defer f.Close() h := md5.New() if _, err := io.Copy(h, f); err != nil { panic(err) } return fmt.Sprintf("%x", h.Sum(nil)) } func TestGetErr(t *testing.T) { t.Parallel() canceled, cancel := context.WithCancel(context.Background()) cancel() err := errors.New("error") tests := []struct { desc string ctx context.Context err error want error }{ {"No errors", context.Background(), nil, nil}, {"Context was cancelled", canceled, nil, context.Canceled}, {"Context was cancelled but had error", canceled, err, err}, {"Err returned", context.Background(), err, err}, } for _, test := range tests { c := copier{errCh: make(chan error, 1), ctx: test.ctx} if test.err != nil { c.errCh <- test.err } got := c.getErr() if test.want != got { t.Errorf("TestGetErr(%s): got %v, want %v", test.desc, got, test.want) } } } func TestCopyFromReader(t *testing.T) { t.Parallel() canceled, cancel := context.WithCancel(context.Background()) cancel() tests := []struct { desc string ctx context.Context o UploadStreamToBlockBlobOptions fileSize int uploadErr bool err bool }{ { desc: "context was cancelled", ctx: canceled, err: true, }, { desc: "Send file(0 KiB) with default 
UploadStreamToBlockBlobOptions", ctx: context.Background(), fileSize: 0, }, { desc: "Send file(10 KiB) with default UploadStreamToBlockBlobOptions", ctx: context.Background(), fileSize: 10 * 1024, }, { desc: "Send file(10 KiB) with default UploadStreamToBlockBlobOptions set to azcopy settings", ctx: context.Background(), fileSize: 10 * 1024, o: UploadStreamToBlockBlobOptions{MaxBuffers: 5, BufferSize: 8 * 1024 * 1024}, }, { desc: "Send file(1 MiB) with default UploadStreamToBlockBlobOptions", ctx: context.Background(), fileSize: _1MiB, }, { desc: "Send file(1 MiB) with default UploadStreamToBlockBlobOptions set to azcopy settings", ctx: context.Background(), fileSize: _1MiB, o: UploadStreamToBlockBlobOptions{MaxBuffers: 5, BufferSize: 8 * 1024 * 1024}, }, { desc: "Send file(1.5 MiB) with default UploadStreamToBlockBlobOptions", ctx: context.Background(), fileSize: _1MiB + 500*1024, }, { desc: "Send file(1.5 MiB) with 2 writers", ctx: context.Background(), fileSize: _1MiB + 500*1024 + 1, o: UploadStreamToBlockBlobOptions{MaxBuffers: 2}, }, { desc: "Send file(12 MiB) with 3 writers and 1 MiB buffer and a write error", ctx: context.Background(), fileSize: 12 * _1MiB, o: UploadStreamToBlockBlobOptions{MaxBuffers: 2, BufferSize: _1MiB}, uploadErr: true, err: true, }, { desc: "Send file(12 MiB) with 3 writers and 1.5 MiB buffer", ctx: context.Background(), fileSize: 12 * _1MiB, o: UploadStreamToBlockBlobOptions{MaxBuffers: 2, BufferSize: _1MiB + .5*_1MiB}, }, { desc: "Send file(12 MiB) with default UploadStreamToBlockBlobOptions set to azcopy settings", ctx: context.Background(), fileSize: 12 * _1MiB, o: UploadStreamToBlockBlobOptions{MaxBuffers: 5, BufferSize: 8 * 1024 * 1024}, }, } for _, test := range tests { p, err := createSrcFile(test.fileSize) if err != nil { panic(err) } defer os.Remove(p) from, err := os.Open(p) if err != nil { panic(err) } br := newFakeBlockWriter() defer br.cleanup() if test.uploadErr { br.errOnBlock = 1 } _, err = copyFromReader(test.ctx, 
from, br, test.o) switch { case err == nil && test.err: t.Errorf("TestCopyFromReader(%s): got err == nil, want err != nil", test.desc) continue case err != nil && !test.err: t.Errorf("TestCopyFromReader(%s): got err == %s, want err == nil", test.desc, err) continue case err != nil: continue } want := fileMD5(p) got := fileMD5(br.final()) if got != want { t.Errorf("TestCopyFromReader(%s): MD5 not the same: got %s, want %s", test.desc, got, want) } } } azure-storage-blob-go-0.10.0/azblob/highlevel.go000066400000000000000000000317601367515646300214750ustar00rootroot00000000000000package azblob import ( "context" "encoding/base64" "io" "net/http" "bytes" "os" "sync" "time" "errors" "github.com/Azure/azure-pipeline-go/pipeline" ) // CommonResponse returns the headers common to all blob REST API responses. type CommonResponse interface { // ETag returns the value for header ETag. ETag() ETag // LastModified returns the value for header Last-Modified. LastModified() time.Time // RequestID returns the value for header x-ms-request-id. RequestID() string // Date returns the value for header Date. Date() time.Time // Version returns the value for header x-ms-version. Version() string // Response returns the raw HTTP response object. Response() *http.Response } // UploadToBlockBlobOptions identifies options used by the UploadBufferToBlockBlob and UploadFileToBlockBlob functions. type UploadToBlockBlobOptions struct { // BlockSize specifies the block size to use; the default (and maximum size) is BlockBlobMaxStageBlockBytes. BlockSize int64 // Progress is a function that is invoked periodically as bytes are sent to the BlockBlobURL. // Note that the progress reporting is not always increasing; it can go down when retrying a request. Progress pipeline.ProgressReceiver // BlobHTTPHeaders indicates the HTTP headers to be associated with the blob. BlobHTTPHeaders BlobHTTPHeaders // Metadata indicates the metadata to be associated with the blob when PutBlockList is called. 
Metadata Metadata // AccessConditions indicates the access conditions for the block blob. AccessConditions BlobAccessConditions // Parallelism indicates the maximum number of blocks to upload in parallel (0=default) Parallelism uint16 } // UploadBufferToBlockBlob uploads a buffer in blocks to a block blob. func UploadBufferToBlockBlob(ctx context.Context, b []byte, blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) { bufferSize := int64(len(b)) if o.BlockSize == 0 { // If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error if bufferSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks { return nil, errors.New("buffer is too large to upload to a block blob") } // If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request if bufferSize <= BlockBlobMaxUploadBlobBytes { o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified } else { o.BlockSize = bufferSize / BlockBlobMaxBlocks // buffer / max blocks = block size to use all 50,000 blocks if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB o.BlockSize = BlobDefaultDownloadBlockSize } // StageBlock will be called with blockSize blocks and a Parallelism of (BufferSize / BlockSize). 
} } if bufferSize <= BlockBlobMaxUploadBlobBytes { // If the size can fit in 1 Upload call, do it this way var body io.ReadSeeker = bytes.NewReader(b) if o.Progress != nil { body = pipeline.NewRequestBodyProgress(body, o.Progress) } return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions) } var numBlocks = uint16(((bufferSize - 1) / o.BlockSize) + 1) blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs progress := int64(0) progressLock := &sync.Mutex{} err := DoBatchTransfer(ctx, BatchTransferOptions{ OperationName: "UploadBufferToBlockBlob", TransferSize: bufferSize, ChunkSize: o.BlockSize, Parallelism: o.Parallelism, Operation: func(offset int64, count int64, ctx context.Context) error { // This function is called once per block. // It is passed this block's offset within the buffer and its count of bytes // Prepare to read the proper block/section of the buffer var body io.ReadSeeker = bytes.NewReader(b[offset : offset+count]) blockNum := offset / o.BlockSize if o.Progress != nil { blockProgress := int64(0) body = pipeline.NewRequestBodyProgress(body, func(bytesTransferred int64) { diff := bytesTransferred - blockProgress blockProgress = bytesTransferred progressLock.Lock() // 1 goroutine at a time gets a progress report progress += diff o.Progress(progress) progressLock.Unlock() }) } // Block IDs are unique values to avoid issue if 2+ clients are uploading blocks // at the same time causing PutBlockList to get a mix of blocks from all the clients. 
blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes()) _, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions, nil) return err }, }) if err != nil { return nil, err } // All put blocks were successful, call Put Block List to finalize the blob return blockBlobURL.CommitBlockList(ctx, blockIDList, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions) } // UploadFileToBlockBlob uploads a file in blocks to a block blob. func UploadFileToBlockBlob(ctx context.Context, file *os.File, blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) { stat, err := file.Stat() if err != nil { return nil, err } m := mmf{} // Default to an empty slice; used for 0-size file if stat.Size() != 0 { m, err = newMMF(file, false, 0, int(stat.Size())) if err != nil { return nil, err } defer m.unmap() } return UploadBufferToBlockBlob(ctx, m, blockBlobURL, o) } /////////////////////////////////////////////////////////////////////////////// const BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB // DownloadFromBlobOptions identifies options used by the DownloadBlobToBuffer and DownloadBlobToFile functions. type DownloadFromBlobOptions struct { // BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize. BlockSize int64 // Progress is a function that is invoked periodically as bytes are received. Progress pipeline.ProgressReceiver // AccessConditions indicates the access conditions used when making HTTP GET requests against the blob. AccessConditions BlobAccessConditions // Parallelism indicates the maximum number of blocks to download in parallel (0=default) Parallelism uint16 // RetryReaderOptionsPerBlock is used when downloading each block. RetryReaderOptionsPerBlock RetryReaderOptions } // downloadBlobToBuffer downloads an Azure blob to a buffer with parallel. 
func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64, b []byte, o DownloadFromBlobOptions, initialDownloadResponse *DownloadResponse) error { if o.BlockSize == 0 { o.BlockSize = BlobDefaultDownloadBlockSize } if count == CountToEnd { // If size not specified, calculate it if initialDownloadResponse != nil { count = initialDownloadResponse.ContentLength() - offset // if we have the length, use it } else { // If we don't have the length at all, get it dr, err := blobURL.Download(ctx, 0, CountToEnd, o.AccessConditions, false) if err != nil { return err } count = dr.ContentLength() - offset } } // Prepare and do parallel download. progress := int64(0) progressLock := &sync.Mutex{} err := DoBatchTransfer(ctx, BatchTransferOptions{ OperationName: "downloadBlobToBuffer", TransferSize: count, ChunkSize: o.BlockSize, Parallelism: o.Parallelism, Operation: func(chunkStart int64, count int64, ctx context.Context) error { dr, err := blobURL.Download(ctx, chunkStart+offset, count, o.AccessConditions, false) if err != nil { return err } body := dr.Body(o.RetryReaderOptionsPerBlock) if o.Progress != nil { rangeProgress := int64(0) body = pipeline.NewResponseBodyProgress( body, func(bytesTransferred int64) { diff := bytesTransferred - rangeProgress rangeProgress = bytesTransferred progressLock.Lock() progress += diff o.Progress(progress) progressLock.Unlock() }) } _, err = io.ReadFull(body, b[chunkStart:chunkStart+count]) body.Close() return err }, }) if err != nil { return err } return nil } // DownloadBlobToBuffer downloads an Azure blob to a buffer with parallel. // Offset and count are optional, pass 0 for both to download the entire blob. func DownloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64, b []byte, o DownloadFromBlobOptions) error { return downloadBlobToBuffer(ctx, blobURL, offset, count, b, o, nil) } // DownloadBlobToFile downloads an Azure blob to a local file. 
// The file would be truncated if the size doesn't match. // Offset and count are optional, pass 0 for both to download the entire blob. func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, count int64, file *os.File, o DownloadFromBlobOptions) error { // 1. Calculate the size of the destination file var size int64 if count == CountToEnd { // Try to get Azure blob's size props, err := blobURL.GetProperties(ctx, o.AccessConditions) if err != nil { return err } size = props.ContentLength() - offset } else { size = count } // 2. Compare and try to resize local file's size if it doesn't match Azure blob's size. stat, err := file.Stat() if err != nil { return err } if stat.Size() != size { if err = file.Truncate(size); err != nil { return err } } if size > 0 { // 3. Set mmap and call downloadBlobToBuffer. m, err := newMMF(file, true, 0, int(size)) if err != nil { return err } defer m.unmap() return downloadBlobToBuffer(ctx, blobURL, offset, size, m, o, nil) } else { // if the blob's size is 0, there is no need in downloading it return nil } } /////////////////////////////////////////////////////////////////////////////// // BatchTransferOptions identifies options used by DoBatchTransfer. type BatchTransferOptions struct { TransferSize int64 ChunkSize int64 Parallelism uint16 Operation func(offset int64, chunkSize int64, ctx context.Context) error OperationName string } // DoBatchTransfer helps to execute operations in a batch manner. // Can be used by users to customize batch works (for other scenarios that the SDK does not provide) func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error { if o.ChunkSize == 0 { return errors.New("ChunkSize cannot be 0") } if o.Parallelism == 0 { o.Parallelism = 5 // default Parallelism } // Prepare and do parallel operations. 
numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1) operationChannel := make(chan func() error, o.Parallelism) // Create the channel that release 'Parallelism' goroutines concurrently operationResponseChannel := make(chan error, numChunks) // Holds each response ctx, cancel := context.WithCancel(ctx) defer cancel() // Create the goroutines that process each operation (in parallel). for g := uint16(0); g < o.Parallelism; g++ { //grIndex := g go func() { for f := range operationChannel { err := f() operationResponseChannel <- err } }() } // Add each chunk's operation to the channel. for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { curChunkSize := o.ChunkSize if chunkNum == numChunks-1 { // Last chunk curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total } offset := int64(chunkNum) * o.ChunkSize operationChannel <- func() error { return o.Operation(offset, curChunkSize, ctx) } } close(operationChannel) // Wait for the operations to complete. var firstErr error = nil for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { responseError := <-operationResponseChannel // record the first error (the original error which should cause the other chunks to fail with canceled context) if responseError != nil && firstErr == nil { cancel() // As soon as any operation fails, cancel all remaining operation calls firstErr = responseError } } return firstErr } //////////////////////////////////////////////////////////////////////////////////////////////// const _1MiB = 1024 * 1024 type UploadStreamToBlockBlobOptions struct { // BufferSize sizes the buffer used to read data from source. If < 1 MiB, defaults to 1 MiB. BufferSize int // MaxBuffers defines the number of simultaneous uploads will be performed to upload the file. 
MaxBuffers int BlobHTTPHeaders BlobHTTPHeaders Metadata Metadata AccessConditions BlobAccessConditions } func (u *UploadStreamToBlockBlobOptions) defaults() { if u.MaxBuffers == 0 { u.MaxBuffers = 1 } if u.BufferSize < _1MiB { u.BufferSize = _1MiB } } // UploadStreamToBlockBlob copies the file held in io.Reader to the Blob at blockBlobURL. // A Context deadline or cancellation will cause this to error. func UploadStreamToBlockBlob(ctx context.Context, reader io.Reader, blockBlobURL BlockBlobURL, o UploadStreamToBlockBlobOptions) (CommonResponse, error) { o.defaults() result, err := copyFromReader(ctx, reader, blockBlobURL, o) if err != nil { return nil, err } return result, nil } // UploadStreamOptions (defunct) was used internally. This will be removed or made private in a future version. type UploadStreamOptions struct { BufferSize int MaxBuffers int } azure-storage-blob-go-0.10.0/azblob/parsing_urls.go000066400000000000000000000122021367515646300222240ustar00rootroot00000000000000package azblob import ( "net" "net/url" "strings" ) const ( snapshot = "snapshot" SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00" ) // A BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You parse an // existing URL into its parts by calling NewBlobURLParts(). You construct a URL from parts by calling URL(). // NOTE: Changing any SAS-related field requires computing a new SAS signature. type BlobURLParts struct { Scheme string // Ex: "https://" Host string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80" IPEndpointStyleInfo IPEndpointStyleInfo ContainerName string // "" if no container BlobName string // "" if no blob Snapshot string // "" if not a snapshot SAS SASQueryParameters UnparsedParams string } // IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator. 
// Ex: "https://10.132.141.33/accountname/containername"
type IPEndpointStyleInfo struct {
	AccountName string // "" if not using IP endpoint style
}

// isIPEndpointStyle reports whether the URL's host is a bare IP address (with an
// optional port), which is how the Azure storage emulator addresses accounts:
// http(s)://IP(:port)/storageaccount/container/...
// IPv6 literals may be wrapped in '[' and ']' per https://tools.ietf.org/html/rfc2732.
func isIPEndpointStyle(host string) bool {
	if len(host) == 0 {
		return false
	}
	// Drop any ":port" suffix. SplitHostPort fails on a bare (portless) IPv6
	// literal, in which case the bracket-stripping below handles it instead.
	if bare, _, err := net.SplitHostPort(host); err == nil {
		host = bare
	}
	if n := len(host); n >= 2 && host[0] == '[' && host[n-1] == ']' {
		host = host[1 : n-1]
	}
	return net.ParseIP(host) != nil
}

// NewBlobURLParts parses a URL initializing BlobURLParts' fields including any SAS-related & snapshot query parameters. Any other
// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object.
func NewBlobURLParts(u url.URL) BlobURLParts { up := BlobURLParts{ Scheme: u.Scheme, Host: u.Host, } // Find the container & blob names (if any) if u.Path != "" { path := u.Path if path[0] == '/' { path = path[1:] // If path starts with a slash, remove it } if isIPEndpointStyle(up.Host) { if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob up.IPEndpointStyleInfo.AccountName = path } else { up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes path = path[accountEndIndex+1:] // path refers to portion after the account name now (container & blob names) } } containerEndIndex := strings.Index(path, "/") // Find the next slash (if it exists) if containerEndIndex == -1 { // Slash not found; path has container name & no blob name up.ContainerName = path } else { up.ContainerName = path[:containerEndIndex] // The container name is the part between the slashes up.BlobName = path[containerEndIndex+1:] // The blob name is after the container slash } } // Convert the query parameters to a case-sensitive map & trim whitespace paramsMap := u.Query() up.Snapshot = "" // Assume no snapshot if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok { up.Snapshot = snapshotStr[0] // If we recognized the query parameter, remove it from the map delete(paramsMap, snapshot) } up.SAS = newSASQueryParameters(paramsMap, true) up.UnparsedParams = paramsMap.Encode() return up } type caseInsensitiveValues url.Values // map[string][]string func (values caseInsensitiveValues) Get(key string) ([]string, bool) { key = strings.ToLower(key) for k, v := range values { if strings.ToLower(k) == key { return v, true } } return []string{}, false } // URL returns a URL object whose fields are initialized from the BlobURLParts fields. The URL's RawQuery // field contains the SAS, snapshot, and unparsed query parameters. 
func (up BlobURLParts) URL() url.URL { path := "" if isIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" { path += "/" + up.IPEndpointStyleInfo.AccountName } // Concatenate container & blob names (if they exist) if up.ContainerName != "" { path += "/" + up.ContainerName if up.BlobName != "" { path += "/" + up.BlobName } } rawQuery := up.UnparsedParams //If no snapshot is initially provided, fill it in from the SAS query properties to help the user if up.Snapshot == "" && !up.SAS.snapshotTime.IsZero() { up.Snapshot = up.SAS.snapshotTime.Format(SnapshotTimeFormat) } // Concatenate blob snapshot query parameter (if it exists) if up.Snapshot != "" { if len(rawQuery) > 0 { rawQuery += "&" } rawQuery += snapshot + "=" + up.Snapshot } sas := up.SAS.Encode() if sas != "" { if len(rawQuery) > 0 { rawQuery += "&" } rawQuery += sas } u := url.URL{ Scheme: up.Scheme, Host: up.Host, Path: path, RawQuery: rawQuery, } return u } azure-storage-blob-go-0.10.0/azblob/sas_service.go000066400000000000000000000166271367515646300220410ustar00rootroot00000000000000package azblob import ( "bytes" "fmt" "strings" "time" ) // BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob. 
// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas type BlobSASSignatureValues struct { Version string `param:"sv"` // If not specified, this defaults to SASVersion Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants StartTime time.Time `param:"st"` // Not specified if IsZero ExpiryTime time.Time `param:"se"` // Not specified if IsZero SnapshotTime time.Time Permissions string `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String() IPRange IPRange `param:"sip"` Identifier string `param:"si"` ContainerName string BlobName string // Use "" to create a Container SAS CacheControl string // rscc ContentDisposition string // rscd ContentEncoding string // rsce ContentLanguage string // rscl ContentType string // rsct } // NewSASQueryParameters uses an account's StorageAccountCredential to sign this signature values to produce // the proper SAS query parameters. // See: StorageAccountCredential. 
Compatible with both UserDelegationCredential and SharedKeyCredential func (v BlobSASSignatureValues) NewSASQueryParameters(credential StorageAccountCredential) (SASQueryParameters, error) { resource := "c" if credential == nil { return SASQueryParameters{}, fmt.Errorf("cannot sign SAS query without StorageAccountCredential") } if !v.SnapshotTime.IsZero() { resource = "bs" //Make sure the permission characters are in the correct order perms := &BlobSASPermissions{} if err := perms.Parse(v.Permissions); err != nil { return SASQueryParameters{}, err } v.Permissions = perms.String() } else if v.BlobName == "" { // Make sure the permission characters are in the correct order perms := &ContainerSASPermissions{} if err := perms.Parse(v.Permissions); err != nil { return SASQueryParameters{}, err } v.Permissions = perms.String() } else { resource = "b" // Make sure the permission characters are in the correct order perms := &BlobSASPermissions{} if err := perms.Parse(v.Permissions); err != nil { return SASQueryParameters{}, err } v.Permissions = perms.String() } if v.Version == "" { v.Version = SASVersion } startTime, expiryTime, snapshotTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime) signedIdentifier := v.Identifier udk := credential.getUDKParams() if udk != nil { udkStart, udkExpiry, _ := FormatTimesForSASSigning(udk.SignedStart, udk.SignedExpiry, time.Time{}) //I don't like this answer to combining the functions //But because signedIdentifier and the user delegation key strings share a place, this is an _OK_ way to do it. 
signedIdentifier = strings.Join([]string{ udk.SignedOid, udk.SignedTid, udkStart, udkExpiry, udk.SignedService, udk.SignedVersion, }, "\n") } // String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx stringToSign := strings.Join([]string{ v.Permissions, startTime, expiryTime, getCanonicalName(credential.AccountName(), v.ContainerName, v.BlobName), signedIdentifier, v.IPRange.String(), string(v.Protocol), v.Version, resource, snapshotTime, // signed timestamp v.CacheControl, // rscc v.ContentDisposition, // rscd v.ContentEncoding, // rsce v.ContentLanguage, // rscl v.ContentType}, // rsct "\n") signature := "" signature = credential.ComputeHMACSHA256(stringToSign) p := SASQueryParameters{ // Common SAS parameters version: v.Version, protocol: v.Protocol, startTime: v.StartTime, expiryTime: v.ExpiryTime, permissions: v.Permissions, ipRange: v.IPRange, // Container/Blob-specific SAS parameters resource: resource, identifier: v.Identifier, cacheControl: v.CacheControl, contentDisposition: v.ContentDisposition, contentEncoding: v.ContentEncoding, contentLanguage: v.ContentLanguage, contentType: v.ContentType, snapshotTime: v.SnapshotTime, // Calculated SAS signature signature: signature, } //User delegation SAS specific parameters if udk != nil { p.signedOid = udk.SignedOid p.signedTid = udk.SignedTid p.signedStart = udk.SignedStart p.signedExpiry = udk.SignedExpiry p.signedService = udk.SignedService p.signedVersion = udk.SignedVersion } return p, nil } // getCanonicalName computes the canonical name for a container or blob resource for SAS signing. 
// getCanonicalName computes the canonical resource name used in the SAS string-to-sign.
// Any backslashes in the blob name are normalized to forward slashes.
func getCanonicalName(account string, containerName string, blobName string) string {
	// Container: "/blob/account/containername"
	// Blob:      "/blob/account/containername/blobname"
	name := "/blob/" + account + "/" + containerName
	if blobName != "" {
		name += "/" + strings.Replace(blobName, "\\", "/", -1)
	}
	return name
}

// The ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS.
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
type ContainerSASPermissions struct {
	Read, Add, Create, Write, Delete, List bool
}

// String produces the SAS permissions string for an Azure Storage container.
// Call this method to set BlobSASSignatureValues's Permissions field.
// The characters are always emitted in the service-required order: r, a, c, w, d, l.
func (p ContainerSASPermissions) String() string {
	var b bytes.Buffer
	for _, flag := range []struct {
		set bool
		c   byte
	}{
		{p.Read, 'r'},
		{p.Add, 'a'},
		{p.Create, 'c'},
		{p.Write, 'w'},
		{p.Delete, 'd'},
		{p.List, 'l'},
	} {
		if flag.set {
			b.WriteByte(flag.c)
		}
	}
	return b.String()
}

// Parse initializes the ContainerSASPermissions's fields from a string.
// It rejects any character outside the set "racwdl".
func (p *ContainerSASPermissions) Parse(s string) error {
	*p = ContainerSASPermissions{} // Clear the flags
	flags := map[rune]*bool{
		'r': &p.Read,
		'a': &p.Add,
		'c': &p.Create,
		'w': &p.Write,
		'd': &p.Delete,
		'l': &p.List,
	}
	for _, r := range s {
		target, ok := flags[r]
		if !ok {
			return fmt.Errorf("Invalid permission: '%v'", r)
		}
		*target = true
	}
	return nil
}

// The BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS.
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
type BlobSASPermissions struct{ Read, Add, Create, Write, Delete bool }

// String produces the SAS permissions string for an Azure Storage blob.
// Call this method to set BlobSASSignatureValues's Permissions field. func (p BlobSASPermissions) String() string { var b bytes.Buffer if p.Read { b.WriteRune('r') } if p.Add { b.WriteRune('a') } if p.Create { b.WriteRune('c') } if p.Write { b.WriteRune('w') } if p.Delete { b.WriteRune('d') } return b.String() } // Parse initializes the BlobSASPermissions's fields from a string. func (p *BlobSASPermissions) Parse(s string) error { *p = BlobSASPermissions{} // Clear the flags for _, r := range s { switch r { case 'r': p.Read = true case 'a': p.Add = true case 'c': p.Create = true case 'w': p.Write = true case 'd': p.Delete = true default: return fmt.Errorf("Invalid permission: '%v'", r) } } return nil } azure-storage-blob-go-0.10.0/azblob/service_codes_blob.go000066400000000000000000000300111367515646300233250ustar00rootroot00000000000000package azblob // https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes // ServiceCode values indicate a service failure. const ( // ServiceCodeAppendPositionConditionNotMet means the append position condition specified was not met. ServiceCodeAppendPositionConditionNotMet ServiceCodeType = "AppendPositionConditionNotMet" // ServiceCodeBlobAlreadyExists means the specified blob already exists. ServiceCodeBlobAlreadyExists ServiceCodeType = "BlobAlreadyExists" // ServiceCodeBlobNotFound means the specified blob does not exist. ServiceCodeBlobNotFound ServiceCodeType = "BlobNotFound" // ServiceCodeBlobOverwritten means the blob has been recreated since the previous snapshot was taken. ServiceCodeBlobOverwritten ServiceCodeType = "BlobOverwritten" // ServiceCodeBlobTierInadequateForContentLength means the specified blob tier size limit cannot be less than content length. 
ServiceCodeBlobTierInadequateForContentLength ServiceCodeType = "BlobTierInadequateForContentLength" // ServiceCodeBlockCountExceedsLimit means the committed block count cannot exceed the maximum limit of 50,000 blocks // or that the uncommitted block count cannot exceed the maximum limit of 100,000 blocks. ServiceCodeBlockCountExceedsLimit ServiceCodeType = "BlockCountExceedsLimit" // ServiceCodeBlockListTooLong means the block list may not contain more than 50,000 blocks. ServiceCodeBlockListTooLong ServiceCodeType = "BlockListTooLong" // ServiceCodeCannotChangeToLowerTier means that a higher blob tier has already been explicitly set. ServiceCodeCannotChangeToLowerTier ServiceCodeType = "CannotChangeToLowerTier" // ServiceCodeCannotVerifyCopySource means that the service could not verify the copy source within the specified time. // Examine the HTTP status code and message for more information about the failure. ServiceCodeCannotVerifyCopySource ServiceCodeType = "CannotVerifyCopySource" // ServiceCodeContainerAlreadyExists means the specified container already exists. ServiceCodeContainerAlreadyExists ServiceCodeType = "ContainerAlreadyExists" // ServiceCodeContainerBeingDeleted means the specified container is being deleted. ServiceCodeContainerBeingDeleted ServiceCodeType = "ContainerBeingDeleted" // ServiceCodeContainerDisabled means the specified container has been disabled by the administrator. ServiceCodeContainerDisabled ServiceCodeType = "ContainerDisabled" // ServiceCodeContainerNotFound means the specified container does not exist. ServiceCodeContainerNotFound ServiceCodeType = "ContainerNotFound" // ServiceCodeContentLengthLargerThanTierLimit means the blob's content length cannot exceed its tier limit. ServiceCodeContentLengthLargerThanTierLimit ServiceCodeType = "ContentLengthLargerThanTierLimit" // ServiceCodeCopyAcrossAccountsNotSupported means the copy source account and destination account must be the same. 
ServiceCodeCopyAcrossAccountsNotSupported ServiceCodeType = "CopyAcrossAccountsNotSupported" // ServiceCodeCopyIDMismatch means the specified copy ID did not match the copy ID for the pending copy operation. ServiceCodeCopyIDMismatch ServiceCodeType = "CopyIdMismatch" // ServiceCodeFeatureVersionMismatch means the type of blob in the container is unrecognized by this version or // that the operation for AppendBlob requires at least version 2015-02-21. ServiceCodeFeatureVersionMismatch ServiceCodeType = "FeatureVersionMismatch" // ServiceCodeIncrementalCopyBlobMismatch means the specified source blob is different than the copy source of the existing incremental copy blob. ServiceCodeIncrementalCopyBlobMismatch ServiceCodeType = "IncrementalCopyBlobMismatch" // ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed means the specified snapshot is earlier than the last snapshot copied into the incremental copy blob. ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed ServiceCodeType = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" // ServiceCodeIncrementalCopySourceMustBeSnapshot means the source for incremental copy request must be a snapshot. ServiceCodeIncrementalCopySourceMustBeSnapshot ServiceCodeType = "IncrementalCopySourceMustBeSnapshot" // ServiceCodeInfiniteLeaseDurationRequired means the lease ID matched, but the specified lease must be an infinite-duration lease. ServiceCodeInfiniteLeaseDurationRequired ServiceCodeType = "InfiniteLeaseDurationRequired" // ServiceCodeInvalidBlobOrBlock means the specified blob or block content is invalid. ServiceCodeInvalidBlobOrBlock ServiceCodeType = "InvalidBlobOrBlock" // ServiceCodeInvalidBlobType means the blob type is invalid for this operation. ServiceCodeInvalidBlobType ServiceCodeType = "InvalidBlobType" // ServiceCodeInvalidBlockID means the specified block ID is invalid. The block ID must be Base64-encoded. 
ServiceCodeInvalidBlockID ServiceCodeType = "InvalidBlockId" // ServiceCodeInvalidBlockList means the specified block list is invalid. ServiceCodeInvalidBlockList ServiceCodeType = "InvalidBlockList" // ServiceCodeInvalidOperation means an invalid operation against a blob snapshot. ServiceCodeInvalidOperation ServiceCodeType = "InvalidOperation" // ServiceCodeInvalidPageRange means the page range specified is invalid. ServiceCodeInvalidPageRange ServiceCodeType = "InvalidPageRange" // ServiceCodeInvalidSourceBlobType means the copy source blob type is invalid for this operation. ServiceCodeInvalidSourceBlobType ServiceCodeType = "InvalidSourceBlobType" // ServiceCodeInvalidSourceBlobURL means the source URL for incremental copy request must be valid Azure Storage blob URL. ServiceCodeInvalidSourceBlobURL ServiceCodeType = "InvalidSourceBlobUrl" // ServiceCodeInvalidVersionForPageBlobOperation means that all operations on page blobs require at least version 2009-09-19. ServiceCodeInvalidVersionForPageBlobOperation ServiceCodeType = "InvalidVersionForPageBlobOperation" // ServiceCodeLeaseAlreadyPresent means there is already a lease present. ServiceCodeLeaseAlreadyPresent ServiceCodeType = "LeaseAlreadyPresent" // ServiceCodeLeaseAlreadyBroken means the lease has already been broken and cannot be broken again. ServiceCodeLeaseAlreadyBroken ServiceCodeType = "LeaseAlreadyBroken" // ServiceCodeLeaseIDMismatchWithBlobOperation means the lease ID specified did not match the lease ID for the blob. ServiceCodeLeaseIDMismatchWithBlobOperation ServiceCodeType = "LeaseIdMismatchWithBlobOperation" // ServiceCodeLeaseIDMismatchWithContainerOperation means the lease ID specified did not match the lease ID for the container. ServiceCodeLeaseIDMismatchWithContainerOperation ServiceCodeType = "LeaseIdMismatchWithContainerOperation" // ServiceCodeLeaseIDMismatchWithLeaseOperation means the lease ID specified did not match the lease ID for the blob/container. 
ServiceCodeLeaseIDMismatchWithLeaseOperation ServiceCodeType = "LeaseIdMismatchWithLeaseOperation" // ServiceCodeLeaseIDMissing means there is currently a lease on the blob/container and no lease ID was specified in the request. ServiceCodeLeaseIDMissing ServiceCodeType = "LeaseIdMissing" // ServiceCodeLeaseIsBreakingAndCannotBeAcquired means the lease ID matched, but the lease is currently in breaking state and cannot be acquired until it is broken. ServiceCodeLeaseIsBreakingAndCannotBeAcquired ServiceCodeType = "LeaseIsBreakingAndCannotBeAcquired" // ServiceCodeLeaseIsBreakingAndCannotBeChanged means the lease ID matched, but the lease is currently in breaking state and cannot be changed. ServiceCodeLeaseIsBreakingAndCannotBeChanged ServiceCodeType = "LeaseIsBreakingAndCannotBeChanged" // ServiceCodeLeaseIsBrokenAndCannotBeRenewed means the lease ID matched, but the lease has been broken explicitly and cannot be renewed. ServiceCodeLeaseIsBrokenAndCannotBeRenewed ServiceCodeType = "LeaseIsBrokenAndCannotBeRenewed" // ServiceCodeLeaseLost means a lease ID was specified, but the lease for the blob/container has expired. ServiceCodeLeaseLost ServiceCodeType = "LeaseLost" // ServiceCodeLeaseNotPresentWithBlobOperation means there is currently no lease on the blob. ServiceCodeLeaseNotPresentWithBlobOperation ServiceCodeType = "LeaseNotPresentWithBlobOperation" // ServiceCodeLeaseNotPresentWithContainerOperation means there is currently no lease on the container. ServiceCodeLeaseNotPresentWithContainerOperation ServiceCodeType = "LeaseNotPresentWithContainerOperation" // ServiceCodeLeaseNotPresentWithLeaseOperation means there is currently no lease on the blob/container. ServiceCodeLeaseNotPresentWithLeaseOperation ServiceCodeType = "LeaseNotPresentWithLeaseOperation" // ServiceCodeMaxBlobSizeConditionNotMet means the max blob size condition specified was not met. 
ServiceCodeMaxBlobSizeConditionNotMet ServiceCodeType = "MaxBlobSizeConditionNotMet" // ServiceCodeNoPendingCopyOperation means there is currently no pending copy operation. ServiceCodeNoPendingCopyOperation ServiceCodeType = "NoPendingCopyOperation" // ServiceCodeOperationNotAllowedOnIncrementalCopyBlob means the specified operation is not allowed on an incremental copy blob. ServiceCodeOperationNotAllowedOnIncrementalCopyBlob ServiceCodeType = "OperationNotAllowedOnIncrementalCopyBlob" // ServiceCodePendingCopyOperation means there is currently a pending copy operation. ServiceCodePendingCopyOperation ServiceCodeType = "PendingCopyOperation" // ServiceCodePreviousSnapshotCannotBeNewer means the prevsnapshot query parameter value cannot be newer than snapshot query parameter value. ServiceCodePreviousSnapshotCannotBeNewer ServiceCodeType = "PreviousSnapshotCannotBeNewer" // ServiceCodePreviousSnapshotNotFound means the previous snapshot is not found. ServiceCodePreviousSnapshotNotFound ServiceCodeType = "PreviousSnapshotNotFound" // ServiceCodePreviousSnapshotOperationNotSupported means that differential Get Page Ranges is not supported on the previous snapshot. ServiceCodePreviousSnapshotOperationNotSupported ServiceCodeType = "PreviousSnapshotOperationNotSupported" // ServiceCodeSequenceNumberConditionNotMet means the sequence number condition specified was not met. ServiceCodeSequenceNumberConditionNotMet ServiceCodeType = "SequenceNumberConditionNotMet" // ServiceCodeSequenceNumberIncrementTooLarge means the sequence number increment cannot be performed because it would result in overflow of the sequence number. ServiceCodeSequenceNumberIncrementTooLarge ServiceCodeType = "SequenceNumberIncrementTooLarge" // ServiceCodeSnapshotCountExceeded means the snapshot count against this blob has been exceeded. 
ServiceCodeSnapshotCountExceeded ServiceCodeType = "SnapshotCountExceeded" // ServiceCodeSnaphotOperationRateExceeded means the rate of snapshot operations against this blob has been exceeded. ServiceCodeSnaphotOperationRateExceeded ServiceCodeType = "SnaphotOperationRateExceeded" // ServiceCodeSnapshotsPresent means this operation is not permitted while the blob has snapshots. ServiceCodeSnapshotsPresent ServiceCodeType = "SnapshotsPresent" // ServiceCodeSourceConditionNotMet means the source condition specified using HTTP conditional header(s) is not met. ServiceCodeSourceConditionNotMet ServiceCodeType = "SourceConditionNotMet" // ServiceCodeSystemInUse means this blob is in use by the system. ServiceCodeSystemInUse ServiceCodeType = "SystemInUse" // ServiceCodeTargetConditionNotMet means the target condition specified using HTTP conditional header(s) is not met. ServiceCodeTargetConditionNotMet ServiceCodeType = "TargetConditionNotMet" // ServiceCodeUnauthorizedBlobOverwrite means this request is not authorized to perform blob overwrites. ServiceCodeUnauthorizedBlobOverwrite ServiceCodeType = "UnauthorizedBlobOverwrite" // ServiceCodeBlobBeingRehydrated means this operation is not permitted because the blob is being rehydrated. ServiceCodeBlobBeingRehydrated ServiceCodeType = "BlobBeingRehydrated" // ServiceCodeBlobArchived means this operation is not permitted on an archived blob. ServiceCodeBlobArchived ServiceCodeType = "BlobArchived" // ServiceCodeBlobNotArchived means this blob is currently not in the archived state. 
ServiceCodeBlobNotArchived ServiceCodeType = "BlobNotArchived" ) azure-storage-blob-go-0.10.0/azblob/storage_account_credential.go000066400000000000000000000004261367515646300250730ustar00rootroot00000000000000package azblob // StorageAccountCredential is a wrapper interface for SharedKeyCredential and UserDelegationCredential type StorageAccountCredential interface { AccountName() string ComputeHMACSHA256(message string) (base64String string) getUDKParams() *UserDelegationKey } azure-storage-blob-go-0.10.0/azblob/url_append_blob.go000066400000000000000000000145731367515646300226600ustar00rootroot00000000000000package azblob import ( "context" "io" "net/url" "github.com/Azure/azure-pipeline-go/pipeline" ) const ( // AppendBlobMaxAppendBlockBytes indicates the maximum number of bytes that can be sent in a call to AppendBlock. AppendBlobMaxAppendBlockBytes = 4 * 1024 * 1024 // 4MB // AppendBlobMaxBlocks indicates the maximum number of blocks allowed in an append blob. AppendBlobMaxBlocks = 50000 ) // AppendBlobURL defines a set of operations applicable to append blobs. type AppendBlobURL struct { BlobURL abClient appendBlobClient } // NewAppendBlobURL creates an AppendBlobURL object using the specified URL and request policy pipeline. func NewAppendBlobURL(url url.URL, p pipeline.Pipeline) AppendBlobURL { blobClient := newBlobClient(url, p) abClient := newAppendBlobClient(url, p) return AppendBlobURL{BlobURL: BlobURL{blobClient: blobClient}, abClient: abClient} } // WithPipeline creates a new AppendBlobURL object identical to the source but with the specific request policy pipeline. func (ab AppendBlobURL) WithPipeline(p pipeline.Pipeline) AppendBlobURL { return NewAppendBlobURL(ab.blobClient.URL(), p) } // WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp. // Pass "" to remove the snapshot returning a URL to the base blob. 
func (ab AppendBlobURL) WithSnapshot(snapshot string) AppendBlobURL {
	p := NewBlobURLParts(ab.URL())
	p.Snapshot = snapshot
	return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline())
}

// GetAccountInfo returns account-level information by delegating to the embedded blob client.
func (ab AppendBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
	return ab.blobClient.GetAccountInfo(ctx)
}

// Create creates a 0-length append blob. Call AppendBlock to append data to an append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*AppendBlobCreateResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
	// The nil arguments are optional parameters of the generated client that this
	// wrapper does not expose (the ones around EncryptionAlgorithmNone are the CPK
	// customer-provided-key values; the others are presumably timeout/request-ID —
	// verify against the generated appendBlobClient signature).
	return ab.abClient.Create(ctx, 0, nil,
		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
		&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
		nil, nil, EncryptionAlgorithmNone, // CPK
		ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, nil)
}

// AppendBlock writes a stream to a new block of data to the end of the existing append blob.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac AppendBlobAccessConditions, transactionalMD5 []byte) (*AppendBlobAppendBlockResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendPositionAccessConditions.pointers()
	// The body must be seekable and positioned at 0 so its length can be determined.
	count, err := validateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		return nil, err
	}
	return ab.abClient.AppendBlock(ctx, body, count, nil,
		transactionalMD5, nil, // CRC
		ac.LeaseAccessConditions.pointers(),
		ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
		nil, nil, EncryptionAlgorithmNone, // CPK
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url.
func (ab AppendBlobURL) AppendBlockFromURL(ctx context.Context, sourceURL url.URL, offset int64, count int64, destinationAccessConditions AppendBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions, transactionalMD5 []byte) (*AppendBlobAppendBlockFromURLResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers()
	sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
	ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := destinationAccessConditions.AppendPositionAccessConditions.pointers()
	// The source range is expressed via the x-ms-source-range header built by httpRange.pointers().
	return ab.abClient.AppendBlockFromURL(ctx, sourceURL.String(), 0,
		httpRange{offset: offset, count: count}.pointers(),
		transactionalMD5, nil, nil, nil,
		nil, nil, EncryptionAlgorithmNone, // CPK
		destinationAccessConditions.LeaseAccessConditions.pointers(),
		ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
}

// AppendBlobAccessConditions bundles the three access-condition groups that append blob operations accept.
type AppendBlobAccessConditions struct {
	ModifiedAccessConditions
	LeaseAccessConditions
	AppendPositionAccessConditions
}

// AppendPositionAccessConditions identifies append blob-specific access conditions which you optionally set.
type AppendPositionAccessConditions struct {
	// IfAppendPositionEqual ensures that the AppendBlock operation succeeds
	// only if the append position is equal to a value.
	// IfAppendPositionEqual=0 means no 'IfAppendPositionEqual' header specified.
	// IfAppendPositionEqual>0 means 'IfAppendPositionEqual' header specified with its value
	// IfAppendPositionEqual==-1 means 'IfAppendPositionEqual' header specified with a value of 0
	IfAppendPositionEqual int64

	// IfMaxSizeLessThanOrEqual ensures that the AppendBlock operation succeeds
	// only if the append blob's size is less than or equal to a value.
	// IfMaxSizeLessThanOrEqual=0 means no 'IfMaxSizeLessThanOrEqual' header specified.
	// IfMaxSizeLessThanOrEqual>0 means 'IfMaxSizeLessThanOrEqual' header specified with its value
	// IfMaxSizeLessThanOrEqual==-1 means 'IfMaxSizeLessThanOrEqual' header specified with a value of 0
	IfMaxSizeLessThanOrEqual int64
}

// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac AppendPositionAccessConditions) pointers() (iape *int64, imsltoe *int64) { var zero int64 // defaults to 0 switch ac.IfAppendPositionEqual { case -1: iape = &zero case 0: iape = nil default: iape = &ac.IfAppendPositionEqual } switch ac.IfMaxSizeLessThanOrEqual { case -1: imsltoe = &zero case 0: imsltoe = nil default: imsltoe = &ac.IfMaxSizeLessThanOrEqual } return } azure-storage-blob-go-0.10.0/azblob/url_blob.go000066400000000000000000000274721367515646300213330ustar00rootroot00000000000000package azblob import ( "context" "net/url" "github.com/Azure/azure-pipeline-go/pipeline" ) // A BlobURL represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. type BlobURL struct { blobClient blobClient } // NewBlobURL creates a BlobURL object using the specified URL and request policy pipeline. func NewBlobURL(url url.URL, p pipeline.Pipeline) BlobURL { blobClient := newBlobClient(url, p) return BlobURL{blobClient: blobClient} } // URL returns the URL endpoint used by the BlobURL object. func (b BlobURL) URL() url.URL { return b.blobClient.URL() } // String returns the URL as a string. func (b BlobURL) String() string { u := b.URL() return u.String() } func (b BlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) { return b.blobClient.GetAccountInfo(ctx) } // WithPipeline creates a new BlobURL object identical to the source but with the specified request policy pipeline. func (b BlobURL) WithPipeline(p pipeline.Pipeline) BlobURL { return NewBlobURL(b.blobClient.URL(), p) } // WithSnapshot creates a new BlobURL object identical to the source but with the specified snapshot timestamp. // Pass "" to remove the snapshot returning a URL to the base blob. func (b BlobURL) WithSnapshot(snapshot string) BlobURL { p := NewBlobURLParts(b.URL()) p.Snapshot = snapshot return NewBlobURL(p.URL(), b.blobClient.Pipeline()) } // ToAppendBlobURL creates an AppendBlobURL using the source's URL and pipeline. 
func (b BlobURL) ToAppendBlobURL() AppendBlobURL {
	return NewAppendBlobURL(b.URL(), b.blobClient.Pipeline())
}

// ToBlockBlobURL creates a BlockBlobURL using the source's URL and pipeline.
func (b BlobURL) ToBlockBlobURL() BlockBlobURL {
	return NewBlockBlobURL(b.URL(), b.blobClient.Pipeline())
}

// ToPageBlobURL creates a PageBlobURL using the source's URL and pipeline.
func (b BlobURL) ToPageBlobURL() PageBlobURL {
	return NewPageBlobURL(b.URL(), b.blobClient.Pipeline())
}

// Download reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// Passing azblob.CountToEnd (0) for count will download the blob from the offset to the end.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool) (*DownloadResponse, error) {
	// Only send the range-get-content-md5 flag when the caller asked for it.
	var xRangeGetContentMD5 *bool
	if rangeGetContentMD5 {
		xRangeGetContentMD5 = &rangeGetContentMD5
	}
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	dr, err := b.blobClient.Download(ctx, nil, nil,
		httpRange{offset: offset, count: count}.pointers(),
		ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5,
		nil, nil, nil, EncryptionAlgorithmNone, // CPK
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
	if err != nil {
		return nil, err
	}
	// getInfo captures the range and ETag of this request — presumably so the body
	// can be re-requested consistently (e.g. on retry); verify against DownloadResponse's usage.
	return &DownloadResponse{
		b:       b,
		r:       dr,
		ctx:     ctx,
		getInfo: HTTPGetterInfo{Offset: offset, Count: count, ETag: dr.ETag()},
	}, err // err is necessarily nil here
}

// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
// Note that deleting a blob also deletes all its snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return b.blobClient.Delete(ctx, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
func (b BlobURL) Undelete(ctx context.Context) (*BlobUndeleteResponse, error) {
	return b.blobClient.Undelete(ctx, nil, nil)
}

// SetTier operation sets the tier on a blob. The operation is allowed on a page
// blob in a premium storage account and on a block blob in a blob storage account (locally
// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and
// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
// does not update the blob's ETag.
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType, lac LeaseAccessConditions) (*BlobSetTierResponse, error) {
	// Rehydrate priority is not exposed by this wrapper; RehydratePriorityNone is always sent.
	return b.blobClient.SetTier(ctx, tier, nil, RehydratePriorityNone, nil, lac.pointers())
}

// GetProperties returns the blob's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions) (*BlobGetPropertiesResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return b.blobClient.GetProperties(ctx, nil, nil, ac.LeaseAccessConditions.pointers(),
		nil, nil, EncryptionAlgorithmNone, // CPK
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobSetHTTPHeadersResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return b.blobClient.SetHTTPHeaders(ctx, nil,
		&h.CacheControl, &h.ContentType, h.ContentMD5, &h.ContentEncoding, &h.ContentLanguage,
		ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		&h.ContentDisposition, nil)
}

// SetMetadata changes a blob's metadata.
// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobSetMetadataResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return b.blobClient.SetMetadata(ctx, nil, metadata, ac.LeaseAccessConditions.pointers(),
		nil, nil, EncryptionAlgorithmNone, // CPK
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// CreateSnapshot creates a read-only snapshot of a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobCreateSnapshotResponse, error) {
	// CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter
	// because checking this would be a performance hit for a VERY unusual path and I don't think the common case should suffer this
	// performance hit.
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return b.blobClient.CreateSnapshot(ctx, nil, metadata,
		nil, nil, EncryptionAlgorithmNone, // CPK
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		ac.LeaseAccessConditions.pointers(), nil)
}

// AcquireLease acquires a lease on the blob for write and delete operations. The lease duration must be between
// 15 to 60 seconds, or infinite (-1).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*BlobAcquireLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
	return b.blobClient.AcquireLease(ctx, nil, &duration, &proposedID,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// RenewLease renews the blob's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobRenewLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
	return b.blobClient.RenewLease(ctx, leaseID, nil,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// ReleaseLease releases the blob's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobReleaseLeaseResponse, error) { ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() return b.blobClient.ReleaseLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) } // BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) // constant to break a fixed-duration lease when it expires or an infinite lease immediately. // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac ModifiedAccessConditions) (*BlobBreakLeaseResponse, error) { ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() return b.blobClient.BreakLease(ctx, nil, leasePeriodPointer(breakPeriodInSeconds), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) } // ChangeLease changes the blob's lease ID. // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*BlobChangeLeaseResponse, error) { ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() return b.blobClient.ChangeLease(ctx, leaseID, proposedID, nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) } // LeaseBreakNaturally tells ContainerURL's or BlobURL's BreakLease method to break the lease using service semantics. const LeaseBreakNaturally = -1 func leasePeriodPointer(period int32) (p *int32) { if period != LeaseBreakNaturally { p = &period } return nil } // StartCopyFromURL copies the data at the source URL to a blob. // For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. 
func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions) (*BlobStartCopyFromURLResponse, error) { srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers() dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers() dstLeaseID := dstac.LeaseAccessConditions.pointers() return b.blobClient.StartCopyFromURL(ctx, source.String(), nil, metadata, AccessTierNone, RehydratePriorityNone, srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag, dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag, dstLeaseID, nil) } // AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. // For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. func (b BlobURL) AbortCopyFromURL(ctx context.Context, copyID string, ac LeaseAccessConditions) (*BlobAbortCopyFromURLResponse, error) { return b.blobClient.AbortCopyFromURL(ctx, copyID, nil, ac.pointers(), nil) } azure-storage-blob-go-0.10.0/azblob/url_block_blob.go000066400000000000000000000167201367515646300224770ustar00rootroot00000000000000package azblob import ( "context" "io" "net/url" "github.com/Azure/azure-pipeline-go/pipeline" ) const ( // BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload. BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB // BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock. BlockBlobMaxStageBlockBytes = 100 * 1024 * 1024 // 100MB // BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob. BlockBlobMaxBlocks = 50000 ) // BlockBlobURL defines a set of operations applicable to block blobs. 
type BlockBlobURL struct { BlobURL bbClient blockBlobClient } // NewBlockBlobURL creates a BlockBlobURL object using the specified URL and request policy pipeline. func NewBlockBlobURL(url url.URL, p pipeline.Pipeline) BlockBlobURL { blobClient := newBlobClient(url, p) bbClient := newBlockBlobClient(url, p) return BlockBlobURL{BlobURL: BlobURL{blobClient: blobClient}, bbClient: bbClient} } // WithPipeline creates a new BlockBlobURL object identical to the source but with the specific request policy pipeline. func (bb BlockBlobURL) WithPipeline(p pipeline.Pipeline) BlockBlobURL { return NewBlockBlobURL(bb.blobClient.URL(), p) } // WithSnapshot creates a new BlockBlobURL object identical to the source but with the specified snapshot timestamp. // Pass "" to remove the snapshot returning a URL to the base blob. func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL { p := NewBlobURLParts(bb.URL()) p.Snapshot = snapshot return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline()) } func (bb BlockBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) { return bb.blobClient.GetAccountInfo(ctx) } // Upload creates a new block blob or overwrites an existing block blob. // Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not // supported with Upload; the content of the existing blob is overwritten with the new content. To // perform a partial update of a block blob, use StageBlock and CommitBlockList. // This method panics if the stream is not at position 0. // Note that the http client closes the body stream after the request is sent to the service. // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. 
func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*BlockBlobUploadResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	// The stream must be at position 0; its remaining length becomes the request's content length.
	count, err := validateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		return nil, err
	}
	return bb.bbClient.Upload(ctx, body, count, nil, nil,
		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
		&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
		nil, nil, EncryptionAlgorithmNone, // CPK
		AccessTierNone,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		nil)
}

// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions, transactionalMD5 []byte) (*BlockBlobStageBlockResponse, error) {
	count, err := validateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		return nil, err
	}
	return bb.bbClient.StageBlock(ctx, base64BlockID, count, body, transactionalMD5, nil, nil,
		ac.pointers(),
		nil, nil, EncryptionAlgorithmNone, // CPK
		nil)
}

// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// If count is CountToEnd (0), then data is read from specified offset to the end.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL url.URL, offset int64, count int64, destinationAccessConditions LeaseAccessConditions, sourceAccessConditions ModifiedAccessConditions) (*BlockBlobStageBlockFromURLResponse, error) {
	sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
	// Content length is 0 because the service reads the block data from sourceURL, not from a local body.
	return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, sourceURL.String(),
		httpRange{offset: offset, count: count}.pointers(), nil, nil, nil, nil, nil,
		EncryptionAlgorithmNone, // CPK
		destinationAccessConditions.pointers(), sourceIfModifiedSince, sourceIfUnmodifiedSince,
		sourceIfMatchETag, sourceIfNoneMatchETag, nil)
}

// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
// In order to be written as part of a blob, a block must have been successfully written
// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob
// by uploading only those blocks that have changed, then committing the new and existing
// blocks together. Any blocks not specified in the block list are permanently deleted.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return bb.bbClient.CommitBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil,
		&h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
		nil, nil, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
		nil, nil, EncryptionAlgorithmNone, // CPK
		AccessTierNone,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list.
func (bb BlockBlobURL) GetBlockList(ctx context.Context, listType BlockListType, ac LeaseAccessConditions) (*BlockList, error) {
	return bb.bbClient.GetBlockList(ctx, listType, nil, nil, ac.pointers(), nil)
}

// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
func (bb BlockBlobURL) CopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions, srcContentMD5 []byte) (*BlobCopyFromURLResponse, error) {
	// Source and destination carry independent access conditions.
	srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers()
	dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers()
	dstLeaseID := dstac.LeaseAccessConditions.pointers()
	return bb.blobClient.CopyFromURL(ctx, source.String(), nil, metadata, AccessTierNone,
		srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag,
		dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag,
		dstLeaseID, nil, srcContentMD5)
}

// NOTE(review): the following line is tar-archive member-header residue (pax header for
// azblob/url_container.go) from how this dump was produced; it is not Go source.
azure-storage-blob-go-0.10.0/azblob/url_container.go000066400000000000000000000335761367515646300223770ustar00rootroot00000000000000package azblob

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"net/url"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

// A ContainerURL represents a URL to the Azure Storage container allowing you to manipulate its blobs.
type ContainerURL struct {
	client containerClient
}

// NewContainerURL creates a ContainerURL object using the specified URL and request policy pipeline.
func NewContainerURL(url url.URL, p pipeline.Pipeline) ContainerURL {
	client := newContainerClient(url, p)
	return ContainerURL{client: client}
}

// URL returns the URL endpoint used by the ContainerURL object.
func (c ContainerURL) URL() url.URL {
	return c.client.URL()
}

// String returns the URL as a string.
func (c ContainerURL) String() string {
	u := c.URL()
	return u.String()
}

// GetAccountInfo delegates to the underlying container client's account-information call.
func (c ContainerURL) GetAccountInfo(ctx context.Context) (*ContainerGetAccountInfoResponse, error) {
	return c.client.GetAccountInfo(ctx)
}

// WithPipeline creates a new ContainerURL object identical to the source but with the specified request policy pipeline.
func (c ContainerURL) WithPipeline(p pipeline.Pipeline) ContainerURL {
	return NewContainerURL(c.URL(), p)
}

// NewBlobURL creates a new BlobURL object by concatenating blobName to the end of ContainerURL's URL.
// The new BlobURL reuses this ContainerURL's request policy pipeline; to use a different one, call
// WithPipeline on the result, or build the URL yourself and use the package-level NewBlobURL.
func (c ContainerURL) NewBlobURL(blobName string) BlobURL {
	return NewBlobURL(appendToURLPath(c.URL(), blobName), c.client.Pipeline())
}

// NewAppendBlobURL creates a new AppendBlobURL object by concatenating blobName to the end of ContainerURL's URL.
// The new AppendBlobURL reuses this ContainerURL's request policy pipeline; to use a different one, call
// WithPipeline on the result, or build the URL yourself and use the package-level NewAppendBlobURL.
func (c ContainerURL) NewAppendBlobURL(blobName string) AppendBlobURL {
	return NewAppendBlobURL(appendToURLPath(c.URL(), blobName), c.client.Pipeline())
}

// NewBlockBlobURL creates a new BlockBlobURL object by concatenating blobName to the end of ContainerURL's URL.
// The new BlockBlobURL reuses this ContainerURL's request policy pipeline; to use a different one, call
// WithPipeline on the result, or build the URL yourself and use the package-level NewBlockBlobURL.
func (c ContainerURL) NewBlockBlobURL(blobName string) BlockBlobURL {
	return NewBlockBlobURL(appendToURLPath(c.URL(), blobName), c.client.Pipeline())
}

// NewPageBlobURL creates a new PageBlobURL object by concatenating blobName to the end of
// ContainerURL's URL.
// The new PageBlobURL uses the same request policy pipeline as the ContainerURL.
// To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewPageBlobURL instead of calling this object's
// NewPageBlobURL method.
func (c ContainerURL) NewPageBlobURL(blobName string) PageBlobURL {
	blobURL := appendToURLPath(c.URL(), blobName)
	return NewPageBlobURL(blobURL, c.client.Pipeline())
}

// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container.
func (c ContainerURL) Create(ctx context.Context, metadata Metadata, publicAccessType PublicAccessType) (*ContainerCreateResponse, error) {
	return c.client.Create(ctx, nil, metadata, publicAccessType, nil)
}

// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container.
func (c ContainerURL) Delete(ctx context.Context, ac ContainerAccessConditions) (*ContainerDeleteResponse, error) {
	// ETag conditions are rejected up front rather than being silently dropped.
	if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
		return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
	}
	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
	return c.client.Delete(ctx, nil, ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, nil)
}

// GetProperties returns the container's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata.
func (c ContainerURL) GetProperties(ctx context.Context, ac LeaseAccessConditions) (*ContainerGetPropertiesResponse, error) {
	// NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties.
	// This allows us to not expose a GetProperties method at all simplifying the API.
	return c.client.GetProperties(ctx, nil, ac.pointers(), nil)
}

// SetMetadata sets the container's metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata.
func (c ContainerURL) SetMetadata(ctx context.Context, metadata Metadata, ac ContainerAccessConditions) (*ContainerSetMetadataResponse, error) {
	// Only If-Modified-Since is forwarded to the service for this operation; all other
	// conditions are rejected rather than silently ignored.
	if !ac.IfUnmodifiedSince.IsZero() || ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
		return nil, errors.New("the IfUnmodifiedSince, IfMatch, and IfNoneMatch must have their default values because they are ignored by the blob service")
	}
	ifModifiedSince, _, _, _ := ac.ModifiedAccessConditions.pointers()
	return c.client.SetMetadata(ctx, nil, ac.LeaseAccessConditions.pointers(), metadata, ifModifiedSince, nil)
}

// GetAccessPolicy returns the container's access policy. The access policy indicates whether container's blobs may be accessed publicly.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl.
func (c ContainerURL) GetAccessPolicy(ctx context.Context, ac LeaseAccessConditions) (*SignedIdentifiers, error) {
	return c.client.GetAccessPolicy(ctx, nil, ac.pointers(), nil)
}

// The AccessPolicyPermission type simplifies creating the permissions string for a container's access policy.
// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field.
type AccessPolicyPermission struct {
	Read, Add, Create, Write, Delete, List bool
}

// String produces the access policy permission string for an Azure Storage container.
// Call this method to set AccessPolicy's Permission field.
func (p AccessPolicyPermission) String() string { var b bytes.Buffer if p.Read { b.WriteRune('r') } if p.Add { b.WriteRune('a') } if p.Create { b.WriteRune('c') } if p.Write { b.WriteRune('w') } if p.Delete { b.WriteRune('d') } if p.List { b.WriteRune('l') } return b.String() } // Parse initializes the AccessPolicyPermission's fields from a string. func (p *AccessPolicyPermission) Parse(s string) error { *p = AccessPolicyPermission{} // Clear the flags for _, r := range s { switch r { case 'r': p.Read = true case 'a': p.Add = true case 'c': p.Create = true case 'w': p.Write = true case 'd': p.Delete = true case 'l': p.List = true default: return fmt.Errorf("invalid permission: '%v'", r) } } return nil } // SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl. func (c ContainerURL) SetAccessPolicy(ctx context.Context, accessType PublicAccessType, si []SignedIdentifier, ac ContainerAccessConditions) (*ContainerSetAccessPolicyResponse, error) { if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone { return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service") } ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers() return c.client.SetAccessPolicy(ctx, si, nil, ac.LeaseAccessConditions.pointers(), accessType, ifModifiedSince, ifUnmodifiedSince, nil) } // AcquireLease acquires a lease on the container for delete operations. The lease duration must be between 15 to 60 seconds, or infinite (-1). // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. 
func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*ContainerAcquireLeaseResponse, error) { ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() return c.client.AcquireLease(ctx, nil, &duration, &proposedID, ifModifiedSince, ifUnmodifiedSince, nil) } // RenewLease renews the container's previously-acquired lease. // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. func (c ContainerURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerRenewLeaseResponse, error) { ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() return c.client.RenewLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil) } // ReleaseLease releases the container's previously-acquired lease. // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. func (c ContainerURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerReleaseLeaseResponse, error) { ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() return c.client.ReleaseLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil) } // BreakLease breaks the container's previously-acquired lease (if it exists). // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. func (c ContainerURL) BreakLease(ctx context.Context, period int32, ac ModifiedAccessConditions) (*ContainerBreakLeaseResponse, error) { ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() return c.client.BreakLease(ctx, nil, leasePeriodPointer(period), ifModifiedSince, ifUnmodifiedSince, nil) } // ChangeLease changes the container's lease ID. // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. 
func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*ContainerChangeLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
	return c.client.ChangeLease(ctx, leaseID, proposedID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
}

// ListBlobsFlatSegment returns a single segment of blobs starting from the specified Marker. Use an empty
// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
// After getting a segment, process it, and then call ListBlobsFlatSegment again (passing the
// previously-returned Marker) to get the next segment.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
func (c ContainerURL) ListBlobsFlatSegment(ctx context.Context, marker Marker, o ListBlobsSegmentOptions) (*ListBlobsFlatSegmentResponse, error) {
	prefix, include, maxResults := o.pointers()
	return c.client.ListBlobFlatSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
}

// ListBlobsHierarchySegment returns a single segment of blobs starting from the specified Marker. Use an empty
// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
// After getting a segment, process it, and then call ListBlobsHierarchicalSegment again (passing the
// previously-returned Marker) to get the next segment.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
func (c ContainerURL) ListBlobsHierarchySegment(ctx context.Context, marker Marker, delimiter string, o ListBlobsSegmentOptions) (*ListBlobsHierarchySegmentResponse, error) {
	// Hierarchical listings do not accept include=snapshots; catch it client-side.
	if o.Details.Snapshots {
		return nil, errors.New("snapshots are not supported in this listing operation")
	}
	prefix, include, maxResults := o.pointers()
	return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.Val, maxResults, include, nil, nil)
}

// ListBlobsSegmentOptions defines options available when calling ListBlobs.
type ListBlobsSegmentOptions struct {
	Details BlobListingDetails // No IncludeType header is produced if ""
	Prefix  string             // No Prefix header is produced if ""

	// SetMaxResults sets the maximum desired results you want the service to return. Note, the
	// service may return fewer results than requested.
	// MaxResults=0 means no 'MaxResults' header specified.
	MaxResults int32
}

// pointers converts the options into the pointer/slice form the generated client expects.
func (o *ListBlobsSegmentOptions) pointers() (prefix *string, include []ListBlobsIncludeItemType, maxResults *int32) {
	if o.Prefix != "" {
		prefix = &o.Prefix
	}
	include = o.Details.slice()
	if o.MaxResults != 0 {
		maxResults = &o.MaxResults
	}
	return
}

// BlobListingDetails indicates what additional information the service should return with each blob.
type BlobListingDetails struct {
	Copy, Metadata, Snapshots, UncommittedBlobs, Deleted bool
}

// slice produces the Include query parameter's value.
func (d *BlobListingDetails) slice() []ListBlobsIncludeItemType {
	items := []ListBlobsIncludeItemType{}
	// NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
	if d.Copy {
		items = append(items, ListBlobsIncludeItemCopy)
	}
	if d.Deleted {
		items = append(items, ListBlobsIncludeItemDeleted)
	}
	if d.Metadata {
		items = append(items, ListBlobsIncludeItemMetadata)
	}
	if d.Snapshots {
		items = append(items, ListBlobsIncludeItemSnapshots)
	}
	if d.UncommittedBlobs {
		items = append(items, ListBlobsIncludeItemUncommittedblobs)
	}
	return items
}

// NOTE(review): the following line is tar-archive member-header residue (pax header for
// azblob/url_page_blob.go) from how this dump was produced; it is not Go source.
azure-storage-blob-go-0.10.0/azblob/url_page_blob.go000066400000000000000000000302731367515646300223200ustar00rootroot00000000000000package azblob

import (
	"context"
	"fmt"
	"io"
	"net/url"
	"strconv"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

const (
	// PageBlobPageBytes indicates the number of bytes in a page (512).
	PageBlobPageBytes = 512

	// PageBlobMaxUploadPagesBytes indicates the maximum number of bytes that can be sent in a call to UploadPages.
	PageBlobMaxUploadPagesBytes = 4 * 1024 * 1024 // 4MB
)

// PageBlobURL defines a set of operations applicable to page blobs.
type PageBlobURL struct {
	BlobURL
	pbClient pageBlobClient
}

// NewPageBlobURL creates a PageBlobURL object using the specified URL and request policy pipeline.
func NewPageBlobURL(url url.URL, p pipeline.Pipeline) PageBlobURL {
	blobClient := newBlobClient(url, p)
	pbClient := newPageBlobClient(url, p)
	return PageBlobURL{BlobURL: BlobURL{blobClient: blobClient}, pbClient: pbClient}
}

// WithPipeline creates a new PageBlobURL object identical to the source but with the specific request policy pipeline.
func (pb PageBlobURL) WithPipeline(p pipeline.Pipeline) PageBlobURL {
	return NewPageBlobURL(pb.blobClient.URL(), p)
}

// WithSnapshot creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (pb PageBlobURL) WithSnapshot(snapshot string) PageBlobURL {
	p := NewBlobURLParts(pb.URL())
	p.Snapshot = snapshot
	return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
}

// GetAccountInfo delegates to the underlying blob client's account-information call.
func (pb PageBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
	return pb.blobClient.GetAccountInfo(ctx)
}

// Create creates a page blob of the specified length. Call PutPage to upload data to a page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*PageBlobCreateResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return pb.pbClient.Create(ctx, 0, size, nil, PremiumPageBlobAccessTierNone,
		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl,
		metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
		nil, nil, EncryptionAlgorithmNone, // CPK
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, &sequenceNumber, nil)
}

// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac PageBlobAccessConditions, transactionalMD5 []byte) (*PageBlobUploadPagesResponse, error) {
	// The stream must be at position 0; its remaining length becomes the request's content length.
	count, err := validateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		return nil, err
	}
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
	// The page range header is inclusive: [offset, offset+count-1].
	return pb.pbClient.UploadPages(ctx, body, count, transactionalMD5, nil, nil,
		PageRange{Start: offset, End: offset + count - 1}.pointers(),
		ac.LeaseAccessConditions.pointers(),
		nil, nil, EncryptionAlgorithmNone, // CPK
		ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob.
// The sourceOffset specifies the start offset of source data to copy from.
// The destOffset specifies the start offset of data in page blob will be written to.
// The count must be a multiple of 512 bytes.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url.
func (pb PageBlobURL) UploadPagesFromURL(ctx context.Context, sourceURL url.URL, sourceOffset int64, destOffset int64, count int64, transactionalMD5 []byte, destinationAccessConditions PageBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions) (*PageBlobUploadPagesFromURLResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers()
	sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
	ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := destinationAccessConditions.SequenceNumberAccessConditions.pointers()
	// Both ranges are rendered as inclusive "bytes=start-end" strings; content length is 0
	// because the service pulls the page data from sourceURL.
	return pb.pbClient.UploadPagesFromURL(ctx, sourceURL.String(),
		*PageRange{Start: sourceOffset, End: sourceOffset + count - 1}.pointers(), 0,
		*PageRange{Start: destOffset, End: destOffset + count - 1}.pointers(),
		transactionalMD5, nil, nil, nil,
		nil, EncryptionAlgorithmNone, // CPK
		destinationAccessConditions.LeaseAccessConditions.pointers(),
		ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
}

// ClearPages frees the specified pages from the page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac PageBlobAccessConditions) (*PageBlobClearPagesResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
	// The page range header is inclusive: [offset, offset+count-1].
	return pb.pbClient.ClearPages(ctx, 0, nil,
		PageRange{Start: offset, End: offset + count - 1}.pointers(),
		ac.LeaseAccessConditions.pointers(), nil, nil, EncryptionAlgorithmNone, // CPK
		ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageList, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return pb.pbClient.GetPageRanges(ctx, nil, nil,
		httpRange{offset: offset, count: count}.pointers(),
		ac.LeaseAccessConditions.pointers(),
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot string, ac BlobAccessConditions) (*PageList, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot,
		httpRange{offset: offset, count: count}.pointers(),
		ac.LeaseAccessConditions.pointers(),
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// Resize resizes the page blob to the specified size (which must be a multiple of 512).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessConditions) (*PageBlobResizeResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(),
		nil, nil, EncryptionAlgorithmNone, // CPK
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// UpdateSequenceNumber sets the page blob's sequence number.
func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceNumberActionType, sequenceNumber int64, ac BlobAccessConditions) (*PageBlobUpdateSequenceNumberResponse, error) {
	sn := &sequenceNumber
	// The increment action is computed service-side; no explicit number may accompany it.
	if action == SequenceNumberActionIncrement {
		sn = nil
	}
	ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
	return pb.pbClient.UpdateSequenceNumber(ctx, action, nil,
		ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch,
		sn, nil)
}

// StartCopyIncremental begins an operation to start an incremental copy from one page blob's snapshot to this page blob.
// The snapshot is copied such that only the differential changes between the previously copied snapshot are transferred to the destination.
// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
func (pb PageBlobURL) StartCopyIncremental(ctx context.Context, source url.URL, snapshot string, ac BlobAccessConditions) (*PageBlobCopyIncrementalResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	// The snapshot to copy is identified via a "snapshot" query parameter on the source URL.
	qp := source.Query()
	qp.Set("snapshot", snapshot)
	source.RawQuery = qp.Encode()
	return pb.pbClient.CopyIncremental(ctx, source.String(), nil,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

// pointers renders the page range as the inclusive "bytes=start-end" header string.
func (pr PageRange) pointers() *string {
	endOffset := strconv.FormatInt(int64(pr.End), 10)
	asString := fmt.Sprintf("bytes=%v-%s", pr.Start, endOffset)
	return &asString
}

// PageBlobAccessConditions aggregates the access conditions applicable to page blob operations.
type PageBlobAccessConditions struct {
	ModifiedAccessConditions
	LeaseAccessConditions
	SequenceNumberAccessConditions
}

// SequenceNumberAccessConditions identifies page blob-specific access conditions which you optionally set.
type SequenceNumberAccessConditions struct {
	// IfSequenceNumberLessThan ensures that the page blob operation succeeds
	// only if the blob's sequence number is less than a value.
	// IfSequenceNumberLessThan=0 means no 'IfSequenceNumberLessThan' header specified.
	// IfSequenceNumberLessThan>0 means 'IfSequenceNumberLessThan' header specified with its value
	// IfSequenceNumberLessThan==-1 means 'IfSequenceNumberLessThan' header specified with a value of 0
	IfSequenceNumberLessThan int64

	// IfSequenceNumberLessThanOrEqual ensures that the page blob operation succeeds
	// only if the blob's sequence number is less than or equal to a value.
	// IfSequenceNumberLessThanOrEqual=0 means no 'IfSequenceNumberLessThanOrEqual' header specified.
	// IfSequenceNumberLessThanOrEqual>0 means 'IfSequenceNumberLessThanOrEqual' header specified with its value
	// IfSequenceNumberLessThanOrEqual=-1 means 'IfSequenceNumberLessThanOrEqual' header specified with a value of 0
	IfSequenceNumberLessThanOrEqual int64

	// IfSequenceNumberEqual ensures that the page blob operation succeeds
	// only if the blob's sequence number is equal to a value.
	// IfSequenceNumberEqual=0 means no 'IfSequenceNumberEqual' header specified.
	// IfSequenceNumberEqual>0 means 'IfSequenceNumberEqual' header specified with its value
	// IfSequenceNumberEqual=-1 means 'IfSequenceNumberEqual' header specified with a value of 0
	IfSequenceNumberEqual int64
}

// pointers is for internal infrastructure. It returns the fields as pointers,
// applying the 0=absent / -1=send-zero encoding documented on the struct fields.
func (ac SequenceNumberAccessConditions) pointers() (snltoe *int64, snlt *int64, sne *int64) {
	var zero int64 // Defaults to 0
	switch ac.IfSequenceNumberLessThan {
	case -1:
		snlt = &zero
	case 0:
		snlt = nil
	default:
		snlt = &ac.IfSequenceNumberLessThan
	}
	switch ac.IfSequenceNumberLessThanOrEqual {
	case -1:
		snltoe = &zero
	case 0:
		snltoe = nil
	default:
		snltoe = &ac.IfSequenceNumberLessThanOrEqual
	}
	switch ac.IfSequenceNumberEqual {
	case -1:
		sne = &zero
	case 0:
		sne = nil
	default:
		sne = &ac.IfSequenceNumberEqual
	}
	return
}

// NOTE(review): the following line is tar-archive member-header residue (pax header for
// azblob/url_service.go) from how this dump was produced; it is not Go source.
azure-storage-blob-go-0.10.0/azblob/url_service.go000066400000000000000000000141421367515646300220430ustar00rootroot00000000000000package azblob

import (
	"context"
	"net/url"
	"strings"
	"time"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

const (
	// ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container.
	ContainerNameRoot = "$root"

	// ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container.
	ContainerNameLogs = "$logs"
)

// A ServiceURL represents a URL to the Azure Storage Blob service allowing you to manipulate blob containers.
type ServiceURL struct { client serviceClient } // NewServiceURL creates a ServiceURL object using the specified URL and request policy pipeline. func NewServiceURL(primaryURL url.URL, p pipeline.Pipeline) ServiceURL { client := newServiceClient(primaryURL, p) return ServiceURL{client: client} } //GetUserDelegationCredential obtains a UserDelegationKey object using the base ServiceURL object. //OAuth is required for this call, as well as any role that can delegate access to the storage account. func (s ServiceURL) GetUserDelegationCredential(ctx context.Context, info KeyInfo, timeout *int32, requestID *string) (UserDelegationCredential, error) { sc := newServiceClient(s.client.url, s.client.p) udk, err := sc.GetUserDelegationKey(ctx, info, timeout, requestID) if err != nil { return UserDelegationCredential{}, err } return NewUserDelegationCredential(strings.Split(s.client.url.Host, ".")[0], *udk), nil } //TODO this was supposed to be generated //NewKeyInfo creates a new KeyInfo struct with the correct time formatting & conversion func NewKeyInfo(Start, Expiry time.Time) KeyInfo { return KeyInfo{ Start: Start.UTC().Format(SASTimeFormat), Expiry: Expiry.UTC().Format(SASTimeFormat), } } func (s ServiceURL) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) { return s.client.GetAccountInfo(ctx) } // URL returns the URL endpoint used by the ServiceURL object. func (s ServiceURL) URL() url.URL { return s.client.URL() } // String returns the URL as a string. func (s ServiceURL) String() string { u := s.URL() return u.String() } // WithPipeline creates a new ServiceURL object identical to the source but with the specified request policy pipeline. func (s ServiceURL) WithPipeline(p pipeline.Pipeline) ServiceURL { return NewServiceURL(s.URL(), p) } // NewContainerURL creates a new ContainerURL object by concatenating containerName to the end of // ServiceURL's URL. The new ContainerURL uses the same request policy pipeline as the ServiceURL. 
// To change the pipeline, create the ContainerURL and then call its WithPipeline method passing in the // desired pipeline object. Or, call this package's NewContainerURL instead of calling this object's // NewContainerURL method. func (s ServiceURL) NewContainerURL(containerName string) ContainerURL { containerURL := appendToURLPath(s.URL(), containerName) return NewContainerURL(containerURL, s.client.Pipeline()) } // appendToURLPath appends a string to the end of a URL's path (prefixing the string with a '/' if required) func appendToURLPath(u url.URL, name string) url.URL { // e.g. "https://ms.com/a/b/?k1=v1&k2=v2#f" // When you call url.Parse() this is what you'll get: // Scheme: "https" // Opaque: "" // User: nil // Host: "ms.com" // Path: "/a/b/" This should start with a / and it might or might not have a trailing slash // RawPath: "" // ForceQuery: false // RawQuery: "k1=v1&k2=v2" // Fragment: "f" if len(u.Path) == 0 || u.Path[len(u.Path)-1] != '/' { u.Path += "/" // Append "/" to end before appending name } u.Path += name return u } // ListContainersFlatSegment returns a single segment of containers starting from the specified Marker. Use an empty // Marker to start enumeration from the beginning. Container names are returned in lexicographic order. // After getting a segment, process it, and then call ListContainersFlatSegment again (passing the the // previously-returned Marker) to get the next segment. For more information, see // https://docs.microsoft.com/rest/api/storageservices/list-containers2. func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersSegmentResponse, error) { prefix, include, maxResults := o.pointers() return s.client.ListContainersSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil) } // ListContainersOptions defines options available when calling ListContainers. 
type ListContainersSegmentOptions struct { Detail ListContainersDetail // No IncludeType header is produced if "" Prefix string // No Prefix header is produced if "" MaxResults int32 // 0 means unspecified // TODO: update swagger to generate this type? } func (o *ListContainersSegmentOptions) pointers() (prefix *string, include ListContainersIncludeType, maxResults *int32) { if o.Prefix != "" { prefix = &o.Prefix } if o.MaxResults != 0 { maxResults = &o.MaxResults } include = ListContainersIncludeType(o.Detail.string()) return } // ListContainersFlatDetail indicates what additional information the service should return with each container. type ListContainersDetail struct { // Tells the service whether to return metadata for each container. Metadata bool } // string produces the Include query parameter's value. func (d *ListContainersDetail) string() string { items := make([]string, 0, 1) // NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails! 
if d.Metadata { items = append(items, string(ListContainersIncludeMetadata)) } if len(items) > 0 { return strings.Join(items, ",") } return string(ListContainersIncludeNone) } func (bsu ServiceURL) GetProperties(ctx context.Context) (*StorageServiceProperties, error) { return bsu.client.GetProperties(ctx, nil, nil) } func (bsu ServiceURL) SetProperties(ctx context.Context, properties StorageServiceProperties) (*ServiceSetPropertiesResponse, error) { return bsu.client.SetProperties(ctx, properties, nil, nil) } func (bsu ServiceURL) GetStatistics(ctx context.Context) (*StorageServiceStats, error) { return bsu.client.GetStatistics(ctx, nil, nil) } azure-storage-blob-go-0.10.0/azblob/user_delegation_credential.go000066400000000000000000000021001367515646300250530ustar00rootroot00000000000000package azblob import ( "crypto/hmac" "crypto/sha256" "encoding/base64" ) // NewUserDelegationCredential creates a new UserDelegationCredential using a Storage account's name and a user delegation key from it func NewUserDelegationCredential(accountName string, key UserDelegationKey) UserDelegationCredential { return UserDelegationCredential{ accountName: accountName, accountKey: key, } } type UserDelegationCredential struct { accountName string accountKey UserDelegationKey } // AccountName returns the Storage account's name func (f UserDelegationCredential) AccountName() string { return f.accountName } // ComputeHMAC func (f UserDelegationCredential) ComputeHMACSHA256(message string) (base64String string) { bytes, _ := base64.StdEncoding.DecodeString(f.accountKey.Value) h := hmac.New(sha256.New, bytes) h.Write([]byte(message)) return base64.StdEncoding.EncodeToString(h.Sum(nil)) } // Private method to return important parameters for NewSASQueryParameters func (f UserDelegationCredential) getUDKParams() *UserDelegationKey { return &f.accountKey } azure-storage-blob-go-0.10.0/azblob/version.go000066400000000000000000000000611367515646300212010ustar00rootroot00000000000000package 
azblob const serviceLibVersion = "0.10" azure-storage-blob-go-0.10.0/azblob/zc_credential_anonymous.go000066400000000000000000000034311367515646300244360ustar00rootroot00000000000000package azblob import ( "context" "github.com/Azure/azure-pipeline-go/pipeline" ) // Credential represent any credential type; it is used to create a credential policy Factory. type Credential interface { pipeline.Factory credentialMarker() } type credentialFunc pipeline.FactoryFunc func (f credentialFunc) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { return f(next, po) } // credentialMarker is a package-internal method that exists just to satisfy the Credential interface. func (credentialFunc) credentialMarker() {} ////////////////////////////// // NewAnonymousCredential creates an anonymous credential for use with HTTP(S) requests that read public resource // or for use with Shared Access Signatures (SAS). func NewAnonymousCredential() Credential { return anonymousCredentialFactory } var anonymousCredentialFactory Credential = &anonymousCredentialPolicyFactory{} // Singleton // anonymousCredentialPolicyFactory is the credential's policy factory. type anonymousCredentialPolicyFactory struct { } // New creates a credential policy object. func (f *anonymousCredentialPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { return &anonymousCredentialPolicy{next: next} } // credentialMarker is a package-internal method that exists just to satisfy the Credential interface. func (*anonymousCredentialPolicyFactory) credentialMarker() {} // anonymousCredentialPolicy is the credential's policy object. type anonymousCredentialPolicy struct { next pipeline.Policy } // Do implements the credential's policy interface. 
func (p anonymousCredentialPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { // For anonymous credentials, this is effectively a no-op return p.next.Do(ctx, request) } azure-storage-blob-go-0.10.0/azblob/zc_credential_shared_key.go000066400000000000000000000154551367515646300245350ustar00rootroot00000000000000package azblob import ( "bytes" "context" "crypto/hmac" "crypto/sha256" "encoding/base64" "errors" "net/http" "net/url" "sort" "strings" "time" "github.com/Azure/azure-pipeline-go/pipeline" ) // NewSharedKeyCredential creates an immutable SharedKeyCredential containing the // storage account's name and either its primary or secondary key. func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { bytes, err := base64.StdEncoding.DecodeString(accountKey) if err != nil { return &SharedKeyCredential{}, err } return &SharedKeyCredential{accountName: accountName, accountKey: bytes}, nil } // SharedKeyCredential contains an account's name and its primary or secondary key. // It is immutable making it shareable and goroutine-safe. type SharedKeyCredential struct { // Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only accountName string accountKey []byte } // AccountName returns the Storage account's name. func (f SharedKeyCredential) AccountName() string { return f.accountName } func (f SharedKeyCredential) getAccountKey() []byte { return f.accountKey } // noop function to satisfy StorageAccountCredential interface func (f SharedKeyCredential) getUDKParams() *UserDelegationKey { return nil } // New creates a credential policy object. 
func (f *SharedKeyCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { // Add a x-ms-date header if it doesn't already exist if d := request.Header.Get(headerXmsDate); d == "" { request.Header[headerXmsDate] = []string{time.Now().UTC().Format(http.TimeFormat)} } stringToSign, err := f.buildStringToSign(request) if err != nil { return nil, err } signature := f.ComputeHMACSHA256(stringToSign) authHeader := strings.Join([]string{"SharedKey ", f.accountName, ":", signature}, "") request.Header[headerAuthorization] = []string{authHeader} response, err := next.Do(ctx, request) if err != nil && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusForbidden { // Service failed to authenticate request, log it po.Log(pipeline.LogError, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n") } return response, err }) } // credentialMarker is a package-internal method that exists just to satisfy the Credential interface. func (*SharedKeyCredential) credentialMarker() {} // Constants ensuring that header names are correctly spelled and consistently cased. const ( headerAuthorization = "Authorization" headerCacheControl = "Cache-Control" headerContentEncoding = "Content-Encoding" headerContentDisposition = "Content-Disposition" headerContentLanguage = "Content-Language" headerContentLength = "Content-Length" headerContentMD5 = "Content-MD5" headerContentType = "Content-Type" headerDate = "Date" headerIfMatch = "If-Match" headerIfModifiedSince = "If-Modified-Since" headerIfNoneMatch = "If-None-Match" headerIfUnmodifiedSince = "If-Unmodified-Since" headerRange = "Range" headerUserAgent = "User-Agent" headerXmsDate = "x-ms-date" headerXmsVersion = "x-ms-version" ) // ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. 
func (f SharedKeyCredential) ComputeHMACSHA256(message string) (base64String string) { h := hmac.New(sha256.New, f.accountKey) h.Write([]byte(message)) return base64.StdEncoding.EncodeToString(h.Sum(nil)) } func (f *SharedKeyCredential) buildStringToSign(request pipeline.Request) (string, error) { // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services headers := request.Header contentLength := headers.Get(headerContentLength) if contentLength == "0" { contentLength = "" } canonicalizedResource, err := f.buildCanonicalizedResource(request.URL) if err != nil { return "", err } stringToSign := strings.Join([]string{ request.Method, headers.Get(headerContentEncoding), headers.Get(headerContentLanguage), contentLength, headers.Get(headerContentMD5), headers.Get(headerContentType), "", // Empty date because x-ms-date is expected (as per web page above) headers.Get(headerIfModifiedSince), headers.Get(headerIfMatch), headers.Get(headerIfNoneMatch), headers.Get(headerIfUnmodifiedSince), headers.Get(headerRange), buildCanonicalizedHeader(headers), canonicalizedResource, }, "\n") return stringToSign, nil } func buildCanonicalizedHeader(headers http.Header) string { cm := map[string][]string{} for k, v := range headers { headerName := strings.TrimSpace(strings.ToLower(k)) if strings.HasPrefix(headerName, "x-ms-") { cm[headerName] = v // NOTE: the value must not have any whitespace around it. 
} } if len(cm) == 0 { return "" } keys := make([]string, 0, len(cm)) for key := range cm { keys = append(keys, key) } sort.Strings(keys) ch := bytes.NewBufferString("") for i, key := range keys { if i > 0 { ch.WriteRune('\n') } ch.WriteString(key) ch.WriteRune(':') ch.WriteString(strings.Join(cm[key], ",")) } return string(ch.Bytes()) } func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) { // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services cr := bytes.NewBufferString("/") cr.WriteString(f.accountName) if len(u.Path) > 0 { // Any portion of the CanonicalizedResource string that is derived from // the resource's URI should be encoded exactly as it is in the URI. // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx cr.WriteString(u.EscapedPath()) } else { // a slash is required to indicate the root path cr.WriteString("/") } // params is a map[string][]string; param name is key; params values is []string params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values if err != nil { return "", errors.New("parsing query parameters must succeed, otherwise there might be serious problems in the SDK/generated code") } if len(params) > 0 { // There is at least 1 query parameter paramNames := []string{} // We use this to sort the parameter key names for paramName := range params { paramNames = append(paramNames, paramName) // paramNames must be lowercase } sort.Strings(paramNames) for _, paramName := range paramNames { paramValues := params[paramName] sort.Strings(paramValues) // Join the sorted key values separated by ',' // Then prepend "keyName:"; then add this string to the buffer cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ",")) } } return string(cr.Bytes()), nil } azure-storage-blob-go-0.10.0/azblob/zc_credential_token.go000066400000000000000000000126461367515646300235360ustar00rootroot00000000000000package azblob import ( "context" 
"errors" "sync/atomic" "runtime" "sync" "time" "github.com/Azure/azure-pipeline-go/pipeline" ) // TokenRefresher represents a callback method that you write; this method is called periodically // so you can refresh the token credential's value. type TokenRefresher func(credential TokenCredential) time.Duration // TokenCredential represents a token credential (which is also a pipeline.Factory). type TokenCredential interface { Credential Token() string SetToken(newToken string) } // NewTokenCredential creates a token credential for use with role-based access control (RBAC) access to Azure Storage // resources. You initialize the TokenCredential with an initial token value. If you pass a non-nil value for // tokenRefresher, then the function you pass will be called immediately so it can refresh and change the // TokenCredential's token value by calling SetToken. Your tokenRefresher function must return a time.Duration // indicating how long the TokenCredential object should wait before calling your tokenRefresher function again. // If your tokenRefresher callback fails to refresh the token, you can return a duration of 0 to stop your // TokenCredential object from ever invoking tokenRefresher again. Also, oen way to deal with failing to refresh a // token is to cancel a context.Context object used by requests that have the TokenCredential object in their pipeline. func NewTokenCredential(initialToken string, tokenRefresher TokenRefresher) TokenCredential { tc := &tokenCredential{} tc.SetToken(initialToken) // We don't set it above to guarantee atomicity if tokenRefresher == nil { return tc // If no callback specified, return the simple tokenCredential } tcwr := &tokenCredentialWithRefresh{token: tc} tcwr.token.startRefresh(tokenRefresher) runtime.SetFinalizer(tcwr, func(deadTC *tokenCredentialWithRefresh) { deadTC.token.stopRefresh() deadTC.token = nil // Sanity (not really required) }) return tcwr } // tokenCredentialWithRefresh is a wrapper over a token credential. 
// When this wrapper object gets GC'd, it stops the tokenCredential's timer // which allows the tokenCredential object to also be GC'd. type tokenCredentialWithRefresh struct { token *tokenCredential } // credentialMarker is a package-internal method that exists just to satisfy the Credential interface. func (*tokenCredentialWithRefresh) credentialMarker() {} // Token returns the current token value func (f *tokenCredentialWithRefresh) Token() string { return f.token.Token() } // SetToken changes the current token value func (f *tokenCredentialWithRefresh) SetToken(token string) { f.token.SetToken(token) } // New satisfies pipeline.Factory's New method creating a pipeline policy object. func (f *tokenCredentialWithRefresh) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { return f.token.New(next, po) } /////////////////////////////////////////////////////////////////////////////// // tokenCredential is a pipeline.Factory is the credential's policy factory. type tokenCredential struct { token atomic.Value // The members below are only used if the user specified a tokenRefresher callback function. timer *time.Timer tokenRefresher TokenRefresher lock sync.Mutex stopped bool } // credentialMarker is a package-internal method that exists just to satisfy the Credential interface. func (*tokenCredential) credentialMarker() {} // Token returns the current token value func (f *tokenCredential) Token() string { return f.token.Load().(string) } // SetToken changes the current token value func (f *tokenCredential) SetToken(token string) { f.token.Store(token) } // startRefresh calls refresh which immediately calls tokenRefresher // and then starts a timer to call tokenRefresher in the future. 
func (f *tokenCredential) startRefresh(tokenRefresher TokenRefresher) { f.tokenRefresher = tokenRefresher f.stopped = false // In case user calls StartRefresh, StopRefresh, & then StartRefresh again f.refresh() } // refresh calls the user's tokenRefresher so they can refresh the token (by // calling SetToken) and then starts another time (based on the returned duration) // in order to refresh the token again in the future. func (f *tokenCredential) refresh() { d := f.tokenRefresher(f) // Invoke the user's refresh callback outside of the lock if d > 0 { // If duration is 0 or negative, refresher wants to not be called again f.lock.Lock() if !f.stopped { f.timer = time.AfterFunc(d, f.refresh) } f.lock.Unlock() } } // stopRefresh stops any pending timer and sets stopped field to true to prevent // any new timer from starting. // NOTE: Stopping the timer allows the GC to destroy the tokenCredential object. func (f *tokenCredential) stopRefresh() { f.lock.Lock() f.stopped = true if f.timer != nil { f.timer.Stop() } f.lock.Unlock() } // New satisfies pipeline.Factory's New method creating a pipeline policy object. 
func (f *tokenCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { if request.URL.Scheme != "https" { // HTTPS must be used, otherwise the tokens are at the risk of being exposed return nil, errors.New("token credentials require a URL using the https protocol scheme") } request.Header[headerAuthorization] = []string{"Bearer " + f.Token()} return next.Do(ctx, request) }) } azure-storage-blob-go-0.10.0/azblob/zc_mmf_unix.go000066400000000000000000000012221367515646300220320ustar00rootroot00000000000000// +build linux darwin freebsd openbsd netbsd dragonfly solaris package azblob import ( "os" "golang.org/x/sys/unix" ) type mmf []byte func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) { prot, flags := unix.PROT_READ, unix.MAP_SHARED // Assume read-only if writable { prot, flags = unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED } addr, err := unix.Mmap(int(file.Fd()), offset, length, prot, flags) return mmf(addr), err } func (m *mmf) unmap() { err := unix.Munmap(*m) *m = nil if err != nil { panic("if we are unable to unmap the memory-mapped file, there is serious concern for memory corruption") } } azure-storage-blob-go-0.10.0/azblob/zc_mmf_windows.go000066400000000000000000000021371367515646300225470ustar00rootroot00000000000000package azblob import ( "os" "reflect" "syscall" "unsafe" ) type mmf []byte func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) { prot, access := uint32(syscall.PAGE_READONLY), uint32(syscall.FILE_MAP_READ) // Assume read-only if writable { prot, access = uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE) } hMMF, errno := syscall.CreateFileMapping(syscall.Handle(file.Fd()), nil, prot, uint32(int64(length)>>32), uint32(int64(length)&0xffffffff), nil) if hMMF == 0 { return nil, os.NewSyscallError("CreateFileMapping", errno) } defer 
syscall.CloseHandle(hMMF) addr, errno := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length)) m := mmf{} h := (*reflect.SliceHeader)(unsafe.Pointer(&m)) h.Data = addr h.Len = length h.Cap = h.Len return m, nil } func (m *mmf) unmap() { addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0]))) *m = mmf{} err := syscall.UnmapViewOfFile(addr) if err != nil { panic("if we are unable to unmap the memory-mapped file, there is serious concern for memory corruption") } } azure-storage-blob-go-0.10.0/azblob/zc_pipeline.go000066400000000000000000000031411367515646300220170ustar00rootroot00000000000000package azblob import ( "github.com/Azure/azure-pipeline-go/pipeline" ) // PipelineOptions is used to configure a request policy pipeline's retry policy and logging. type PipelineOptions struct { // Log configures the pipeline's logging infrastructure indicating what information is logged and where. Log pipeline.LogOptions // Retry configures the built-in retry policy behavior. Retry RetryOptions // RequestLog configures the built-in request logging policy. RequestLog RequestLogOptions // Telemetry configures the built-in telemetry policy behavior. Telemetry TelemetryOptions // HTTPSender configures the sender of HTTP requests HTTPSender pipeline.Factory } // NewPipeline creates a Pipeline using the specified credentials and options. 
func NewPipeline(c Credential, o PipelineOptions) pipeline.Pipeline { // Closest to API goes first; closest to the wire goes last f := []pipeline.Factory{ NewTelemetryPolicyFactory(o.Telemetry), NewUniqueRequestIDPolicyFactory(), NewRetryPolicyFactory(o.Retry), } if _, ok := c.(*anonymousCredentialPolicyFactory); !ok { // For AnonymousCredential, we optimize out the policy factory since it doesn't do anything // NOTE: The credential's policy factory must appear close to the wire so it can sign any // changes made by other factories (like UniqueRequestIDPolicyFactory) f = append(f, c) } f = append(f, NewRequestLogPolicyFactory(o.RequestLog), pipeline.MethodFactoryMarker()) // indicates at what stage in the pipeline the method factory is invoked return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: o.HTTPSender, Log: o.Log}) } azure-storage-blob-go-0.10.0/azblob/zc_policy_request_log.go000066400000000000000000000147031367515646300241300ustar00rootroot00000000000000package azblob import ( "bytes" "context" "fmt" "net/http" "net/url" "runtime" "strings" "time" "github.com/Azure/azure-pipeline-go/pipeline" ) // RequestLogOptions configures the retry policy's behavior. type RequestLogOptions struct { // LogWarningIfTryOverThreshold logs a warning if a tried operation takes longer than the specified // duration (-1=no logging; 0=default threshold). LogWarningIfTryOverThreshold time.Duration } func (o RequestLogOptions) defaults() RequestLogOptions { if o.LogWarningIfTryOverThreshold == 0 { // It would be good to relate this to https://azure.microsoft.com/en-us/support/legal/sla/storage/v1_2/ // But this monitors the time to get the HTTP response; NOT the time to download the response body. o.LogWarningIfTryOverThreshold = 3 * time.Second // Default to 3 seconds } return o } // NewRequestLogPolicyFactory creates a RequestLogPolicyFactory object configured using the specified options. 
func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory { o = o.defaults() // Force defaults to be calculated return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { // These variables are per-policy; shared by multiple calls to Do var try int32 operationStart := time.Now() // If this is the 1st try, record the operation state time return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) { try++ // The first try is #1 (not #0) // Log the outgoing request as informational if po.ShouldLog(pipeline.LogInfo) { b := &bytes.Buffer{} fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", try) pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), nil, nil) po.Log(pipeline.LogInfo, b.String()) } // Set the time for this particular retry operation and then Do the operation. tryStart := time.Now() response, err = next.Do(ctx, request) // Make the request tryEnd := time.Now() tryDuration := tryEnd.Sub(tryStart) opDuration := tryEnd.Sub(operationStart) logLevel, forceLog := pipeline.LogInfo, false // Default logging information // If the response took too long, we'll upgrade to warning. if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold { // Log a warning if the try duration exceeded the specified threshold logLevel, forceLog = pipeline.LogWarning, true } if err == nil { // We got a response from the service sc := response.Response().StatusCode if ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict && sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) { logLevel, forceLog = pipeline.LogError, true // Promote to Error any 4xx (except those listed is an error) or any 5xx } else { // For other status codes, we leave the level as is. 
} } else { // This error did not get an HTTP response from the service; upgrade the severity to Error logLevel, forceLog = pipeline.LogError, true } if shouldLog := po.ShouldLog(logLevel); forceLog || shouldLog { // We're going to log this; build the string to log b := &bytes.Buffer{} slow := "" if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold { slow = fmt.Sprintf("[SLOW >%v]", o.LogWarningIfTryOverThreshold) } fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v%s, OpTime=%v) -- ", try, tryDuration, slow, opDuration) if err != nil { // This HTTP request did not get a response from the service fmt.Fprint(b, "REQUEST ERROR\n") } else { if logLevel == pipeline.LogError { fmt.Fprint(b, "RESPONSE STATUS CODE ERROR\n") } else { fmt.Fprint(b, "RESPONSE SUCCESSFULLY RECEIVED\n") } } pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), response.Response(), err) if logLevel <= pipeline.LogError { b.Write(stack()) // For errors (or lower levels), we append the stack trace (an expensive operation) } msg := b.String() if forceLog { pipeline.ForceLog(logLevel, msg) } if shouldLog { po.Log(logLevel, msg) } } return response, err } }) } // RedactSigQueryParam redacts the 'sig' query parameter in URL's raw query to protect secret. 
func RedactSigQueryParam(rawQuery string) (bool, string) { rawQuery = strings.ToLower(rawQuery) // lowercase the string so we can look for ?sig= and &sig= sigFound := strings.Contains(rawQuery, "?sig=") if !sigFound { sigFound = strings.Contains(rawQuery, "&sig=") if !sigFound { return sigFound, rawQuery // [?|&]sig= not found; return same rawQuery passed in (no memory allocation) } } // [?|&]sig= found, redact its value values, _ := url.ParseQuery(rawQuery) for name := range values { if strings.EqualFold(name, "sig") { values[name] = []string{"REDACTED"} } } return sigFound, values.Encode() } func prepareRequestForLogging(request pipeline.Request) *http.Request { req := request if sigFound, rawQuery := RedactSigQueryParam(req.URL.RawQuery); sigFound { // Make copy so we don't destroy the query parameters we actually need to send in the request req = request.Copy() req.Request.URL.RawQuery = rawQuery } return prepareRequestForServiceLogging(req) } func stack() []byte { buf := make([]byte, 1024) for { n := runtime.Stack(buf, false) if n < len(buf) { return buf[:n] } buf = make([]byte, 2*len(buf)) } } /////////////////////////////////////////////////////////////////////////////////////// // Redact phase useful for blob and file service only. For other services, // this method can directly return request.Request. 
/////////////////////////////////////////////////////////////////////////////////////// func prepareRequestForServiceLogging(request pipeline.Request) *http.Request { req := request if exist, key := doesHeaderExistCaseInsensitive(req.Header, xMsCopySourceHeader); exist { req = request.Copy() url, err := url.Parse(req.Header.Get(key)) if err == nil { if sigFound, rawQuery := RedactSigQueryParam(url.RawQuery); sigFound { url.RawQuery = rawQuery req.Header.Set(xMsCopySourceHeader, url.String()) } } } return req.Request } const xMsCopySourceHeader = "x-ms-copy-source" func doesHeaderExistCaseInsensitive(header http.Header, key string) (bool, string) { for keyInHeader := range header { if strings.EqualFold(keyInHeader, key) { return true, keyInHeader } } return false, "" } azure-storage-blob-go-0.10.0/azblob/zc_policy_retry.go000066400000000000000000000416661367515646300227540ustar00rootroot00000000000000package azblob import ( "context" "errors" "io" "io/ioutil" "math/rand" "net" "net/http" "strconv" "strings" "time" "github.com/Azure/azure-pipeline-go/pipeline" ) // RetryPolicy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants. type RetryPolicy int32 const ( // RetryPolicyExponential tells the pipeline to use an exponential back-off retry policy RetryPolicyExponential RetryPolicy = 0 // RetryPolicyFixed tells the pipeline to use a fixed back-off retry policy RetryPolicyFixed RetryPolicy = 1 ) // RetryOptions configures the retry policy's behavior. type RetryOptions struct { // Policy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.\ // A value of zero means that you accept our default policy. Policy RetryPolicy // MaxTries specifies the maximum number of attempts an operation will be tried before producing an error (0=default). // A value of zero means that you accept our default policy. A value of 1 means 1 try and no retries. 
MaxTries int32 // TryTimeout indicates the maximum time allowed for any single try of an HTTP request. // A value of zero means that you accept our default timeout. NOTE: When transferring large amounts // of data, the default TryTimeout will probably not be sufficient. You should override this value // based on the bandwidth available to the host machine and proximity to the Storage service. A good // starting point may be something like (60 seconds per MB of anticipated-payload-size). TryTimeout time.Duration // RetryDelay specifies the amount of delay to use before retrying an operation (0=default). // When RetryPolicy is specified as RetryPolicyExponential, the delay increases exponentially // with each retry up to a maximum specified by MaxRetryDelay. // If you specify 0, then you must also specify 0 for MaxRetryDelay. // If you specify RetryDelay, then you must also specify MaxRetryDelay, and MaxRetryDelay should be // equal to or greater than RetryDelay. RetryDelay time.Duration // MaxRetryDelay specifies the maximum delay allowed before retrying an operation (0=default). // If you specify 0, then you must also specify 0 for RetryDelay. MaxRetryDelay time.Duration // RetryReadsFromSecondaryHost specifies whether the retry policy should retry a read operation against another host. // If RetryReadsFromSecondaryHost is "" (the default) then operations are not retried against another host. // NOTE: Before setting this field, make sure you understand the issues around reading stale & potentially-inconsistent // data at this webpage: https://docs.microsoft.com/en-us/azure/storage/common/storage-designing-ha-apps-with-ragrs RetryReadsFromSecondaryHost string // Comment this our for non-Blob SDKs } func (o RetryOptions) retryReadsFromSecondaryHost() string { return o.RetryReadsFromSecondaryHost // This is for the Blob SDK only //return "" // This is for non-blob SDKs } func (o RetryOptions) defaults() RetryOptions { // We assume the following: // 1. 
o.Policy should either be RetryPolicyExponential or RetryPolicyFixed // 2. o.MaxTries >= 0 // 3. o.TryTimeout, o.RetryDelay, and o.MaxRetryDelay >=0 // 4. o.RetryDelay <= o.MaxRetryDelay // 5. Both o.RetryDelay and o.MaxRetryDelay must be 0 or neither can be 0 IfDefault := func(current *time.Duration, desired time.Duration) { if *current == time.Duration(0) { *current = desired } } // Set defaults if unspecified if o.MaxTries == 0 { o.MaxTries = 4 } switch o.Policy { case RetryPolicyExponential: IfDefault(&o.TryTimeout, 1*time.Minute) IfDefault(&o.RetryDelay, 4*time.Second) IfDefault(&o.MaxRetryDelay, 120*time.Second) case RetryPolicyFixed: IfDefault(&o.TryTimeout, 1*time.Minute) IfDefault(&o.RetryDelay, 30*time.Second) IfDefault(&o.MaxRetryDelay, 120*time.Second) } return o } func (o RetryOptions) calcDelay(try int32) time.Duration { // try is >=1; never 0 pow := func(number int64, exponent int32) int64 { // pow is nested helper function var result int64 = 1 for n := int32(0); n < exponent; n++ { result *= number } return result } delay := time.Duration(0) switch o.Policy { case RetryPolicyExponential: delay = time.Duration(pow(2, try-1)-1) * o.RetryDelay case RetryPolicyFixed: if try > 1 { // Any try after the 1st uses the fixed delay delay = o.RetryDelay } } // Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3) // For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757 delay = time.Duration(float32(delay) * (rand.Float32()/2 + 0.8)) // NOTE: We want math/rand; not crypto/rand if delay > o.MaxRetryDelay { delay = o.MaxRetryDelay } return delay } // NewRetryPolicyFactory creates a RetryPolicyFactory object configured using the specified options. 
func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory { o = o.defaults() // Force defaults to be calculated return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) { // Before each try, we'll select either the primary or secondary URL. primaryTry := int32(0) // This indicates how many tries we've attempted against the primary DC // We only consider retrying against a secondary if we have a read request (GET/HEAD) AND this policy has a Secondary URL it can use considerSecondary := (request.Method == http.MethodGet || request.Method == http.MethodHead) && o.retryReadsFromSecondaryHost() != "" // Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2) // When to retry: connection failure or temporary/timeout. NOTE: StorageError considers HTTP 500/503 as temporary & is therefore retryable // If using a secondary: // Even tries go against primary; odd tries go against the secondary // For a primary wait ((2 ^ primaryTries - 1) * delay * random(0.8, 1.2) // If secondary gets a 404, don't fail, retry but future retries are only against the primary // When retrying against a secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2)) for try := int32(1); try <= o.MaxTries; try++ { logf("\n=====> Try=%d\n", try) // Determine which endpoint to try. It's primary if there is no secondary or if it is an add # attempt. 
tryingPrimary := !considerSecondary || (try%2 == 1) // Select the correct host and delay if tryingPrimary { primaryTry++ delay := o.calcDelay(primaryTry) logf("Primary try=%d, Delay=%v\n", primaryTry, delay) time.Sleep(delay) // The 1st try returns 0 delay } else { // For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757 delay := time.Duration(float32(time.Second) * (rand.Float32()/2 + 0.8)) logf("Secondary try=%d, Delay=%v\n", try-primaryTry, delay) time.Sleep(delay) // Delay with some jitter before trying secondary } // Clone the original request to ensure that each try starts with the original (unmutated) request. requestCopy := request.Copy() // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because // the stream may not be at offset 0 when we first get it and we want the same behavior for the // 1st try as for additional tries. err = requestCopy.RewindBody() if err != nil { return nil, errors.New("we must be able to seek on the Body Stream, otherwise retries would cause data corruption") } if !tryingPrimary { requestCopy.URL.Host = o.retryReadsFromSecondaryHost() requestCopy.Host = o.retryReadsFromSecondaryHost() } // Set the server-side timeout query parameter "timeout=[seconds]" timeout := int32(o.TryTimeout.Seconds()) // Max seconds per try if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two t := int32(deadline.Sub(time.Now()).Seconds()) // Duration from now until user's ctx reaches its deadline logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", timeout, t) if t < timeout { timeout = t } if timeout < 0 { timeout = 0 // If timeout ever goes negative, set it to zero; this happen while debugging } logf("TryTimeout adjusted to=%d sec\n", timeout) } q := requestCopy.Request.URL.Query() q.Set("timeout", strconv.Itoa(int(timeout+1))) // Add 1 to "round up" requestCopy.Request.URL.RawQuery = q.Encode() logf("Url=%s\n", 
requestCopy.Request.URL.String()) // Set the time for this particular retry operation and then Do the operation. tryCtx, tryCancel := context.WithTimeout(ctx, time.Second*time.Duration(timeout)) //requestCopy.Body = &deadlineExceededReadCloser{r: requestCopy.Request.Body} response, err = next.Do(tryCtx, requestCopy) // Make the request /*err = improveDeadlineExceeded(err) if err == nil { response.Response().Body = &deadlineExceededReadCloser{r: response.Response().Body} }*/ logf("Err=%v, response=%v\n", err, response) action := "" // This MUST get changed within the switch code below switch { case ctx.Err() != nil: action = "NoRetry: Op timeout" case !tryingPrimary && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusNotFound: // If attempt was against the secondary & it returned a StatusNotFound (404), then // the resource was not found. This may be due to replication delay. So, in this // case, we'll never try the secondary again for this operation. considerSecondary = false action = "Retry: Secondary URL returned 404" case err != nil: // NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation. // Use ServiceCode to verify if the error is related to storage service-side, // ServiceCode is set only when error related to storage service happened. if stErr, ok := err.(StorageError); ok { if stErr.Temporary() { action = "Retry: StorageError with error service code and Temporary()" } else if stErr.Response() != nil && isSuccessStatusCode(stErr.Response()) { // TODO: This is a temporarily work around, remove this after protocol layer fix the issue that net.Error is wrapped as storageError action = "Retry: StorageError with success status code" } else { action = "NoRetry: StorageError not Temporary() and without retriable status code" } } else if netErr, ok := err.(net.Error); ok { // Use non-retriable net.Error list, but not retriable list. 
// As there are errors without Temporary() implementation, // while need be retried, like 'connection reset by peer', 'transport connection broken' and etc. // So the SDK do retry for most of the case, unless the error should not be retried for sure. if !isNotRetriable(netErr) { action = "Retry: net.Error and not in the non-retriable list" } else { action = "NoRetry: net.Error and in the non-retriable list" } } else if err == io.ErrUnexpectedEOF { action = "Retry: unexpected EOF" } else { action = "NoRetry: unrecognized error" } default: action = "NoRetry: successful HTTP request" // no error } logf("Action=%s\n", action) // fmt.Println(action + "\n") // This is where we could log the retry operation; action is why we're retrying if action[0] != 'R' { // Retry only if action starts with 'R' if err != nil { tryCancel() // If we're returning an error, cancel this current/last per-retry timeout context } else { // We wrap the last per-try context in a body and overwrite the Response's Body field with our wrapper. // So, when the user closes the Body, the our per-try context gets closed too. // Another option, is that the Last Policy do this wrapping for a per-retry context (not for the user's context) if response == nil || response.Response() == nil { // We do panic in the case response or response.Response() is nil, // as for client, the response should not be nil if request is sent and the operations is executed successfully. // Another option, is that execute the cancel function when response or response.Response() is nil, // as in this case, current per-try has nothing to do in future. 
return nil, errors.New("invalid state, response should not be nil when the operation is executed successfully") } response.Response().Body = &contextCancelReadCloser{cf: tryCancel, body: response.Response().Body} } break // Don't retry } if response != nil && response.Response() != nil && response.Response().Body != nil { // If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection body := response.Response().Body io.Copy(ioutil.Discard, body) body.Close() } // If retrying, cancel the current per-try timeout context tryCancel() } return response, err // Not retryable or too many retries; return the last response/error } }) } // contextCancelReadCloser helps to invoke context's cancelFunc properly when the ReadCloser is closed. type contextCancelReadCloser struct { cf context.CancelFunc body io.ReadCloser } func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) { return rc.body.Read(p) } func (rc *contextCancelReadCloser) Close() error { err := rc.body.Close() if rc.cf != nil { rc.cf() } return err } // isNotRetriable checks if the provided net.Error isn't retriable. func isNotRetriable(errToParse net.Error) bool { // No error, so this is NOT retriable. if errToParse == nil { return true } // The error is either temporary or a timeout so it IS retriable (not not retriable). if errToParse.Temporary() || errToParse.Timeout() { return false } genericErr := error(errToParse) // From here all the error are neither Temporary() nor Timeout(). switch err := errToParse.(type) { case *net.OpError: // The net.Error is also a net.OpError but the inner error is nil, so this is not retriable. if err.Err == nil { return true } genericErr = err.Err } switch genericErr.(type) { case *net.AddrError, net.UnknownNetworkError, *net.DNSError, net.InvalidAddrError, *net.ParseError, *net.DNSConfigError: // If the error is one of the ones listed, then it is NOT retriable. 
return true } // If it's invalid header field name/value error thrown by http module, then it is NOT retriable. // This could happen when metadata's key or value is invalid. (RoundTrip in transport.go) if strings.Contains(genericErr.Error(), "invalid header field") { return true } // Assume the error is retriable. return false } var successStatusCodes = []int{http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent, http.StatusPartialContent} func isSuccessStatusCode(resp *http.Response) bool { if resp == nil { return false } for _, i := range successStatusCodes { if i == resp.StatusCode { return true } } return false } // According to https://github.com/golang/go/wiki/CompilerOptimizations, the compiler will inline this method and hopefully optimize all calls to it away var logf = func(format string, a ...interface{}) {} // Use this version to see the retry method's code path (import "fmt") //var logf = fmt.Printf /* type deadlineExceededReadCloser struct { r io.ReadCloser } func (r *deadlineExceededReadCloser) Read(p []byte) (int, error) { n, err := 0, io.EOF if r.r != nil { n, err = r.r.Read(p) } return n, improveDeadlineExceeded(err) } func (r *deadlineExceededReadCloser) Seek(offset int64, whence int) (int64, error) { // For an HTTP request, the ReadCloser MUST also implement seek // For an HTTP response, Seek MUST not be called (or this will panic) o, err := r.r.(io.Seeker).Seek(offset, whence) return o, improveDeadlineExceeded(err) } func (r *deadlineExceededReadCloser) Close() error { if c, ok := r.r.(io.Closer); ok { c.Close() } return nil } // timeoutError is the internal struct that implements our richer timeout error. type deadlineExceeded struct { responseError } var _ net.Error = (*deadlineExceeded)(nil) // Ensure deadlineExceeded implements the net.Error interface at compile time // improveDeadlineExceeded creates a timeoutError object that implements the error interface IF cause is a context.DeadlineExceeded error. 
func improveDeadlineExceeded(cause error) error { // If cause is not DeadlineExceeded, return the same error passed in. if cause != context.DeadlineExceeded { return cause } // Else, convert DeadlineExceeded to our timeoutError which gives a richer string message return &deadlineExceeded{ responseError: responseError{ ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), }, } } // Error implements the error interface's Error method to return a string representation of the error. func (e *deadlineExceeded) Error() string { return e.ErrorNode.Error("context deadline exceeded; when creating a pipeline, consider increasing RetryOptions' TryTimeout field") } */ azure-storage-blob-go-0.10.0/azblob/zc_policy_telemetry.go000066400000000000000000000032231367515646300236040ustar00rootroot00000000000000package azblob import ( "bytes" "context" "fmt" "os" "runtime" "github.com/Azure/azure-pipeline-go/pipeline" ) // TelemetryOptions configures the telemetry policy's behavior. type TelemetryOptions struct { // Value is a string prepended to each request's User-Agent and sent to the service. // The service records the user-agent in logs for diagnostics and tracking of client requests. Value string } // NewTelemetryPolicyFactory creates a factory that can create telemetry policy objects // which add telemetry information to outgoing HTTP requests. 
func NewTelemetryPolicyFactory(o TelemetryOptions) pipeline.Factory { b := &bytes.Buffer{} b.WriteString(o.Value) if b.Len() > 0 { b.WriteRune(' ') } fmt.Fprintf(b, "Azure-Storage/%s %s", serviceLibVersion, platformInfo) telemetryValue := b.String() return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { request.Header.Set("User-Agent", telemetryValue) return next.Do(ctx, request) } }) } // NOTE: the ONLY function that should write to this variable is this func var platformInfo = func() string { // Azure-Storage/version (runtime; os type and version)” // Azure-Storage/1.4.0 (NODE-VERSION v4.5.0; Windows_NT 10.0.14393)' operatingSystem := runtime.GOOS // Default OS string switch operatingSystem { case "windows": operatingSystem = os.Getenv("OS") // Get more specific OS information case "linux": // accept default OS info case "freebsd": // accept default OS info } return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem) }() azure-storage-blob-go-0.10.0/azblob/zc_policy_unique_request_id.go000066400000000000000000000015051367515646300253250ustar00rootroot00000000000000package azblob import ( "context" "github.com/Azure/azure-pipeline-go/pipeline" ) // NewUniqueRequestIDPolicyFactory creates a UniqueRequestIDPolicyFactory object // that sets the request's x-ms-client-request-id header if it doesn't already exist. 
func NewUniqueRequestIDPolicyFactory() pipeline.Factory { return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { // This is Policy's Do method: return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { id := request.Header.Get(xMsClientRequestID) if id == "" { // Add a unique request ID if the caller didn't specify one already request.Header.Set(xMsClientRequestID, newUUID().String()) } return next.Do(ctx, request) } }) } const xMsClientRequestID = "x-ms-client-request-id" azure-storage-blob-go-0.10.0/azblob/zc_retry_reader.go000066400000000000000000000165031367515646300227070ustar00rootroot00000000000000package azblob import ( "context" "io" "net" "net/http" "strings" "sync" ) const CountToEnd = 0 // HTTPGetter is a function type that refers to a method that performs an HTTP GET operation. type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error) // HTTPGetterInfo is passed to an HTTPGetter function passing it parameters // that should be used to make an HTTP GET request. type HTTPGetterInfo struct { // Offset specifies the start offset that should be used when // creating the HTTP GET request's Range header Offset int64 // Count specifies the count of bytes that should be used to calculate // the end offset when creating the HTTP GET request's Range header Count int64 // ETag specifies the resource's etag that should be used when creating // the HTTP GET request's If-Match header ETag ETag } // FailedReadNotifier is a function type that represents the notification function called when a read fails type FailedReadNotifier func(failureCount int, lastError error, offset int64, count int64, willRetry bool) // RetryReaderOptions contains properties which can help to decide when to do retry. type RetryReaderOptions struct { // MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made // while reading from a RetryReader. 
A value of zero means that no additional HTTP // GET requests will be made. MaxRetryRequests int doInjectError bool doInjectErrorRound int injectedError error // NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging. NotifyFailedRead FailedReadNotifier // TreatEarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default, // retryReader has the following special behaviour: closing the response body before it is all read is treated as a // retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the = // read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If // TreatEarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead // treated as a fatal (non-retryable) error. // Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens // from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors // which will be retried. TreatEarlyCloseAsError bool } // retryReader implements io.ReaderCloser methods. // retryReader tries to read from response, and if there is retriable network error // returned during reading, it will retry according to retry reader option through executing // user defined action with provided data to get a new response, and continue the overall reading process // through reading from the new response. type retryReader struct { ctx context.Context info HTTPGetterInfo countWasBounded bool o RetryReaderOptions getter HTTPGetter // we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response responseMu *sync.Mutex response *http.Response } // NewRetryReader creates a retry reader. 
func NewRetryReader(ctx context.Context, initialResponse *http.Response, info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser { return &retryReader{ ctx: ctx, getter: getter, info: info, countWasBounded: info.Count != CountToEnd, response: initialResponse, responseMu: &sync.Mutex{}, o: o} } func (s *retryReader) setResponse(r *http.Response) { s.responseMu.Lock() defer s.responseMu.Unlock() s.response = r } func (s *retryReader) Read(p []byte) (n int, err error) { for try := 0; ; try++ { //fmt.Println(try) // Comment out for debugging. if s.countWasBounded && s.info.Count == CountToEnd { // User specified an original count and the remaining bytes are 0, return 0, EOF return 0, io.EOF } s.responseMu.Lock() resp := s.response s.responseMu.Unlock() if resp == nil { // We don't have a response stream to read from, try to get one. newResponse, err := s.getter(s.ctx, s.info) if err != nil { return 0, err } // Successful GET; this is the network stream we'll read from. s.setResponse(newResponse) resp = newResponse } n, err := resp.Body.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running) // Injection mechanism for testing. if s.o.doInjectError && try == s.o.doInjectErrorRound { if s.o.injectedError != nil { err = s.o.injectedError } else { err = &net.DNSError{IsTemporary: true} } } // We successfully read data or end EOF. if err == nil || err == io.EOF { s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future if s.info.Count != CountToEnd { s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future } return n, err // Return the return to the caller } s.Close() // Error, close stream s.setResponse(nil) // Our stream is no longer good // Check the retry count and error code, and decide whether to retry. 
retriesExhausted := try >= s.o.MaxRetryRequests _, isNetError := err.(net.Error) isUnexpectedEOF := err == io.ErrUnexpectedEOF willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted // Notify, for logging purposes, of any failures if s.o.NotifyFailedRead != nil { failureCount := try + 1 // because try is zero-based s.o.NotifyFailedRead(failureCount, err, s.info.Offset, s.info.Count, willRetry) } if willRetry { continue // Loop around and try to get and read from new stream. } return n, err // Not retryable, or retries exhausted, so just return } } // By default, we allow early Closing, from another concurrent goroutine, to be used to force a retry // Is this safe, to close early from another goroutine? Early close ultimately ends up calling // net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors" // which is exactly the behaviour we want. // NOTE: that if caller has forced an early Close from a separate goroutine (separate from the Read) // then there are two different types of error that may happen - either the one one we check for here, // or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine // to check for one, since the other is a net.Error, which our main Read retry loop is already handing. 
func (s *retryReader) wasRetryableEarlyClose(err error) bool { if s.o.TreatEarlyCloseAsError { return false // user wants all early closes to be errors, and so not retryable } // unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage) } const ReadOnClosedBodyMessage = "read on closed response body" func (s *retryReader) Close() error { s.responseMu.Lock() defer s.responseMu.Unlock() if s.response != nil && s.response.Body != nil { return s.response.Body.Close() } return nil } azure-storage-blob-go-0.10.0/azblob/zc_sas_account.go000066400000000000000000000142351367515646300225220ustar00rootroot00000000000000package azblob import ( "bytes" "errors" "fmt" "strings" "time" ) // AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. // For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas type AccountSASSignatureValues struct { Version string `param:"sv"` // If not specified, this defaults to SASVersion Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants StartTime time.Time `param:"st"` // Not specified if IsZero ExpiryTime time.Time `param:"se"` // Not specified if IsZero Permissions string `param:"sp"` // Create by initializing a AccountSASPermissions and then call String() IPRange IPRange `param:"sip"` Services string `param:"ss"` // Create by initializing AccountSASServices and then call String() ResourceTypes string `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String() } // NewSASQueryParameters uses an account's shared key credential to sign this signature values to produce // the proper SAS query parameters. 
func (v AccountSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) { // https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" { return SASQueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") } if v.Version == "" { v.Version = SASVersion } perms := &AccountSASPermissions{} if err := perms.Parse(v.Permissions); err != nil { return SASQueryParameters{}, err } v.Permissions = perms.String() startTime, expiryTime, _ := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, time.Time{}) stringToSign := strings.Join([]string{ sharedKeyCredential.AccountName(), v.Permissions, v.Services, v.ResourceTypes, startTime, expiryTime, v.IPRange.String(), string(v.Protocol), v.Version, ""}, // That right, the account SAS requires a terminating extra newline "\n") signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign) p := SASQueryParameters{ // Common SAS parameters version: v.Version, protocol: v.Protocol, startTime: v.StartTime, expiryTime: v.ExpiryTime, permissions: v.Permissions, ipRange: v.IPRange, // Account-specific SAS parameters services: v.Services, resourceTypes: v.ResourceTypes, // Calculated SAS signature signature: signature, } return p, nil } // The AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS. // Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field. type AccountSASPermissions struct { Read, Write, Delete, List, Add, Create, Update, Process bool } // String produces the SAS permissions string for an Azure Storage account. // Call this method to set AccountSASSignatureValues's Permissions field. 
func (p AccountSASPermissions) String() string { var buffer bytes.Buffer if p.Read { buffer.WriteRune('r') } if p.Write { buffer.WriteRune('w') } if p.Delete { buffer.WriteRune('d') } if p.List { buffer.WriteRune('l') } if p.Add { buffer.WriteRune('a') } if p.Create { buffer.WriteRune('c') } if p.Update { buffer.WriteRune('u') } if p.Process { buffer.WriteRune('p') } return buffer.String() } // Parse initializes the AccountSASPermissions's fields from a string. func (p *AccountSASPermissions) Parse(s string) error { *p = AccountSASPermissions{} // Clear out the flags for _, r := range s { switch r { case 'r': p.Read = true case 'w': p.Write = true case 'd': p.Delete = true case 'l': p.List = true case 'a': p.Add = true case 'c': p.Create = true case 'u': p.Update = true case 'p': p.Process = true default: return fmt.Errorf("Invalid permission character: '%v'", r) } } return nil } // The AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS. // Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field. type AccountSASServices struct { Blob, Queue, File bool } // String produces the SAS services string for an Azure Storage account. // Call this method to set AccountSASSignatureValues's Services field. func (s AccountSASServices) String() string { var buffer bytes.Buffer if s.Blob { buffer.WriteRune('b') } if s.Queue { buffer.WriteRune('q') } if s.File { buffer.WriteRune('f') } return buffer.String() } // Parse initializes the AccountSASServices' fields from a string. func (a *AccountSASServices) Parse(s string) error { *a = AccountSASServices{} // Clear out the flags for _, r := range s { switch r { case 'b': a.Blob = true case 'q': a.Queue = true case 'f': a.File = true default: return fmt.Errorf("Invalid service character: '%v'", r) } } return nil } // The AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS. 
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field. type AccountSASResourceTypes struct { Service, Container, Object bool } // String produces the SAS resource types string for an Azure Storage account. // Call this method to set AccountSASSignatureValues's ResourceTypes field. func (rt AccountSASResourceTypes) String() string { var buffer bytes.Buffer if rt.Service { buffer.WriteRune('s') } if rt.Container { buffer.WriteRune('c') } if rt.Object { buffer.WriteRune('o') } return buffer.String() } // Parse initializes the AccountSASResourceType's fields from a string. func (rt *AccountSASResourceTypes) Parse(s string) error { *rt = AccountSASResourceTypes{} // Clear out the flags for _, r := range s { switch r { case 's': rt.Service = true case 'c': rt.Container = true case 'o': rt.Object = true default: return fmt.Errorf("Invalid resource type: '%v'", r) } } return nil } azure-storage-blob-go-0.10.0/azblob/zc_sas_query_params.go000066400000000000000000000236361367515646300236030ustar00rootroot00000000000000package azblob import ( "errors" "net" "net/url" "strings" "time" ) // SASVersion indicates the SAS version. const SASVersion = ServiceVersion type SASProtocol string const ( // SASProtocolHTTPS can be specified for a SAS protocol SASProtocolHTTPS SASProtocol = "https" // SASProtocolHTTPSandHTTP can be specified for a SAS protocol SASProtocolHTTPSandHTTP SASProtocol = "https,http" ) // FormatTimesForSASSigning converts a time.Time to a snapshotTimeFormat string suitable for a // SASField's StartTime or ExpiryTime fields. Returns "" if value.IsZero(). 
func FormatTimesForSASSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) { ss := "" if !startTime.IsZero() { ss = formatSASTimeWithDefaultFormat(&startTime) } se := "" if !expiryTime.IsZero() { se = formatSASTimeWithDefaultFormat(&expiryTime) } sh := "" if !snapshotTime.IsZero() { sh = snapshotTime.Format(SnapshotTimeFormat) } return ss, se, sh } // SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time. const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601 var SASTimeFormats = []string{"2006-01-02T15:04:05.0000000Z", SASTimeFormat, "2006-01-02T15:04Z", "2006-01-02"} // ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details. // formatSASTimeWithDefaultFormat format time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ". func formatSASTimeWithDefaultFormat(t *time.Time) string { return formatSASTime(t, SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used } // formatSASTime format time with given format, use ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ" by default. func formatSASTime(t *time.Time, format string) string { if format != "" { return t.Format(format) } return t.Format(SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used } // parseSASTimeString try to parse sas time string. 
func parseSASTimeString(val string) (t time.Time, timeFormat string, err error) { for _, sasTimeFormat := range SASTimeFormats { t, err = time.Parse(sasTimeFormat, val) if err == nil { timeFormat = sasTimeFormat break } } if err != nil { err = errors.New("fail to parse time with IOS 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details") } return } // https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas // A SASQueryParameters object represents the components that make up an Azure Storage SAS' query parameters. // You parse a map of query parameters into its fields by calling NewSASQueryParameters(). You add the components // to a query parameter map by calling AddToValues(). // NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type. // // This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues). type SASQueryParameters struct { // All members are immutable or values so copies of this struct are goroutine-safe. version string `param:"sv"` services string `param:"ss"` resourceTypes string `param:"srt"` protocol SASProtocol `param:"spr"` startTime time.Time `param:"st"` expiryTime time.Time `param:"se"` snapshotTime time.Time `param:"snapshot"` ipRange IPRange `param:"sip"` identifier string `param:"si"` resource string `param:"sr"` permissions string `param:"sp"` signature string `param:"sig"` cacheControl string `param:"rscc"` contentDisposition string `param:"rscd"` contentEncoding string `param:"rsce"` contentLanguage string `param:"rscl"` contentType string `param:"rsct"` signedOid string `param:"skoid"` signedTid string `param:"sktid"` signedStart time.Time `param:"skt"` signedExpiry time.Time `param:"ske"` signedService string `param:"sks"` signedVersion string `param:"skv"` // private member used for startTime and expiryTime formatting. 
stTimeFormat string seTimeFormat string } func (p *SASQueryParameters) SignedOid() string { return p.signedOid } func (p *SASQueryParameters) SignedTid() string { return p.signedTid } func (p *SASQueryParameters) SignedStart() time.Time { return p.signedStart } func (p *SASQueryParameters) SignedExpiry() time.Time { return p.signedExpiry } func (p *SASQueryParameters) SignedService() string { return p.signedService } func (p *SASQueryParameters) SignedVersion() string { return p.signedVersion } func (p *SASQueryParameters) SnapshotTime() time.Time { return p.snapshotTime } func (p *SASQueryParameters) Version() string { return p.version } func (p *SASQueryParameters) Services() string { return p.services } func (p *SASQueryParameters) ResourceTypes() string { return p.resourceTypes } func (p *SASQueryParameters) Protocol() SASProtocol { return p.protocol } func (p *SASQueryParameters) StartTime() time.Time { return p.startTime } func (p *SASQueryParameters) ExpiryTime() time.Time { return p.expiryTime } func (p *SASQueryParameters) IPRange() IPRange { return p.ipRange } func (p *SASQueryParameters) Identifier() string { return p.identifier } func (p *SASQueryParameters) Resource() string { return p.resource } func (p *SASQueryParameters) Permissions() string { return p.permissions } func (p *SASQueryParameters) Signature() string { return p.signature } func (p *SASQueryParameters) CacheControl() string { return p.cacheControl } func (p *SASQueryParameters) ContentDisposition() string { return p.contentDisposition } func (p *SASQueryParameters) ContentEncoding() string { return p.contentEncoding } func (p *SASQueryParameters) ContentLanguage() string { return p.contentLanguage } func (p *SASQueryParameters) ContentType() string { return p.contentType } // IPRange represents a SAS IP range's start IP and (optionally) end IP. 
type IPRange struct { Start net.IP // Not specified if length = 0 End net.IP // Not specified if length = 0 } // String returns a string representation of an IPRange. func (ipr *IPRange) String() string { if len(ipr.Start) == 0 { return "" } start := ipr.Start.String() if len(ipr.End) == 0 { return start } return start + "-" + ipr.End.String() } // NewSASQueryParameters creates and initializes a SASQueryParameters object based on the // query parameter map's passed-in values. If deleteSASParametersFromValues is true, // all SAS-related query parameters are removed from the passed-in map. If // deleteSASParametersFromValues is false, the map passed-in map is unaltered. func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool) SASQueryParameters { p := SASQueryParameters{} for k, v := range values { val := v[0] isSASKey := true switch strings.ToLower(k) { case "sv": p.version = val case "ss": p.services = val case "srt": p.resourceTypes = val case "spr": p.protocol = SASProtocol(val) case "snapshot": p.snapshotTime, _ = time.Parse(SnapshotTimeFormat, val) case "st": p.startTime, p.stTimeFormat, _ = parseSASTimeString(val) case "se": p.expiryTime, p.seTimeFormat, _ = parseSASTimeString(val) case "sip": dashIndex := strings.Index(val, "-") if dashIndex == -1 { p.ipRange.Start = net.ParseIP(val) } else { p.ipRange.Start = net.ParseIP(val[:dashIndex]) p.ipRange.End = net.ParseIP(val[dashIndex+1:]) } case "si": p.identifier = val case "sr": p.resource = val case "sp": p.permissions = val case "sig": p.signature = val case "rscc": p.cacheControl = val case "rscd": p.contentDisposition = val case "rsce": p.contentEncoding = val case "rscl": p.contentLanguage = val case "rsct": p.contentType = val case "skoid": p.signedOid = val case "sktid": p.signedTid = val case "skt": p.signedStart, _ = time.Parse(SASTimeFormat, val) case "ske": p.signedExpiry, _ = time.Parse(SASTimeFormat, val) case "sks": p.signedService = val case "skv": p.signedVersion = val 
default: isSASKey = false // We didn't recognize the query parameter } if isSASKey && deleteSASParametersFromValues { delete(values, k) } } return p } // AddToValues adds the SAS components to the specified query parameters map. func (p *SASQueryParameters) addToValues(v url.Values) url.Values { if p.version != "" { v.Add("sv", p.version) } if p.services != "" { v.Add("ss", p.services) } if p.resourceTypes != "" { v.Add("srt", p.resourceTypes) } if p.protocol != "" { v.Add("spr", string(p.protocol)) } if !p.startTime.IsZero() { v.Add("st", formatSASTime(&(p.startTime), p.stTimeFormat)) } if !p.expiryTime.IsZero() { v.Add("se", formatSASTime(&(p.expiryTime), p.seTimeFormat)) } if len(p.ipRange.Start) > 0 { v.Add("sip", p.ipRange.String()) } if p.identifier != "" { v.Add("si", p.identifier) } if p.resource != "" { v.Add("sr", p.resource) } if p.permissions != "" { v.Add("sp", p.permissions) } if p.signedOid != "" { v.Add("skoid", p.signedOid) v.Add("sktid", p.signedTid) v.Add("skt", p.signedStart.Format(SASTimeFormat)) v.Add("ske", p.signedExpiry.Format(SASTimeFormat)) v.Add("sks", p.signedService) v.Add("skv", p.signedVersion) } if p.signature != "" { v.Add("sig", p.signature) } if p.cacheControl != "" { v.Add("rscc", p.cacheControl) } if p.contentDisposition != "" { v.Add("rscd", p.contentDisposition) } if p.contentEncoding != "" { v.Add("rsce", p.contentEncoding) } if p.contentLanguage != "" { v.Add("rscl", p.contentLanguage) } if p.contentType != "" { v.Add("rsct", p.contentType) } return v } // Encode encodes the SAS query parameters into URL encoded form sorted by key. func (p *SASQueryParameters) Encode() string { v := url.Values{} p.addToValues(v) return v.Encode() } azure-storage-blob-go-0.10.0/azblob/zc_service_codes_common.go000066400000000000000000000177061367515646300244130ustar00rootroot00000000000000package azblob // https://docs.microsoft.com/en-us/rest/api/storageservices/common-rest-api-error-codes const ( // ServiceCodeNone is the default value. 
It indicates that the error was related to the service or that the service didn't return a code. ServiceCodeNone ServiceCodeType = "" // ServiceCodeAccountAlreadyExists means the specified account already exists. ServiceCodeAccountAlreadyExists ServiceCodeType = "AccountAlreadyExists" // ServiceCodeAccountBeingCreated means the specified account is in the process of being created (403). ServiceCodeAccountBeingCreated ServiceCodeType = "AccountBeingCreated" // ServiceCodeAccountIsDisabled means the specified account is disabled (403). ServiceCodeAccountIsDisabled ServiceCodeType = "AccountIsDisabled" // ServiceCodeAuthenticationFailed means the server failed to authenticate the request. Make sure the value of the Authorization header is formed correctly including the signature (403). ServiceCodeAuthenticationFailed ServiceCodeType = "AuthenticationFailed" // ServiceCodeConditionHeadersNotSupported means the condition headers are not supported (400). ServiceCodeConditionHeadersNotSupported ServiceCodeType = "ConditionHeadersNotSupported" // ServiceCodeConditionNotMet means the condition specified in the conditional header(s) was not met for a read/write operation (304/412). ServiceCodeConditionNotMet ServiceCodeType = "ConditionNotMet" // ServiceCodeEmptyMetadataKey means the key for one of the metadata key-value pairs is empty (400). ServiceCodeEmptyMetadataKey ServiceCodeType = "EmptyMetadataKey" // ServiceCodeInsufficientAccountPermissions means read operations are currently disabled or Write operations are not allowed or The account being accessed does not have sufficient permissions to execute this operation (403). ServiceCodeInsufficientAccountPermissions ServiceCodeType = "InsufficientAccountPermissions" // ServiceCodeInternalError means the server encountered an internal error. Please retry the request (500). 
ServiceCodeInternalError ServiceCodeType = "InternalError" // ServiceCodeInvalidAuthenticationInfo means the authentication information was not provided in the correct format. Verify the value of Authorization header (400). ServiceCodeInvalidAuthenticationInfo ServiceCodeType = "InvalidAuthenticationInfo" // ServiceCodeInvalidHeaderValue means the value provided for one of the HTTP headers was not in the correct format (400). ServiceCodeInvalidHeaderValue ServiceCodeType = "InvalidHeaderValue" // ServiceCodeInvalidHTTPVerb means the HTTP verb specified was not recognized by the server (400). ServiceCodeInvalidHTTPVerb ServiceCodeType = "InvalidHttpVerb" // ServiceCodeInvalidInput means one of the request inputs is not valid (400). ServiceCodeInvalidInput ServiceCodeType = "InvalidInput" // ServiceCodeInvalidMd5 means the MD5 value specified in the request is invalid. The MD5 value must be 128 bits and Base64-encoded (400). ServiceCodeInvalidMd5 ServiceCodeType = "InvalidMd5" // ServiceCodeInvalidMetadata means the specified metadata is invalid. It includes characters that are not permitted (400). ServiceCodeInvalidMetadata ServiceCodeType = "InvalidMetadata" // ServiceCodeInvalidQueryParameterValue means an invalid value was specified for one of the query parameters in the request URI (400). ServiceCodeInvalidQueryParameterValue ServiceCodeType = "InvalidQueryParameterValue" // ServiceCodeInvalidRange means the range specified is invalid for the current size of the resource (416). ServiceCodeInvalidRange ServiceCodeType = "InvalidRange" // ServiceCodeInvalidResourceName means the specified resource name contains invalid characters (400). ServiceCodeInvalidResourceName ServiceCodeType = "InvalidResourceName" // ServiceCodeInvalidURI means the requested URI does not represent any resource on the server (400). ServiceCodeInvalidURI ServiceCodeType = "InvalidUri" // ServiceCodeInvalidXMLDocument means the specified XML is not syntactically valid (400). 
ServiceCodeInvalidXMLDocument ServiceCodeType = "InvalidXmlDocument" // ServiceCodeInvalidXMLNodeValue means the value provided for one of the XML nodes in the request body was not in the correct format (400). ServiceCodeInvalidXMLNodeValue ServiceCodeType = "InvalidXmlNodeValue" // ServiceCodeMd5Mismatch means the MD5 value specified in the request did not match the MD5 value calculated by the server (400). ServiceCodeMd5Mismatch ServiceCodeType = "Md5Mismatch" // ServiceCodeMetadataTooLarge means the size of the specified metadata exceeds the maximum size permitted (400). ServiceCodeMetadataTooLarge ServiceCodeType = "MetadataTooLarge" // ServiceCodeMissingContentLengthHeader means the Content-Length header was not specified (411). ServiceCodeMissingContentLengthHeader ServiceCodeType = "MissingContentLengthHeader" // ServiceCodeMissingRequiredQueryParameter means a required query parameter was not specified for this request (400). ServiceCodeMissingRequiredQueryParameter ServiceCodeType = "MissingRequiredQueryParameter" // ServiceCodeMissingRequiredHeader means a required HTTP header was not specified (400). ServiceCodeMissingRequiredHeader ServiceCodeType = "MissingRequiredHeader" // ServiceCodeMissingRequiredXMLNode means a required XML node was not specified in the request body (400). ServiceCodeMissingRequiredXMLNode ServiceCodeType = "MissingRequiredXmlNode" // ServiceCodeMultipleConditionHeadersNotSupported means multiple condition headers are not supported (400). ServiceCodeMultipleConditionHeadersNotSupported ServiceCodeType = "MultipleConditionHeadersNotSupported" // ServiceCodeOperationTimedOut means the operation could not be completed within the permitted time (500). ServiceCodeOperationTimedOut ServiceCodeType = "OperationTimedOut" // ServiceCodeOutOfRangeInput means one of the request inputs is out of range (400). 
ServiceCodeOutOfRangeInput ServiceCodeType = "OutOfRangeInput" // ServiceCodeOutOfRangeQueryParameterValue means a query parameter specified in the request URI is outside the permissible range (400). ServiceCodeOutOfRangeQueryParameterValue ServiceCodeType = "OutOfRangeQueryParameterValue" // ServiceCodeRequestBodyTooLarge means the size of the request body exceeds the maximum size permitted (413). ServiceCodeRequestBodyTooLarge ServiceCodeType = "RequestBodyTooLarge" // ServiceCodeResourceTypeMismatch means the specified resource type does not match the type of the existing resource (409). ServiceCodeResourceTypeMismatch ServiceCodeType = "ResourceTypeMismatch" // ServiceCodeRequestURLFailedToParse means the url in the request could not be parsed (400). ServiceCodeRequestURLFailedToParse ServiceCodeType = "RequestUrlFailedToParse" // ServiceCodeResourceAlreadyExists means the specified resource already exists (409). ServiceCodeResourceAlreadyExists ServiceCodeType = "ResourceAlreadyExists" // ServiceCodeResourceNotFound means the specified resource does not exist (404). ServiceCodeResourceNotFound ServiceCodeType = "ResourceNotFound" // ServiceCodeServerBusy means the server is currently unable to receive requests. Please retry your request or Ingress/egress is over the account limit or operations per second is over the account limit (503). ServiceCodeServerBusy ServiceCodeType = "ServerBusy" // ServiceCodeUnsupportedHeader means one of the HTTP headers specified in the request is not supported (400). ServiceCodeUnsupportedHeader ServiceCodeType = "UnsupportedHeader" // ServiceCodeUnsupportedXMLNode means one of the XML nodes specified in the request body is not supported (400). ServiceCodeUnsupportedXMLNode ServiceCodeType = "UnsupportedXmlNode" // ServiceCodeUnsupportedQueryParameter means one of the query parameters specified in the request URI is not supported (400). 
ServiceCodeUnsupportedQueryParameter ServiceCodeType = "UnsupportedQueryParameter" // ServiceCodeUnsupportedHTTPVerb means the resource doesn't support the specified HTTP verb (405). ServiceCodeUnsupportedHTTPVerb ServiceCodeType = "UnsupportedHttpVerb" ) azure-storage-blob-go-0.10.0/azblob/zc_storage_error.go000066400000000000000000000065701367515646300231000ustar00rootroot00000000000000package azblob import ( "bytes" "encoding/xml" "fmt" "net/http" "sort" "github.com/Azure/azure-pipeline-go/pipeline" ) func init() { // wire up our custom error handling constructor responseErrorFactory = newStorageError } // ServiceCodeType is a string identifying a storage service error. // For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/status-and-error-codes2 type ServiceCodeType string // StorageError identifies a responder-generated network or response parsing error. type StorageError interface { // ResponseError implements error's Error(), net.Error's Temporary() and Timeout() methods & Response(). ResponseError // ServiceCode returns a service error code. Your code can use this to make error recovery decisions. ServiceCode() ServiceCodeType } // storageError is the internal struct that implements the public StorageError interface. type storageError struct { responseError serviceCode ServiceCodeType details map[string]string } // newStorageError creates an error object that implements the error interface. func newStorageError(cause error, response *http.Response, description string) error { return &storageError{ responseError: responseError{ ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), response: response, description: description, }, serviceCode: ServiceCodeType(response.Header.Get("x-ms-error-code")), } } // ServiceCode returns service-error information. The caller may examine these values but should not modify any of them. 
func (e *storageError) ServiceCode() ServiceCodeType { return e.serviceCode } // Error implements the error interface's Error method to return a string representation of the error. func (e *storageError) Error() string { b := &bytes.Buffer{} fmt.Fprintf(b, "===== RESPONSE ERROR (ServiceCode=%s) =====\n", e.serviceCode) fmt.Fprintf(b, "Description=%s, Details: ", e.description) if len(e.details) == 0 { b.WriteString("(none)\n") } else { b.WriteRune('\n') keys := make([]string, 0, len(e.details)) // Alphabetize the details for k := range e.details { keys = append(keys, k) } sort.Strings(keys) for _, k := range keys { fmt.Fprintf(b, " %s: %+v\n", k, e.details[k]) } } req := pipeline.Request{Request: e.response.Request}.Copy() // Make a copy of the response's request pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(req), e.response, nil) return e.ErrorNode.Error(b.String()) } // Temporary returns true if the error occurred due to a temporary condition (including an HTTP status of 500 or 503). func (e *storageError) Temporary() bool { if e.response != nil { if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) { return true } } return e.ErrorNode.Temporary() } // UnmarshalXML performs custom unmarshalling of XML-formatted Azure storage request errors. 
func (e *storageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) { tokName := "" var t xml.Token for t, err = d.Token(); err == nil; t, err = d.Token() { switch tt := t.(type) { case xml.StartElement: tokName = tt.Name.Local break case xml.CharData: switch tokName { case "Message": e.description = string(tt) default: if e.details == nil { e.details = map[string]string{} } e.details[tokName] = string(tt) } } } return nil } azure-storage-blob-go-0.10.0/azblob/zc_util_validate.go000066400000000000000000000033531367515646300230450ustar00rootroot00000000000000package azblob import ( "errors" "fmt" "io" "strconv" ) // httpRange defines a range of bytes within an HTTP resource, starting at offset and // ending at offset+count. A zero-value httpRange indicates the entire resource. An httpRange // which has an offset but na zero value count indicates from the offset to the resource's end. type httpRange struct { offset int64 count int64 } func (r httpRange) pointers() *string { if r.offset == 0 && r.count == CountToEnd { // Do common case first for performance return nil // No specified range } endOffset := "" // if count == CountToEnd (0) if r.count > 0 { endOffset = strconv.FormatInt((r.offset+r.count)-1, 10) } dataRange := fmt.Sprintf("bytes=%v-%s", r.offset, endOffset) return &dataRange } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) { if body == nil { // nil body's are "logically" seekable to 0 and are 0 bytes long return 0, nil } err := validateSeekableStreamAt0(body) if err != nil { return 0, err } count, err := body.Seek(0, io.SeekEnd) if err != nil { return 0, errors.New("body stream must be seekable") } body.Seek(0, io.SeekStart) return count, nil } // return an error if body is not a valid seekable stream at 0 func validateSeekableStreamAt0(body io.ReadSeeker) error { if body == nil { // nil 
body's are "logically" seekable to 0 return nil } if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil { // Help detect programmer error if err != nil { return errors.New("body stream must be seekable") } return errors.New("body stream must be set to position 0") } return nil } azure-storage-blob-go-0.10.0/azblob/zc_uuid.go000066400000000000000000000034601367515646300211640ustar00rootroot00000000000000package azblob import ( "crypto/rand" "fmt" "strconv" ) // The UUID reserved variants. const ( reservedNCS byte = 0x80 reservedRFC4122 byte = 0x40 reservedMicrosoft byte = 0x20 reservedFuture byte = 0x00 ) // A UUID representation compliant with specification in RFC 4122 document. type uuid [16]byte // NewUUID returns a new uuid using RFC 4122 algorithm. func newUUID() (u uuid) { u = uuid{} // Set all bits to randomly (or pseudo-randomly) chosen values. rand.Read(u[:]) u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122) var version byte = 4 u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4) return } // String returns an unparsed version of the generated UUID sequence. func (u uuid) String() string { return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) } // ParseUUID parses a string formatted as "003020100-0504-0706-0809-0a0b0c0d0e0f" // or "{03020100-0504-0706-0809-0a0b0c0d0e0f}" into a UUID. 
func parseUUID(uuidStr string) uuid { char := func(hexString string) byte { i, _ := strconv.ParseUint(hexString, 16, 8) return byte(i) } if uuidStr[0] == '{' { uuidStr = uuidStr[1:] // Skip over the '{' } // 03020100 - 05 04 - 07 06 - 08 09 - 0a 0b 0c 0d 0e 0f // 1 11 1 11 11 1 12 22 2 22 22 22 33 33 33 // 01234567 8 90 12 3 45 67 8 90 12 3 45 67 89 01 23 45 uuidVal := uuid{ char(uuidStr[0:2]), char(uuidStr[2:4]), char(uuidStr[4:6]), char(uuidStr[6:8]), char(uuidStr[9:11]), char(uuidStr[11:13]), char(uuidStr[14:16]), char(uuidStr[16:18]), char(uuidStr[19:21]), char(uuidStr[21:23]), char(uuidStr[24:26]), char(uuidStr[26:28]), char(uuidStr[28:30]), char(uuidStr[30:32]), char(uuidStr[32:34]), char(uuidStr[34:36]), } return uuidVal } func (u uuid) bytes() []byte { return u[:] } azure-storage-blob-go-0.10.0/azblob/zt_doc.go000066400000000000000000000131111367515646300207760ustar00rootroot00000000000000// Copyright 2017 Microsoft Corporation. All rights reserved. // Use of this source code is governed by an MIT // license that can be found in the LICENSE file. /* Package azblob allows you to manipulate Azure Storage containers and blobs objects. URL Types The most common types you'll work with are the XxxURL types. The methods of these types make requests against the Azure Storage Service. - ServiceURL's methods perform operations on a storage account. - ContainerURL's methods perform operations on an account's container. - BlockBlobURL's methods perform operations on a container's block blob. - AppendBlobURL's methods perform operations on a container's append blob. - PageBlobURL's methods perform operations on a container's page blob. - BlobURL's methods perform operations on a container's blob regardless of the blob's type. Internally, each XxxURL object contains a URL and a request pipeline. The URL indicates the endpoint where each HTTP request is sent and the pipeline indicates how the outgoing HTTP request and incoming HTTP response is processed. 
The pipeline specifies things like retry policies, logging, deserialization of HTTP response payloads, and more. Pipelines are threadsafe and may be shared by multiple XxxURL objects. When you create a ServiceURL, you pass an initial pipeline. When you call ServiceURL's NewContainerURL method, the new ContainerURL object has its own URL but it shares the same pipeline as the parent ServiceURL object. To work with a blob, call one of ContainerURL's 4 NewXxxBlobURL methods depending on how you want to treat the blob. To treat the blob as a block blob, append blob, or page blob, call NewBlockBlobURL, NewAppendBlobURL, or NewPageBlobURL respectively. These three types are all identical except for the methods they expose; each type exposes the methods relevant to the type of blob represented. If you're not sure how you want to treat a blob, you can call NewBlobURL; this returns an object whose methods are relevant to any kind of blob. When you call ContainerURL's NewXxxBlobURL, the new XxxBlobURL object has its own URL but it shares the same pipeline as the parent ContainerURL object. You can easily switch between blob types (method sets) by calling a ToXxxBlobURL method. If you'd like to use a different pipeline with a ServiceURL, ContainerURL, or XxxBlobURL object, then call the XxxURL object's WithPipeline method passing in the desired pipeline. The WithPipeline methods create a new XxxURL object with the same URL as the original but with the specified pipeline. Note that XxxURL objects use little memory, are goroutine-safe, and many objects share the same pipeline. This means that XxxURL objects share a lot of system resources making them very efficient. All of XxxURL's methods that make HTTP requests return rich error handling information so you can discern network failures, transient failures, timeout failures, service failures, etc. See the StorageError interface for more information and an example of how to do deal with errors. 
URL and Shared Access Signature Manipulation The library includes a BlobURLParts type for deconstructing and reconstructing URLs. And you can use the following types for generating and parsing Shared Access Signature (SAS) - Use the AccountSASSignatureValues type to create a SAS for a storage account. - Use the BlobSASSignatureValues type to create a SAS for a container or blob. - Use the SASQueryParameters type to turn signature values in to query parameres or to parse query parameters. To generate a SAS, you must use the SharedKeyCredential type. Credentials When creating a request pipeline, you must specify one of this package's credential types. - Call the NewAnonymousCredential function for requests that contain a Shared Access Signature (SAS). - Call the NewSharedKeyCredential function (with an account name & key) to access any account resources. You must also use this to generate Shared Access Signatures. HTTP Request Policy Factories This package defines several request policy factories for use with the pipeline package. Most applications will not use these factories directly; instead, the NewPipeline function creates these factories, initializes them (via the PipelineOptions type) and returns a pipeline object for use by the XxxURL objects. However, for advanced scenarios, developers can access these policy factories directly and even create their own and then construct their own pipeline in order to affect HTTP requests and responses performed by the XxxURL objects. For example, developers can introduce their own logging, random failures, request recording & playback for fast testing, HTTP request pacing, alternate retry mechanisms, metering, metrics, etc. The possibilities are endless! Below are the request pipeline policy factory functions that are provided with this package: - NewRetryPolicyFactory Enables rich retry semantics for failed HTTP requests. - NewRequestLogPolicyFactory Enables rich logging support for HTTP requests/responses & failures. 
- NewTelemetryPolicyFactory Enables simple modification of the HTTP request's User-Agent header so each request reports the SDK version & language/runtime making the requests. - NewUniqueRequestIDPolicyFactory Adds a x-ms-client-request-id header with a unique UUID value to an HTTP request to help with diagnosing failures. Also, note that all the NewXxxCredential functions return request policy factory objects which get injected into the pipeline. */ package azblob // TokenCredential Use this to access resources using Role-Based Access Control (RBAC). azure-storage-blob-go-0.10.0/azblob/zt_examples_test.go000066400000000000000000001600611367515646300231150ustar00rootroot00000000000000package azblob import ( "bytes" "context" "encoding/base64" "encoding/binary" "fmt" "io" "log" "net" "net/http" "net/url" "os" "strings" "time" "math/rand" "github.com/Azure/azure-pipeline-go/pipeline" ) // https://godoc.org/github.com/fluhus/godoc-tricks func accountInfo() (string, string) { return os.Getenv("ACCOUNT_NAME"), os.Getenv("ACCOUNT_KEY") } // This example shows how to get started using the Azure Storage Blob SDK for Go. func Example() { // From the Azure portal, get your Storage account's name and account key. accountName, accountKey := accountInfo() // Use your Storage account's name and key to create a credential object; this is used to access your account. credential, err := NewSharedKeyCredential(accountName, accountKey) if err != nil { log.Fatal(err) } // Create a request pipeline that is used to process HTTP(S) requests and responses. It requires // your account credentials. In more advanced scenarios, you can configure telemetry, retry policies, // logging, and other options. Also, you can configure multiple request pipelines for different scenarios. p := NewPipeline(credential, PipelineOptions{}) // From the Azure portal, get your Storage account blob service URL endpoint. 
// The URL typically looks like this: u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", accountName)) // Create an ServiceURL object that wraps the service URL and a request pipeline. serviceURL := NewServiceURL(*u, p) // Now, you can use the serviceURL to perform various container and blob operations. // All HTTP operations allow you to specify a Go context.Context object to control cancellation/timeout. ctx := context.Background() // This example uses a never-expiring context. // This example shows several common operations just to get you started. // Create a URL that references a to-be-created container in your Azure Storage account. // This returns a ContainerURL object that wraps the container's URL and a request pipeline (inherited from serviceURL) containerURL := serviceURL.NewContainerURL("mycontainer") // Container names require lowercase // Create the container on the service (with no metadata and no public access) _, err = containerURL.Create(ctx, Metadata{}, PublicAccessNone) if err != nil { log.Fatal(err) } // Create a URL that references a to-be-created blob in your Azure Storage account's container. // This returns a BlockBlobURL object that wraps the blob's URL and a request pipeline (inherited from containerURL) blobURL := containerURL.NewBlockBlobURL("HelloWorld.txt") // Blob names can be mixed case // Create the blob with string (plain text) content. data := "Hello World!" 
_, err = blobURL.Upload(ctx, strings.NewReader(data), BlobHTTPHeaders{ContentType: "text/plain"}, Metadata{}, BlobAccessConditions{}) if err != nil { log.Fatal(err) } // Download the blob's contents and verify that it worked correctly get, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false) if err != nil { log.Fatal(err) } downloadedData := &bytes.Buffer{} reader := get.Body(RetryReaderOptions{}) downloadedData.ReadFrom(reader) reader.Close() // The client must close the response body when finished with it if data != downloadedData.String() { log.Fatal("downloaded data doesn't match uploaded data") } // List the blob(s) in our container; since a container may hold millions of blobs, this is done 1 segment at a time. for marker := (Marker{}); marker.NotDone(); { // The parens around Marker{} are required to avoid compiler error. // Get a result segment starting with the blob indicated by the current Marker. listBlob, err := containerURL.ListBlobsFlatSegment(ctx, marker, ListBlobsSegmentOptions{}) if err != nil { log.Fatal(err) } // IMPORTANT: ListBlobs returns the start of the next segment; you MUST use this to get // the next segment (after processing the current result segment). marker = listBlob.NextMarker // Process the blobs returned in this result segment (if the segment is empty, the loop body won't execute) for _, blobInfo := range listBlob.Segment.BlobItems { fmt.Print("Blob name: " + blobInfo.Name + "\n") } } // Delete the blob we created earlier. _, err = blobURL.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{}) if err != nil { log.Fatal(err) } // Delete the container we created earlier. _, err = containerURL.Delete(ctx, ContainerAccessConditions{}) if err != nil { log.Fatal(err) } } // This example shows how you can configure a pipeline for making HTTP requests to the Azure Storage Blob Service. 
func ExampleNewPipeline() { // This example shows how to wire in your own logging mechanism (this example uses // Go's standard logger to write log information to standard error) logger := log.New(os.Stderr, "", log.Ldate|log.Lmicroseconds) // Create/configure a request pipeline options object. // All PipelineOptions' fields are optional; reasonable defaults are set for anything you do not specify po := PipelineOptions{ // Set RetryOptions to control how HTTP request are retried when retryable failures occur Retry: RetryOptions{ Policy: RetryPolicyExponential, // Use exponential backoff as opposed to linear MaxTries: 3, // Try at most 3 times to perform the operation (set to 1 to disable retries) TryTimeout: time.Second * 3, // Maximum time allowed for any single try RetryDelay: time.Second * 1, // Backoff amount for each retry (exponential or linear) MaxRetryDelay: time.Second * 3, // Max delay between retries }, // Set RequestLogOptions to control how each HTTP request & its response is logged RequestLog: RequestLogOptions{ LogWarningIfTryOverThreshold: time.Millisecond * 200, // A successful response taking more than this time to arrive is logged as a warning }, // Set LogOptions to control what & where all pipeline log events go Log: pipeline.LogOptions{ Log: func(s pipeline.LogLevel, m string) { // This func is called to log each event // This method is not called for filtered-out severities. logger.Output(2, m) // This example uses Go's standard logger }, ShouldLog: func(level pipeline.LogLevel) bool { return level <= pipeline.LogWarning // Log all events from warning to more severe }, }, // Set HTTPSender to override the default HTTP Sender that sends the request over the network HTTPSender: pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { // Implement the HTTP client that will override the default sender. 
// For example, below HTTP client uses a transport that is different from http.DefaultTransport client := http.Client{ Transport: &http.Transport{ Proxy: nil, DialContext: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, DualStack: true, }).DialContext, MaxIdleConns: 100, IdleConnTimeout: 180 * time.Second, TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 1 * time.Second, }, } // Send the request over the network resp, err := client.Do(request.WithContext(ctx)) return pipeline.NewHTTPResponse(resp), err } }), } // Create a request pipeline object configured with credentials and with pipeline options. Once created, // a pipeline object is goroutine-safe and can be safely used with many XxxURL objects simultaneously. p := NewPipeline(NewAnonymousCredential(), po) // A pipeline always requires some credential object // Once you've created a pipeline object, associate it with an XxxURL object so that you can perform HTTP requests with it. u, _ := url.Parse("https://myaccount.blob.core.windows.net") serviceURL := NewServiceURL(*u, p) // Use the serviceURL as desired... // NOTE: When you use an XxxURL object to create another XxxURL object, the new XxxURL object inherits the // same pipeline object as its parent. For example, the containerURL and blobURL objects (created below) // all share the same pipeline. Any HTTP operations you perform with these objects share the behavior (retry, logging, etc.) containerURL := serviceURL.NewContainerURL("mycontainer") blobURL := containerURL.NewBlockBlobURL("ReadMe.txt") // If you'd like to perform some operations with different behavior, create a new pipeline object and // associate it with a new XxxURL object by passing the new pipeline to the XxxURL object's WithPipeline method. // In this example, I reconfigure the retry policies, create a new pipeline, and then create a new // ContainerURL object that has the same URL as its parent. 
po.Retry = RetryOptions{ Policy: RetryPolicyFixed, // Use fixed time backoff MaxTries: 4, // Try at most 3 times to perform the operation (set to 1 to disable retries) TryTimeout: time.Minute * 1, // Maximum time allowed for any single try RetryDelay: time.Second * 5, // Backoff amount for each retry (exponential or linear) MaxRetryDelay: time.Second * 10, // Max delay between retries } newContainerURL := containerURL.WithPipeline(NewPipeline(NewAnonymousCredential(), po)) // Now, any XxxBlobURL object created using newContainerURL inherits the pipeline with the new retry policy. newBlobURL := newContainerURL.NewBlockBlobURL("ReadMe.txt") _, _ = blobURL, newBlobURL // Avoid compiler's "declared and not used" error } func ExampleStorageError() { // This example shows how to handle errors returned from various XxxURL methods. All these methods return an // object implementing the pipeline.Response interface and an object implementing Go's error interface. // The error result is nil if the request was successful; your code can safely use the Response interface object. // If error is non-nil, the error could be due to: // 1. An invalid argument passed to the method. You should not write code to handle these errors; // instead, fix these errors as they appear during development/testing. // 2. A network request didn't reach an Azure Storage Service. This usually happens due to a bad URL or // faulty networking infrastructure (like a router issue). In this case, an object implementing the // net.Error interface will be returned. The net.Error interface offers Timeout and Temporary methods // which return true if the network error is determined to be a timeout or temporary condition. If // your pipeline uses the retry policy factory, then this policy looks for Timeout/Temporary and // automatically retries based on the retry options you've configured. 
Because of the retry policy, // your code will usually not call the Timeout/Temporary methods explicitly other than possibly logging // the network failure. // 3. A network request did reach the Azure Storage Service but the service failed to perform the // requested operation. In this case, an object implementing the StorageError interface is returned. // The StorageError interface also implements the net.Error interface and, if you use the retry policy, // you would most likely ignore the Timeout/Temporary methods. However, the StorageError interface exposes // richer information such as a service error code, an error description, details data, and the // service-returned http.Response. And, from the http.Response, you can get the initiating http.Request. u, _ := url.Parse("http://myaccount.blob.core.windows.net/mycontainer") containerURL := NewContainerURL(*u, NewPipeline(NewAnonymousCredential(), PipelineOptions{})) create, err := containerURL.Create(context.Background(), Metadata{}, PublicAccessNone) if err != nil { // An error occurred if stgErr, ok := err.(StorageError); ok { // This error is a Service-specific error // StorageError also implements net.Error so you could call its Timeout/Temporary methods if you want. switch stgErr.ServiceCode() { // Compare serviceCode to various ServiceCodeXxx constants case ServiceCodeContainerAlreadyExists: // You can also look at the http.Response object that failed. if failedResponse := stgErr.Response(); failedResponse != nil { // From the response object, you can get the initiating http.Request object failedRequest := failedResponse.Request _ = failedRequest // Avoid compiler's "declared and not used" error } case ServiceCodeContainerBeingDeleted: // Handle this error ... default: // Handle other errors ... 
} } log.Fatal(err) // Error is not due to Azure Storage service; networking infrastructure failure } // If err is nil, then the method was successful; use the response to access the result _ = create // Avoid compiler's "declared and not used" error } // This example shows how to break a URL into its parts so you can // examine and/or change some of its values and then construct a new URL. func ExampleBlobURLParts() { // Let's start with a URL that identifies a snapshot of a blob in a container. // The URL also contains a Shared Access Signature (SAS): u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainter/ReadMe.txt?" + "snapshot=2011-03-09T01:42:34Z&" + "sv=2015-02-21&sr=b&st=2111-01-09T01:42:34.936Z&se=2222-03-09T01:42:34.936Z&sp=rw&sip=168.1.5.60-168.1.5.70&" + "spr=https,http&si=myIdentifier&ss=bf&srt=s&sig=92836758923659283652983562==") // You can parse this URL into its constituent parts: parts := NewBlobURLParts(*u) // Now, we access the parts (this example prints them). fmt.Println(parts.Host, parts.ContainerName, parts.BlobName, parts.Snapshot) sas := parts.SAS fmt.Println(sas.Version(), sas.Resource(), sas.StartTime(), sas.ExpiryTime(), sas.Permissions(), sas.IPRange(), sas.Protocol(), sas.Identifier(), sas.Services(), sas.Signature()) // You can then change some of the fields and construct a new URL: parts.SAS = SASQueryParameters{} // Remove the SAS query parameters parts.Snapshot = "" // Remove the snapshot timestamp parts.ContainerName = "othercontainer" // Change the container name // In this example, we'll keep the blob name as is. // Construct a new URL from the parts: newURL := parts.URL() fmt.Print(newURL.String()) // NOTE: You can pass the new URL to NewBlockBlobURL (or similar methods) to manipulate the blob. } // This example shows how to create and use an Azure Storage account Shared Access Signature (SAS). func ExampleAccountSASSignatureValues() { // From the Azure portal, get your Storage account's name and account key. 
accountName, accountKey := accountInfo() // Use your Storage account's name and key to create a credential object; this is required to sign a SAS. credential, err := NewSharedKeyCredential(accountName, accountKey) if err != nil { log.Fatal(err) } // Set the desired SAS signature values and sign them with the shared key credentials to get the SAS query parameters. sasQueryParams, err := AccountSASSignatureValues{ Protocol: SASProtocolHTTPS, // Users MUST use HTTPS (not HTTP) ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration Permissions: AccountSASPermissions{Read: true, List: true}.String(), Services: AccountSASServices{Blob: true}.String(), ResourceTypes: AccountSASResourceTypes{Container: true, Object: true}.String(), }.NewSASQueryParameters(credential) if err != nil { log.Fatal(err) } qp := sasQueryParams.Encode() urlToSendToSomeone := fmt.Sprintf("https://%s.blob.core.windows.net?%s", accountName, qp) // At this point, you can send the urlToSendToSomeone to someone via email or any other mechanism you choose. // ************************************************************************************************ // When someone receives the URL, they access the SAS-protected resource with code like this: u, _ := url.Parse(urlToSendToSomeone) // Create an ServiceURL object that wraps the service URL (and its SAS) and a pipeline. // When using a SAS URLs, anonymous credentials are required. serviceURL := NewServiceURL(*u, NewPipeline(NewAnonymousCredential(), PipelineOptions{})) // Now, you can use this serviceURL just like any other to make requests of the resource. // You can parse a URL into its constituent parts: blobURLParts := NewBlobURLParts(serviceURL.URL()) fmt.Printf("SAS expiry time=%v", blobURLParts.SAS.ExpiryTime()) _ = serviceURL // Avoid compiler's "declared and not used" error } // This example shows how to create and use a Blob Service Shared Access Signature (SAS). 
func ExampleBlobSASSignatureValues() { // From the Azure portal, get your Storage account's name and account key. accountName, accountKey := accountInfo() // Use your Storage account's name and key to create a credential object; this is required to sign a SAS. credential, err := NewSharedKeyCredential(accountName, accountKey) if err != nil { log.Fatal(err) } // This is the name of the container and blob that we're creating a SAS to. containerName := "mycontainer" // Container names require lowercase blobName := "HelloWorld.txt" // Blob names can be mixed case // Set the desired SAS signature values and sign them with the shared key credentials to get the SAS query parameters. sasQueryParams, err := BlobSASSignatureValues{ Protocol: SASProtocolHTTPS, // Users MUST use HTTPS (not HTTP) ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration ContainerName: containerName, BlobName: blobName, // To produce a container SAS (as opposed to a blob SAS), assign to Permissions using // ContainerSASPermissions and make sure the BlobName field is "" (the default). Permissions: BlobSASPermissions{Add: true, Read: true, Write: true}.String(), }.NewSASQueryParameters(credential) if err != nil { log.Fatal(err) } // Create the URL of the resource you wish to access and append the SAS query parameters. // Since this is a blob SAS, the URL is to the Azure storage blob. qp := sasQueryParams.Encode() urlToSendToSomeone := fmt.Sprintf("https://%s.blob.core.windows.net/%s/%s?%s", accountName, containerName, blobName, qp) // At this point, you can send the urlToSendToSomeone to someone via email or any other mechanism you choose. // ************************************************************************************************ // When someone receives the URL, they access the SAS-protected resource with code like this: u, _ := url.Parse(urlToSendToSomeone) // Create an BlobURL object that wraps the blob URL (and its SAS) and a pipeline. 
// When using a SAS URLs, anonymous credentials are required. blobURL := NewBlobURL(*u, NewPipeline(NewAnonymousCredential(), PipelineOptions{})) // Now, you can use this blobURL just like any other to make requests of the resource. // If you have a SAS query parameter string, you can parse it into its parts: blobURLParts := NewBlobURLParts(blobURL.URL()) fmt.Printf("SAS expiry time=%v", blobURLParts.SAS.ExpiryTime()) _ = blobURL // Avoid compiler's "declared and not used" error } // This example shows how to manipulate a container's permissions. func ExampleContainerURL_SetContainerAccessPolicy() { // From the Azure portal, get your Storage account's name and account key. accountName, accountKey := accountInfo() // Use your Storage account's name and key to create a credential object; this is used to access your account. credential, err := NewSharedKeyCredential(accountName, accountKey) if err != nil { log.Fatal(err) } // Create an ContainerURL object that wraps the container's URL and a default pipeline. u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer", accountName)) containerURL := NewContainerURL(*u, NewPipeline(credential, PipelineOptions{})) // All operations allow you to specify a timeout via a Go context.Context object. ctx := context.Background() // This example uses a never-expiring context // Create the container (with no metadata and no public access) _, err = containerURL.Create(ctx, Metadata{}, PublicAccessNone) if err != nil { log.Fatal(err) } // Create a URL that references a to-be-created blob in your Azure Storage account's container. 
// This returns a BlockBlobURL object that wraps the blob's URL and a request pipeline (inherited from containerURL) blobURL := containerURL.NewBlockBlobURL("HelloWorld.txt") // Blob names can be mixed case // Create the blob and put some text in it _, err = blobURL.Upload(ctx, strings.NewReader("Hello World!"), BlobHTTPHeaders{ContentType: "text/plain"}, Metadata{}, BlobAccessConditions{}) if err != nil { log.Fatal(err) } // Attempt to read the blob via a simple HTTP GET operation rawBlobURL := blobURL.URL() get, err := http.Get(rawBlobURL.String()) if err != nil { log.Fatal(err) } if get.StatusCode == http.StatusNotFound { // We expected this error because the service returns an HTTP 404 status code when a blob // exists but the requester does not have permission to access it. // This is how we change the container's permission to allow public/anonymous aceess: _, err := containerURL.SetAccessPolicy(ctx, PublicAccessBlob, []SignedIdentifier{}, ContainerAccessConditions{}) if err != nil { log.Fatal(err) } // Now, this works: get, err = http.Get(rawBlobURL.String()) if err != nil { log.Fatal(err) } defer get.Body.Close() var text bytes.Buffer text.ReadFrom(get.Body) fmt.Print(text.String()) } } // This example shows how to perform operations on blob conditionally. func ExampleBlobAccessConditions() { // From the Azure portal, get your Storage account's name and account key. accountName, accountKey := accountInfo() // Create a BlockBlobURL object that wraps a blob's URL and a default pipeline. u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/Data.txt", accountName)) credential, err := NewSharedKeyCredential(accountName, accountKey) if err != nil { log.Fatal(err) } blobURL := NewBlockBlobURL(*u, NewPipeline(credential, PipelineOptions{})) ctx := context.Background() // This example uses a never-expiring context // This helper function displays the results of an operation; it is called frequently below. 
showResult := func(response pipeline.Response, err error) { if err != nil { if stgErr, ok := err.(StorageError); !ok { log.Fatal(err) // Network failure } else { fmt.Print("Failure: " + stgErr.Response().Status + "\n") } } else { if get, ok := response.(*DownloadResponse); ok { get.Body(RetryReaderOptions{}).Close() // The client must close the response body when finished with it } fmt.Print("Success: " + response.Response().Status + "\n") } } // Create the blob (unconditionally; succeeds) upload, err := blobURL.Upload(ctx, strings.NewReader("Text-1"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}) showResult(upload, err) // Download blob content if the blob has been modified since we uploaded it (fails): showResult(blobURL.Download(ctx, 0, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: upload.LastModified()}}, false)) // Download blob content if the blob hasn't been modified in the last 24 hours (fails): showResult(blobURL.Download(ctx, 0, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: time.Now().UTC().Add(time.Hour * -24)}}, false)) // Upload new content if the blob hasn't changed since the version identified by ETag (succeeds): upload, err = blobURL.Upload(ctx, strings.NewReader("Text-2"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: upload.ETag()}}) showResult(upload, err) // Download content if it has changed since the version identified by ETag (fails): showResult(blobURL.Download(ctx, 0, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: upload.ETag()}}, false)) // Upload content if the blob doesn't already exist (fails): showResult(blobURL.Upload(ctx, strings.NewReader("Text-3"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETagAny}})) } // This examples shows how to create a container 
with metadata and then how to read & update the metadata. func ExampleMetadata_containers() { // From the Azure portal, get your Storage account blob service URL endpoint. accountName, accountKey := accountInfo() // Create a ContainerURL object that wraps a soon-to-be-created container's URL and a default pipeline. u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer", accountName)) credential, err := NewSharedKeyCredential(accountName, accountKey) if err != nil { log.Fatal(err) } containerURL := NewContainerURL(*u, NewPipeline(credential, PipelineOptions{})) ctx := context.Background() // This example uses a never-expiring context // Create a container with some metadata (string key/value pairs) // NOTE: Metadata key names are always converted to lowercase before being sent to the Storage Service. // Therefore, you should always use lowercase letters; especially when querying a map for a metadata key. creatingApp, _ := os.Executable() _, err = containerURL.Create(ctx, Metadata{"author": "Jeffrey", "app": creatingApp}, PublicAccessNone) if err != nil { log.Fatal(err) } // Query the container's metadata get, err := containerURL.GetProperties(ctx, LeaseAccessConditions{}) if err != nil { log.Fatal(err) } // Show the container's metadata metadata := get.NewMetadata() for k, v := range metadata { fmt.Print(k + "=" + v + "\n") } // Update the metadata and write it back to the container metadata["author"] = "Aidan" // NOTE: The keyname is in all lowercase letters _, err = containerURL.SetMetadata(ctx, metadata, ContainerAccessConditions{}) if err != nil { log.Fatal(err) } // NOTE: The SetMetadata & SetProperties methods update the container's ETag & LastModified properties } // This examples shows how to create a blob with metadata and then how to read & update // the blob's read-only properties and metadata. func ExampleMetadata_blobs() { // From the Azure portal, get your Storage account blob service URL endpoint. 
accountName, accountKey := accountInfo() // Create a ContainerURL object that wraps a soon-to-be-created blob's URL and a default pipeline. u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/ReadMe.txt", accountName)) credential, err := NewSharedKeyCredential(accountName, accountKey) if err != nil { log.Fatal(err) } blobURL := NewBlockBlobURL(*u, NewPipeline(credential, PipelineOptions{})) ctx := context.Background() // This example uses a never-expiring context // Create a blob with metadata (string key/value pairs) // NOTE: Metadata key names are always converted to lowercase before being sent to the Storage Service. // Therefore, you should always use lowercase letters; especially when querying a map for a metadata key. creatingApp, _ := os.Executable() _, err = blobURL.Upload(ctx, strings.NewReader("Some text"), BlobHTTPHeaders{}, Metadata{"author": "Jeffrey", "app": creatingApp}, BlobAccessConditions{}) if err != nil { log.Fatal(err) } // Query the blob's properties and metadata get, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) if err != nil { log.Fatal(err) } // Show some of the blob's read-only properties fmt.Println(get.BlobType(), get.ETag(), get.LastModified()) // Show the blob's metadata metadata := get.NewMetadata() for k, v := range metadata { fmt.Print(k + "=" + v + "\n") } // Update the blob's metadata and write it back to the blob metadata["editor"] = "Grant" // Add a new key/value; NOTE: The keyname is in all lowercase letters _, err = blobURL.SetMetadata(ctx, metadata, BlobAccessConditions{}) if err != nil { log.Fatal(err) } // NOTE: The SetMetadata method updates the blob's ETag & LastModified properties } // This examples shows how to create a blob with HTTP Headers and then how to read & update // the blob's HTTP headers. func ExampleBlobHTTPHeaders() { // From the Azure portal, get your Storage account blob service URL endpoint. 
accountName, accountKey := accountInfo() // Create a ContainerURL object that wraps a soon-to-be-created blob's URL and a default pipeline. u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/ReadMe.txt", accountName)) credential, err := NewSharedKeyCredential(accountName, accountKey) if err != nil { log.Fatal(err) } blobURL := NewBlockBlobURL(*u, NewPipeline(credential, PipelineOptions{})) ctx := context.Background() // This example uses a never-expiring context // Create a blob with HTTP headers _, err = blobURL.Upload(ctx, strings.NewReader("Some text"), BlobHTTPHeaders{ ContentType: "text/html; charset=utf-8", ContentDisposition: "attachment", }, Metadata{}, BlobAccessConditions{}) if err != nil { log.Fatal(err) } // GetMetadata returns the blob's properties, HTTP headers, and metadata get, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) if err != nil { log.Fatal(err) } // Show some of the blob's read-only properties fmt.Println(get.BlobType(), get.ETag(), get.LastModified()) // Shows some of the blob's HTTP Headers httpHeaders := get.NewHTTPHeaders() fmt.Println(httpHeaders.ContentType, httpHeaders.ContentDisposition) // Update the blob's HTTP Headers and write them back to the blob httpHeaders.ContentType = "text/plain" _, err = blobURL.SetHTTPHeaders(ctx, httpHeaders, BlobAccessConditions{}) if err != nil { log.Fatal(err) } // NOTE: The SetMetadata method updates the blob's ETag & LastModified properties } // ExampleBlockBlobURL shows how to upload a lot of data (in blocks) to a blob. // A block blob can have a maximum of 50,000 blocks; each block can have a maximum of 100MB. // Therefore, the maximum size of a block blob is slightly more than 4.75 TB (100 MB X 50,000 blocks). func ExampleBlockBlobURL() { // From the Azure portal, get your Storage account blob service URL endpoint. accountName, accountKey := accountInfo() // Create a ContainerURL object that wraps a soon-to-be-created blob's URL and a default pipeline. 
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/MyBlockBlob.txt", accountName)) credential, err := NewSharedKeyCredential(accountName, accountKey) if err != nil { log.Fatal(err) } blobURL := NewBlockBlobURL(*u, NewPipeline(credential, PipelineOptions{})) ctx := context.Background() // This example uses a never-expiring context // These helper functions convert a binary block ID to a base-64 string and vice versa // NOTE: The blockID must be <= 64 bytes and ALL blockIDs for the block must be the same length blockIDBinaryToBase64 := func(blockID []byte) string { return base64.StdEncoding.EncodeToString(blockID) } blockIDBase64ToBinary := func(blockID string) []byte { binary, _ := base64.StdEncoding.DecodeString(blockID); return binary } // These helper functions convert an int block ID to a base-64 string and vice versa blockIDIntToBase64 := func(blockID int) string { binaryBlockID := (&[4]byte{})[:] // All block IDs are 4 bytes long binary.LittleEndian.PutUint32(binaryBlockID, uint32(blockID)) return blockIDBinaryToBase64(binaryBlockID) } blockIDBase64ToInt := func(blockID string) int { blockIDBase64ToBinary(blockID) return int(binary.LittleEndian.Uint32(blockIDBase64ToBinary(blockID))) } // Upload 4 blocks to the blob (these blocks are tiny; they can be up to 100MB each) words := []string{"Azure ", "Storage ", "Block ", "Blob."} base64BlockIDs := make([]string, len(words)) // The collection of block IDs (base 64 strings) // Upload each block sequentially (one after the other); for better performance, you want to upload multiple blocks in parallel) for index, word := range words { // This example uses the index as the block ID; convert the index/ID into a base-64 encoded string as required by the service. // NOTE: Over the lifetime of a blob, all block IDs (before base 64 encoding) must be the same length (this example uses 4 byte block IDs). 
base64BlockIDs[index] = blockIDIntToBase64(index) // Some people use UUIDs for block IDs // Upload a block to this blob specifying the Block ID and its content (up to 100MB); this block is uncommitted. _, err := blobURL.StageBlock(ctx, base64BlockIDs[index], strings.NewReader(word), LeaseAccessConditions{}, nil) if err != nil { log.Fatal(err) } } // After all the blocks are uploaded, atomically commit them to the blob. _, err = blobURL.CommitBlockList(ctx, base64BlockIDs, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}) if err != nil { log.Fatal(err) } // For the blob, show each block (ID and size) that is a committed part of it. getBlock, err := blobURL.GetBlockList(ctx, BlockListAll, LeaseAccessConditions{}) if err != nil { log.Fatal(err) } for _, block := range getBlock.CommittedBlocks { fmt.Printf("Block ID=%d, Size=%d\n", blockIDBase64ToInt(block.Name), block.Size) } // Download the blob in its entirety; download operations do not take blocks into account. // NOTE: For really large blobs, downloading them like allocates a lot of memory. get, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false) if err != nil { log.Fatal(err) } blobData := &bytes.Buffer{} reader := get.Body(RetryReaderOptions{}) blobData.ReadFrom(reader) reader.Close() // The client must close the response body when finished with it fmt.Println(blobData) } // ExampleAppendBlobURL shows how to append data (in blocks) to an append blob. // An append blob can have a maximum of 50,000 blocks; each block can have a maximum of 100MB. // Therefore, the maximum size of an append blob is slightly more than 4.75 TB (100 MB X 50,000 blocks). func ExampleAppendBlobURL() { // From the Azure portal, get your Storage account blob service URL endpoint. accountName, accountKey := accountInfo() // Create a ContainerURL object that wraps a soon-to-be-created blob's URL and a default pipeline. 
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/MyAppendBlob.txt", accountName)) credential, err := NewSharedKeyCredential(accountName, accountKey) if err != nil { log.Fatal(err) } appendBlobURL := NewAppendBlobURL(*u, NewPipeline(credential, PipelineOptions{})) ctx := context.Background() // This example uses a never-expiring context _, err = appendBlobURL.Create(ctx, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}) if err != nil { log.Fatal(err) } for i := 0; i < 5; i++ { // Append 5 blocks to the append blob _, err := appendBlobURL.AppendBlock(ctx, strings.NewReader(fmt.Sprintf("Appending block #%d\n", i)), AppendBlobAccessConditions{}, nil) if err != nil { log.Fatal(err) } } // Download the entire append blob's contents and show it. get, err := appendBlobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false) if err != nil { log.Fatal(err) } b := bytes.Buffer{} reader := get.Body(RetryReaderOptions{}) b.ReadFrom(reader) reader.Close() // The client must close the response body when finished with it fmt.Println(b.String()) } // ExamplePageBlobURL shows how to manipulate a page blob with PageBlobURL. // A page blob is a collection of 512-byte pages optimized for random read and write operations. // The maximum size for a page blob is 8 TB. func ExamplePageBlobURL() { // From the Azure portal, get your Storage account blob service URL endpoint. accountName, accountKey := accountInfo() // Create a ContainerURL object that wraps a soon-to-be-created blob's URL and a default pipeline. 
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/MyPageBlob.txt", accountName)) credential, err := NewSharedKeyCredential(accountName, accountKey) if err != nil { log.Fatal(err) } blobURL := NewPageBlobURL(*u, NewPipeline(credential, PipelineOptions{})) ctx := context.Background() // This example uses a never-expiring context _, err = blobURL.Create(ctx, PageBlobPageBytes*4, 0, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}) if err != nil { log.Fatal(err) } page := [PageBlobPageBytes]byte{} copy(page[:], "Page 0") _, err = blobURL.UploadPages(ctx, 0*PageBlobPageBytes, bytes.NewReader(page[:]), PageBlobAccessConditions{}, nil) if err != nil { log.Fatal(err) } copy(page[:], "Page 1") _, err = blobURL.UploadPages(ctx, 2*PageBlobPageBytes, bytes.NewReader(page[:]), PageBlobAccessConditions{}, nil) if err != nil { log.Fatal(err) } getPages, err := blobURL.GetPageRanges(ctx, 0*PageBlobPageBytes, 10*PageBlobPageBytes, BlobAccessConditions{}) if err != nil { log.Fatal(err) } for _, pr := range getPages.PageRange { fmt.Printf("Start=%d, End=%d\n", pr.Start, pr.End) } _, err = blobURL.ClearPages(ctx, 0*PageBlobPageBytes, 1*PageBlobPageBytes, PageBlobAccessConditions{}) if err != nil { log.Fatal(err) } getPages, err = blobURL.GetPageRanges(ctx, 0*PageBlobPageBytes, 10*PageBlobPageBytes, BlobAccessConditions{}) if err != nil { log.Fatal(err) } for _, pr := range getPages.PageRange { fmt.Printf("Start=%d, End=%d\n", pr.Start, pr.End) } get, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false) if err != nil { log.Fatal(err) } blobData := &bytes.Buffer{} reader := get.Body(RetryReaderOptions{}) blobData.ReadFrom(reader) reader.Close() // The client must close the response body when finished with it fmt.Printf("%#v", blobData.Bytes()) } // This example show how to create a blob, take a snapshot of it, update the base blob, // read from the blob snapshot, list blobs with their snapshots, and hot to delete blob snapshots. 
// Example_blobSnapshots shows how to create a blob, take a snapshot of it, update the base
// blob, read from the snapshot, list blobs with their snapshots, and delete blob snapshots.
func Example_blobSnapshots() {
	// From the Azure portal, get your Storage account blob service URL endpoint.
	accountName, accountKey := accountInfo()

	// Create a ContainerURL object to a container where we'll create a blob and its snapshot.
	u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer", accountName))
	credential, err := NewSharedKeyCredential(accountName, accountKey)
	if err != nil {
		log.Fatal(err)
	}
	containerURL := NewContainerURL(*u, NewPipeline(credential, PipelineOptions{}))

	// Create a BlockBlobURL object to a blob in the container.
	baseBlobURL := containerURL.NewBlockBlobURL("Original.txt")

	ctx := context.Background() // This example uses a never-expiring context

	// Create the original blob:
	_, err = baseBlobURL.Upload(ctx, strings.NewReader("Some text"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
	if err != nil {
		log.Fatal(err)
	}

	// Create a snapshot of the original blob & save its timestamp:
	createSnapshot, err := baseBlobURL.CreateSnapshot(ctx, Metadata{}, BlobAccessConditions{})
	if err != nil { // BUG FIX: this error was previously ignored before reading Snapshot()
		log.Fatal(err)
	}
	snapshot := createSnapshot.Snapshot()

	// Modify the original blob & show it:
	_, err = baseBlobURL.Upload(ctx, strings.NewReader("New text"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{})
	if err != nil {
		log.Fatal(err)
	}

	get, err := baseBlobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false)
	if err != nil { // BUG FIX: this error was previously ignored before reading the body
		log.Fatal(err)
	}
	b := bytes.Buffer{}
	reader := get.Body(RetryReaderOptions{})
	b.ReadFrom(reader)
	reader.Close() // The client must close the response body when finished with it
	fmt.Println(b.String())

	// Show snapshot blob via original blob URI & snapshot time:
	snapshotBlobURL := baseBlobURL.WithSnapshot(snapshot)
	get, err = snapshotBlobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false)
	if err != nil { // BUG FIX: this error was previously ignored before reading the body
		log.Fatal(err)
	}
	b.Reset()
	reader = get.Body(RetryReaderOptions{})
	b.ReadFrom(reader)
	reader.Close() // The client must close the response body when finished with it
	fmt.Println(b.String())

	// FYI: You can get the base blob URL from one of its snapshot by passing "" to WithSnapshot:
	baseBlobURL = snapshotBlobURL.WithSnapshot("")

	// Show all blobs in the container with their snapshots:
	// List the blob(s) in our container; since a container may hold millions of blobs, this is done 1 segment at a time.
	for marker := (Marker{}); marker.NotDone(); { // The parens around Marker{} are required to avoid compiler error.
		// Get a result segment starting with the blob indicated by the current Marker.
		listBlobs, err := containerURL.ListBlobsFlatSegment(ctx, marker, ListBlobsSegmentOptions{
			Details: BlobListingDetails{Snapshots: true}})
		if err != nil {
			log.Fatal(err)
		}
		// IMPORTANT: ListBlobs returns the start of the next segment; you MUST use this to get
		// the next segment (after processing the current result segment).
		marker = listBlobs.NextMarker

		// Process the blobs returned in this result segment (if the segment is empty, the loop body won't execute)
		for _, blobInfo := range listBlobs.Segment.BlobItems {
			snaptime := "N/A"
			if blobInfo.Snapshot != "" {
				snaptime = blobInfo.Snapshot
			}
			fmt.Printf("Blob name: %s, Snapshot: %s\n", blobInfo.Name, snaptime)
		}
	}

	// Promote read-only snapshot to writable base blob:
	_, err = baseBlobURL.StartCopyFromURL(ctx, snapshotBlobURL.URL(), Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{})
	if err != nil {
		log.Fatal(err)
	}

	// When calling Delete on a base blob:
	// DeleteSnapshotsOptionOnly deletes all the base blob's snapshots but not the base blob itself
	// DeleteSnapshotsOptionInclude deletes the base blob & all its snapshots.
	// DeleteSnapshotOptionNone produces an error if the base blob has any snapshots.
	_, err = baseBlobURL.Delete(ctx, DeleteSnapshotsOptionInclude, BlobAccessConditions{})
	if err != nil {
		log.Fatal(err)
	}
}

func Example_progressUploadDownload() {
	// Create a request pipeline using your Storage account's name and account key.
accountName, accountKey := accountInfo() credential, err := NewSharedKeyCredential(accountName, accountKey) if err != nil { log.Fatal(err) } p := NewPipeline(credential, PipelineOptions{}) // From the Azure portal, get your Storage account blob service URL endpoint. cURL, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer", accountName)) // Create an ServiceURL object that wraps the service URL and a request pipeline to making requests. containerURL := NewContainerURL(*cURL, p) ctx := context.Background() // This example uses a never-expiring context // Here's how to create a blob with HTTP headers and metadata (I'm using the same metadata that was put on the container): blobURL := containerURL.NewBlockBlobURL("Data.bin") // requestBody is the stream of data to write requestBody := strings.NewReader("Some text to write") // Wrap the request body in a RequestBodyProgress and pass a callback function for progress reporting. _, err = blobURL.Upload(ctx, pipeline.NewRequestBodyProgress(requestBody, func(bytesTransferred int64) { fmt.Printf("Wrote %d of %d bytes.", bytesTransferred, requestBody.Size()) }), BlobHTTPHeaders{ ContentType: "text/html; charset=utf-8", ContentDisposition: "attachment", }, Metadata{}, BlobAccessConditions{}) if err != nil { log.Fatal(err) } // Here's how to read the blob's data with progress reporting: get, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false) if err != nil { log.Fatal(err) } // Wrap the response body in a ResponseBodyProgress and pass a callback function for progress reporting. 
responseBody := pipeline.NewResponseBodyProgress(get.Body(RetryReaderOptions{}), func(bytesTransferred int64) { fmt.Printf("Read %d of %d bytes.", bytesTransferred, get.ContentLength()) }) downloadedData := &bytes.Buffer{} downloadedData.ReadFrom(responseBody) responseBody.Close() // The client must close the response body when finished with it // The downloaded blob data is in downloadData's buffer } // This example shows how to copy a source document on the Internet to a blob. func ExampleBlobURL_startCopy() { // From the Azure portal, get your Storage account blob service URL endpoint. accountName, accountKey := accountInfo() // Create a ContainerURL object to a container where we'll create a blob and its snapshot. // Create a BlockBlobURL object to a blob in the container. u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/CopiedBlob.bin", accountName)) credential, err := NewSharedKeyCredential(accountName, accountKey) if err != nil { log.Fatal(err) } blobURL := NewBlobURL(*u, NewPipeline(credential, PipelineOptions{})) ctx := context.Background() // This example uses a never-expiring context src, _ := url.Parse("https://cdn2.auth0.com/docs/media/addons/azure_blob.svg") startCopy, err := blobURL.StartCopyFromURL(ctx, *src, nil, ModifiedAccessConditions{}, BlobAccessConditions{}) if err != nil { log.Fatal(err) } copyID := startCopy.CopyID() copyStatus := startCopy.CopyStatus() for copyStatus == CopyStatusPending { time.Sleep(time.Second * 2) getMetadata, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) if err != nil { log.Fatal(err) } copyStatus = getMetadata.CopyStatus() } fmt.Printf("Copy from %s to %s: ID=%s, Status=%s\n", src.String(), blobURL, copyID, copyStatus) } // This example shows how to copy a large stream in blocks (chunks) to a block blob. 
func ExampleUploadFileToBlockBlobAndDownloadItBack() { file, err := os.Open("BigFile.bin") // Open the file we want to upload if err != nil { log.Fatal(err) } defer file.Close() fileSize, err := file.Stat() // Get the size of the file (stream) if err != nil { log.Fatal(err) } // From the Azure portal, get your Storage account blob service URL endpoint. accountName, accountKey := accountInfo() // Create a BlockBlobURL object to a blob in the container (we assume the container already exists). u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/BigBlockBlob.bin", accountName)) credential, err := NewSharedKeyCredential(accountName, accountKey) if err != nil { log.Fatal(err) } blockBlobURL := NewBlockBlobURL(*u, NewPipeline(credential, PipelineOptions{})) ctx := context.Background() // This example uses a never-expiring context // Pass the Context, stream, stream size, block blob URL, and options to StreamToBlockBlob response, err := UploadFileToBlockBlob(ctx, file, blockBlobURL, UploadToBlockBlobOptions{ // If Progress is non-nil, this function is called periodically as bytes are uploaded. Progress: func(bytesTransferred int64) { fmt.Printf("Uploaded %d of %d bytes.\n", bytesTransferred, fileSize.Size()) }, }) if err != nil { log.Fatal(err) } _ = response // Avoid compiler's "declared and not used" error // Set up file to download the blob to destFileName := "BigFile-downloaded.bin" destFile, err := os.Create(destFileName) defer destFile.Close() // Perform download err = DownloadBlobToFile(context.Background(), blockBlobURL.BlobURL, 0, CountToEnd, destFile, DownloadFromBlobOptions{ // If Progress is non-nil, this function is called periodically as bytes are uploaded. Progress: func(bytesTransferred int64) { fmt.Printf("Downloaded %d of %d bytes.\n", bytesTransferred, fileSize.Size()) }}) if err != nil { log.Fatal(err) } } // This example shows how to download a large stream with intelligent retries. 
Specifically, if // the connection fails while reading, continuing to read from this stream initiates a new // GetBlob call passing a range that starts from the last byte successfully read before the failure. func ExampleBlobUrl_Download() { // From the Azure portal, get your Storage account blob service URL endpoint. accountName, accountKey := accountInfo() // Create a BlobURL object to a blob in the container (we assume the container & blob already exist). u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/BigBlob.bin", accountName)) credential, err := NewSharedKeyCredential(accountName, accountKey) if err != nil { log.Fatal(err) } blobURL := NewBlobURL(*u, NewPipeline(credential, PipelineOptions{})) contentLength := int64(0) // Used for progress reporting to report the total number of bytes being downloaded. // Download returns an intelligent retryable stream around a blob; it returns an io.ReadCloser. dr, err := blobURL.Download(context.TODO(), 0, -1, BlobAccessConditions{}, false) if err != nil { log.Fatal(err) } rs := dr.Body(RetryReaderOptions{}) // NewResponseBodyProgress wraps the GetRetryStream with progress reporting; it returns an io.ReadCloser. stream := pipeline.NewResponseBodyProgress(rs, func(bytesTransferred int64) { fmt.Printf("Downloaded %d of %d bytes.\n", bytesTransferred, contentLength) }) defer stream.Close() // The client must close the response body when finished with it file, err := os.Create("BigFile.bin") // Create the file to hold the downloaded blob contents. if err != nil { log.Fatal(err) } defer file.Close() written, err := io.Copy(file, stream) // Write to the file by reading from the blob (with intelligent retries). if err != nil { log.Fatal(err) } _ = written // Avoid compiler's "declared and not used" error } func ExampleUploadStreamToBlockBlob() { // From the Azure portal, get your Storage account blob service URL endpoint. 
accountName, accountKey := accountInfo() // Create a BlockBlobURL object to a blob in the container (we assume the container already exists). u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/BigBlockBlob.bin", accountName)) credential, err := NewSharedKeyCredential(accountName, accountKey) if err != nil { log.Fatal(err) } blockBlobURL := NewBlockBlobURL(*u, NewPipeline(credential, PipelineOptions{})) ctx := context.Background() // This example uses a never-expiring context // Create some data to test the upload stream blobSize := 8 * 1024 * 1024 data := make([]byte, blobSize) rand.Read(data) // Perform UploadStreamToBlockBlob bufferSize := 2 * 1024 * 1024 // Configure the size of the rotating buffers that are used when uploading maxBuffers := 3 // Configure the number of rotating buffers that are used when uploading _, err = UploadStreamToBlockBlob(ctx, bytes.NewReader(data), blockBlobURL, UploadStreamToBlockBlobOptions{BufferSize: bufferSize, MaxBuffers: maxBuffers}) // Verify that upload was successful if err != nil { log.Fatal(err) } } // This example shows how to perform various lease operations on a container. // The same lease operations can be performed on individual blobs as well. // A lease on a container prevents it from being deleted by others, while a lease on a blob // protects it from both modifications and deletions. func ExampleLeaseContainer() { // From the Azure portal, get your Storage account's name and account key. accountName, accountKey := accountInfo() // Use your Storage account's name and key to create a credential object; this is used to access your account. credential, err := NewSharedKeyCredential(accountName, accountKey) if err != nil { log.Fatal(err) } // Create an ContainerURL object that wraps the container's URL and a default pipeline. 
u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer", accountName)) containerURL := NewContainerURL(*u, NewPipeline(credential, PipelineOptions{})) // All operations allow you to specify a timeout via a Go context.Context object. ctx := context.Background() // This example uses a never-expiring context // Now acquire a lease on the container. // You can choose to pass an empty string for proposed ID so that the service automatically assigns one for you. acquireLeaseResponse, err := containerURL.AcquireLease(ctx, "", 60, ModifiedAccessConditions{}) if err != nil { log.Fatal(err) } fmt.Println("The container is leased for delete operations with lease ID", acquireLeaseResponse.LeaseID()) // The container cannot be deleted without providing the lease ID. _, err = containerURL.Delete(ctx, ContainerAccessConditions{}) if err == nil { log.Fatal("delete should have failed") } fmt.Println("The container cannot be deleted while there is an active lease") // We can release the lease now and the container can be deleted. _, err = containerURL.ReleaseLease(ctx, acquireLeaseResponse.LeaseID(), ModifiedAccessConditions{}) if err != nil { log.Fatal(err) } fmt.Println("The lease on the container is now released") // Acquire a lease again to perform other operations. acquireLeaseResponse, err = containerURL.AcquireLease(ctx, "", 60, ModifiedAccessConditions{}) if err != nil { log.Fatal(err) } fmt.Println("The container is leased again with lease ID", acquireLeaseResponse.LeaseID()) // We can change the ID of an existing lease. // A lease ID can be any valid GUID string format. newLeaseID := newUUID() newLeaseID[0] = 1 changeLeaseResponse, err := containerURL.ChangeLease(ctx, acquireLeaseResponse.LeaseID(), newLeaseID.String(), ModifiedAccessConditions{}) if err != nil { log.Fatal(err) } fmt.Println("The lease ID was changed to", changeLeaseResponse.LeaseID()) // The lease can be renewed. 
renewLeaseResponse, err := containerURL.RenewLease(ctx, changeLeaseResponse.LeaseID(), ModifiedAccessConditions{}) if err != nil { log.Fatal(err) } fmt.Println("The lease was renewed with the same ID", renewLeaseResponse.LeaseID()) // Finally, the lease can be broken and we could prevent others from acquiring a lease for a period of time _, err = containerURL.BreakLease(ctx, 60, ModifiedAccessConditions{}) if err != nil { log.Fatal(err) } fmt.Println("The lease was borken, and nobody can acquire a lease for 60 seconds") } // This example shows how to list blobs with hierarchy, by using a delimiter. func ExampleListBlobsHierarchy() { // From the Azure portal, get your Storage account's name and account key. accountName, accountKey := accountInfo() // Use your Storage account's name and key to create a credential object; this is used to access your account. credential, err := NewSharedKeyCredential(accountName, accountKey) if err != nil { log.Fatal(err) } // Create an ContainerURL object that wraps the container's URL and a default pipeline. u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer", accountName)) containerURL := NewContainerURL(*u, NewPipeline(credential, PipelineOptions{})) // All operations allow you to specify a timeout via a Go context.Context object. 
ctx := context.Background() // This example uses a never-expiring context // Create 4 blobs: 3 of which have a virtual directory blobNames := []string{"a/1", "a/2", "b/1", "boaty_mcboatface"} for _, blobName := range blobNames { blobURL := containerURL.NewBlockBlobURL(blobName) _, err := blobURL.Upload(ctx, strings.NewReader("test"), BlobHTTPHeaders{}, nil, BlobAccessConditions{}) if err != nil { log.Fatal("an error occurred while creating blobs for the example setup") } } // Perform a listing operation on blobs with hierarchy resp, err := containerURL.ListBlobsHierarchySegment(ctx, Marker{}, "/", ListBlobsSegmentOptions{}) if err != nil { log.Fatal("an error occurred while listing blobs") } // When a delimiter is used, the listing operation returns BlobPrefix elements that acts as // a placeholder for all blobs whose names begin with the same substring up to the appearance of the delimiter character. // In our example, this means that a/ and b/ will be both returned fmt.Println("======First listing=====") for _, blobPrefix := range resp.Segment.BlobPrefixes { fmt.Println("The blob prefix with name", blobPrefix.Name, "was returned in the listing operation") } // The blobs that do not contain the delimiter are still returned for _, blob := range resp.Segment.BlobItems { fmt.Println("The blob with name", blob.Name, "was returned in the listing operation") } // For the prefixes that are returned, we can perform another listing operation on them, to see their contents resp, err = containerURL.ListBlobsHierarchySegment(ctx, Marker{}, "/", ListBlobsSegmentOptions{ Prefix: "a/", }) if err != nil { log.Fatal("an error occurred while listing blobs") } // This time, there is no blob prefix returned, since nothing under a/ has another / in its name. // In other words, in the virtual directory of a/, there aren't any sub-level virtual directory. 
fmt.Println("======Second listing=====") fmt.Println("No prefiex should be returned now, and the actual count is", len(resp.Segment.BlobPrefixes)) // The blobs a/1 and a/2 should be returned for _, blob := range resp.Segment.BlobItems { fmt.Println("The blob with name", blob.Name, "was returned in the listing operation") } // Delete the blobs created by this example for _, blobName := range blobNames { blobURL := containerURL.NewBlockBlobURL(blobName) _, err := blobURL.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{}) if err != nil { log.Fatal("an error occurred while deleting the blobs created by the example") } } } azure-storage-blob-go-0.10.0/azblob/zt_hiddenfuncs_test.go000066400000000000000000000007441367515646300235720ustar00rootroot00000000000000package azblob // This file isn't normally compiled when not testing. // Therefore, since we're in the azblob package, we can export a method to help us construct a failing retry reader options. func InjectErrorInRetryReaderOptions(err error) RetryReaderOptions { return RetryReaderOptions{ MaxRetryRequests: 1, doInjectError: true, doInjectErrorRound: 0, injectedError: err, NotifyFailedRead: nil, TreatEarlyCloseAsError: false, } } azure-storage-blob-go-0.10.0/azblob/zt_highlevel_test.go000066400000000000000000000330271367515646300232470ustar00rootroot00000000000000package azblob import ( "context" "errors" "io/ioutil" "os" "sync/atomic" "time" chk "gopkg.in/check.v1" ) // create a test file func generateFile(fileName string, fileSize int) []byte { // generate random data _, bigBuff := getRandomDataAndReader(fileSize) // write to file and return the data ioutil.WriteFile(fileName, bigBuff, 0666) return bigBuff } func performUploadStreamToBlockBlobTest(c *chk.C, blobSize, bufferSize, maxBuffers int) { // Set up test container bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) // Set up test blob blobURL, _ := getBlockBlobURL(c, containerURL) // Create some 
data to test the upload stream blobContentReader, blobData := getRandomDataAndReader(blobSize) // Perform UploadStreamToBlockBlob uploadResp, err := UploadStreamToBlockBlob(ctx, blobContentReader, blobURL, UploadStreamToBlockBlobOptions{BufferSize: bufferSize, MaxBuffers: maxBuffers}) // Assert that upload was successful c.Assert(err, chk.Equals, nil) c.Assert(uploadResp.Response().StatusCode, chk.Equals, 201) // Download the blob to verify downloadResponse, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false) c.Assert(err, chk.IsNil) // Assert that the content is correct actualBlobData, err := ioutil.ReadAll(downloadResponse.Response().Body) c.Assert(err, chk.IsNil) c.Assert(len(actualBlobData), chk.Equals, blobSize) c.Assert(actualBlobData, chk.DeepEquals, blobData) } func (s *aztestsSuite) TestUploadStreamToBlockBlobInChunks(c *chk.C) { blobSize := 8 * 1024 bufferSize := 1024 maxBuffers := 3 performUploadStreamToBlockBlobTest(c, blobSize, bufferSize, maxBuffers) } func (s *aztestsSuite) TestUploadStreamToBlockBlobSingleBuffer(c *chk.C) { blobSize := 8 * 1024 bufferSize := 1024 maxBuffers := 1 performUploadStreamToBlockBlobTest(c, blobSize, bufferSize, maxBuffers) } func (s *aztestsSuite) TestUploadStreamToBlockBlobSingleIO(c *chk.C) { blobSize := 1024 bufferSize := 8 * 1024 maxBuffers := 3 performUploadStreamToBlockBlobTest(c, blobSize, bufferSize, maxBuffers) } func (s *aztestsSuite) TestUploadStreamToBlockBlobSingleIOEdgeCase(c *chk.C) { blobSize := 8 * 1024 bufferSize := 8 * 1024 maxBuffers := 3 performUploadStreamToBlockBlobTest(c, blobSize, bufferSize, maxBuffers) } func (s *aztestsSuite) TestUploadStreamToBlockBlobEmpty(c *chk.C) { blobSize := 0 bufferSize := 8 * 1024 maxBuffers := 3 performUploadStreamToBlockBlobTest(c, blobSize, bufferSize, maxBuffers) } func performUploadAndDownloadFileTest(c *chk.C, fileSize, blockSize, parallelism, downloadOffset, downloadCount int) { // Set up file to upload fileName := "BigFile.bin" fileData := 
generateFile(fileName, fileSize) // Open the file to upload file, err := os.Open(fileName) c.Assert(err, chk.Equals, nil) defer file.Close() defer os.Remove(fileName) // Set up test container bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) // Set up test blob blockBlobURL, _ := getBlockBlobURL(c, containerURL) // Upload the file to a block blob response, err := UploadFileToBlockBlob(context.Background(), file, blockBlobURL, UploadToBlockBlobOptions{ BlockSize: int64(blockSize), Parallelism: uint16(parallelism), // If Progress is non-nil, this function is called periodically as bytes are uploaded. Progress: func(bytesTransferred int64) { c.Assert(bytesTransferred > 0 && bytesTransferred <= int64(fileSize), chk.Equals, true) }, }) c.Assert(err, chk.Equals, nil) c.Assert(response.Response().StatusCode, chk.Equals, 201) // Set up file to download the blob to destFileName := "BigFile-downloaded.bin" destFile, err := os.Create(destFileName) c.Assert(err, chk.Equals, nil) defer destFile.Close() defer os.Remove(destFileName) // Perform download err = DownloadBlobToFile(context.Background(), blockBlobURL.BlobURL, int64(downloadOffset), int64(downloadCount), destFile, DownloadFromBlobOptions{ BlockSize: int64(blockSize), Parallelism: uint16(parallelism), // If Progress is non-nil, this function is called periodically as bytes are uploaded. 
Progress: func(bytesTransferred int64) { c.Assert(bytesTransferred > 0 && bytesTransferred <= int64(fileSize), chk.Equals, true) }}) // Assert download was successful c.Assert(err, chk.Equals, nil) // Assert downloaded data is consistent var destBuffer []byte if downloadCount == CountToEnd { destBuffer = make([]byte, fileSize-downloadOffset) } else { destBuffer = make([]byte, downloadCount) } n, err := destFile.Read(destBuffer) c.Assert(err, chk.Equals, nil) if downloadOffset == 0 && downloadCount == 0 { c.Assert(destBuffer, chk.DeepEquals, fileData) } else { if downloadCount == 0 { c.Assert(n, chk.Equals, fileSize-downloadOffset) c.Assert(destBuffer, chk.DeepEquals, fileData[downloadOffset:]) } else { c.Assert(n, chk.Equals, downloadCount) c.Assert(destBuffer, chk.DeepEquals, fileData[downloadOffset:downloadOffset+downloadCount]) } } } func (s *aztestsSuite) TestUploadAndDownloadFileInChunks(c *chk.C) { fileSize := 8 * 1024 blockSize := 1024 parallelism := 3 performUploadAndDownloadFileTest(c, fileSize, blockSize, parallelism, 0, 0) } func (s *aztestsSuite) TestUploadAndDownloadFileSingleIO(c *chk.C) { fileSize := 1024 blockSize := 2048 parallelism := 3 performUploadAndDownloadFileTest(c, fileSize, blockSize, parallelism, 0, 0) } func (s *aztestsSuite) TestUploadAndDownloadFileSingleRoutine(c *chk.C) { fileSize := 8 * 1024 blockSize := 1024 parallelism := 1 performUploadAndDownloadFileTest(c, fileSize, blockSize, parallelism, 0, 0) } func (s *aztestsSuite) TestUploadAndDownloadFileEmpty(c *chk.C) { fileSize := 0 blockSize := 1024 parallelism := 3 performUploadAndDownloadFileTest(c, fileSize, blockSize, parallelism, 0, 0) } func (s *aztestsSuite) TestUploadAndDownloadFileNonZeroOffset(c *chk.C) { fileSize := 8 * 1024 blockSize := 1024 parallelism := 3 downloadOffset := 1000 downloadCount := 0 performUploadAndDownloadFileTest(c, fileSize, blockSize, parallelism, downloadOffset, downloadCount) } func (s *aztestsSuite) TestUploadAndDownloadFileNonZeroCount(c *chk.C) { 
fileSize := 8 * 1024 blockSize := 1024 parallelism := 3 downloadOffset := 0 downloadCount := 6000 performUploadAndDownloadFileTest(c, fileSize, blockSize, parallelism, downloadOffset, downloadCount) } func (s *aztestsSuite) TestUploadAndDownloadFileNonZeroOffsetAndCount(c *chk.C) { fileSize := 8 * 1024 blockSize := 1024 parallelism := 3 downloadOffset := 1000 downloadCount := 6000 performUploadAndDownloadFileTest(c, fileSize, blockSize, parallelism, downloadOffset, downloadCount) } func performUploadAndDownloadBufferTest(c *chk.C, blobSize, blockSize, parallelism, downloadOffset, downloadCount int) { // Set up buffer to upload _, bytesToUpload := getRandomDataAndReader(blobSize) // Set up test container bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) // Set up test blob blockBlobURL, _ := getBlockBlobURL(c, containerURL) // Pass the Context, stream, stream size, block blob URL, and options to StreamToBlockBlob response, err := UploadBufferToBlockBlob(context.Background(), bytesToUpload, blockBlobURL, UploadToBlockBlobOptions{ BlockSize: int64(blockSize), Parallelism: uint16(parallelism), // If Progress is non-nil, this function is called periodically as bytes are uploaded. 
Progress: func(bytesTransferred int64) { c.Assert(bytesTransferred > 0 && bytesTransferred <= int64(blobSize), chk.Equals, true) }, }) c.Assert(err, chk.Equals, nil) c.Assert(response.Response().StatusCode, chk.Equals, 201) // Set up buffer to download the blob to var destBuffer []byte if downloadCount == CountToEnd { destBuffer = make([]byte, blobSize-downloadOffset) } else { destBuffer = make([]byte, downloadCount) } // Download the blob to a buffer err = DownloadBlobToBuffer(context.Background(), blockBlobURL.BlobURL, int64(downloadOffset), int64(downloadCount), destBuffer, DownloadFromBlobOptions{ AccessConditions: BlobAccessConditions{}, BlockSize: int64(blockSize), Parallelism: uint16(parallelism), // If Progress is non-nil, this function is called periodically as bytes are uploaded. Progress: func(bytesTransferred int64) { c.Assert(bytesTransferred > 0 && bytesTransferred <= int64(blobSize), chk.Equals, true) }, }) c.Assert(err, chk.Equals, nil) if downloadOffset == 0 && downloadCount == 0 { c.Assert(destBuffer, chk.DeepEquals, bytesToUpload) } else { if downloadCount == 0 { c.Assert(destBuffer, chk.DeepEquals, bytesToUpload[downloadOffset:]) } else { c.Assert(destBuffer, chk.DeepEquals, bytesToUpload[downloadOffset:downloadOffset+downloadCount]) } } } func (s *aztestsSuite) TestUploadAndDownloadBufferInChunks(c *chk.C) { blobSize := 8 * 1024 blockSize := 1024 parallelism := 3 performUploadAndDownloadBufferTest(c, blobSize, blockSize, parallelism, 0, 0) } func (s *aztestsSuite) TestUploadAndDownloadBufferSingleIO(c *chk.C) { blobSize := 1024 blockSize := 8 * 1024 parallelism := 3 performUploadAndDownloadBufferTest(c, blobSize, blockSize, parallelism, 0, 0) } func (s *aztestsSuite) TestUploadAndDownloadBufferSingleRoutine(c *chk.C) { blobSize := 8 * 1024 blockSize := 1024 parallelism := 1 performUploadAndDownloadBufferTest(c, blobSize, blockSize, parallelism, 0, 0) } func (s *aztestsSuite) TestUploadAndDownloadBufferEmpty(c *chk.C) { blobSize := 0 blockSize 
:= 1024 parallelism := 3 performUploadAndDownloadBufferTest(c, blobSize, blockSize, parallelism, 0, 0) } func (s *aztestsSuite) TestDownloadBufferWithNonZeroOffset(c *chk.C) { blobSize := 8 * 1024 blockSize := 1024 parallelism := 3 downloadOffset := 1000 downloadCount := 0 performUploadAndDownloadBufferTest(c, blobSize, blockSize, parallelism, downloadOffset, downloadCount) } func (s *aztestsSuite) TestDownloadBufferWithNonZeroCount(c *chk.C) { blobSize := 8 * 1024 blockSize := 1024 parallelism := 3 downloadOffset := 0 downloadCount := 6000 performUploadAndDownloadBufferTest(c, blobSize, blockSize, parallelism, downloadOffset, downloadCount) } func (s *aztestsSuite) TestDownloadBufferWithNonZeroOffsetAndCount(c *chk.C) { blobSize := 8 * 1024 blockSize := 1024 parallelism := 3 downloadOffset := 2000 downloadCount := 6 * 1024 performUploadAndDownloadBufferTest(c, blobSize, blockSize, parallelism, downloadOffset, downloadCount) } func (s *aztestsSuite) TestBasicDoBatchTransfer(c *chk.C) { // test the basic multi-routine processing type testInstance struct { transferSize int64 chunkSize int64 parallelism uint16 expectError bool } testMatrix := []testInstance{ {transferSize: 100, chunkSize: 10, parallelism: 5, expectError: false}, {transferSize: 100, chunkSize: 9, parallelism: 4, expectError: false}, {transferSize: 100, chunkSize: 8, parallelism: 15, expectError: false}, {transferSize: 100, chunkSize: 1, parallelism: 3, expectError: false}, {transferSize: 0, chunkSize: 100, parallelism: 5, expectError: false}, // empty file works {transferSize: 100, chunkSize: 0, parallelism: 5, expectError: true}, // 0 chunk size on the other hand must fail {transferSize: 0, chunkSize: 0, parallelism: 5, expectError: true}, } for _, test := range testMatrix { ctx := context.Background() // maintain some counts to make sure the right number of chunks were queued, and the total size is correct totalSizeCount := int64(0) runCount := int64(0) err := DoBatchTransfer(ctx, 
BatchTransferOptions{ TransferSize: test.transferSize, ChunkSize: test.chunkSize, Parallelism: test.parallelism, Operation: func(offset int64, chunkSize int64, ctx context.Context) error { atomic.AddInt64(&totalSizeCount, chunkSize) atomic.AddInt64(&runCount, 1) return nil }, OperationName: "TestHappyPath", }) if test.expectError { c.Assert(err, chk.NotNil) } else { c.Assert(err, chk.IsNil) c.Assert(totalSizeCount, chk.Equals, test.transferSize) c.Assert(runCount, chk.Equals, ((test.transferSize-1)/test.chunkSize)+1) } } } // mock a memory mapped file (low-quality mock, meant to simulate the scenario only) type mockMMF struct { isClosed bool failHandle *chk.C } // accept input func (m *mockMMF) write(input string) { if m.isClosed { // simulate panic m.failHandle.Fail() } } func (s *aztestsSuite) TestDoBatchTransferWithError(c *chk.C) { ctx := context.Background() mmf := mockMMF{failHandle: c} expectedFirstError := errors.New("#3 means trouble") err := DoBatchTransfer(ctx, BatchTransferOptions{ TransferSize: 5, ChunkSize: 1, Parallelism: 5, Operation: func(offset int64, chunkSize int64, ctx context.Context) error { // simulate doing some work (HTTP call in real scenarios) // later chunks later longer to finish time.Sleep(time.Second * time.Duration(offset)) // simulate having gotten data and write it to the memory mapped file mmf.write("input") // with one of the chunks, pretend like an error occurred (like the network connection breaks) if offset == 3 { return expectedFirstError } else if offset > 3 { // anything after offset=3 are canceled // so verify that the context indeed got canceled ctxErr := ctx.Err() c.Assert(ctxErr, chk.Equals, context.Canceled) return ctxErr } // anything before offset=3 should be done without problem return nil }, OperationName: "TestErrorPath", }) c.Assert(err, chk.Equals, expectedFirstError) // simulate closing the mmf and make sure no panic occurs (as reported in #139) mmf.isClosed = true time.Sleep(time.Second * 5) } 
azure-storage-blob-go-0.10.0/azblob/zt_policy_retry_test.go000066400000000000000000000200631367515646300240200ustar00rootroot00000000000000package azblob import ( "context" "fmt" "io" "net/http" "net/url" "strings" "time" chk "gopkg.in/check.v1" "github.com/Azure/azure-pipeline-go/pipeline" ) // For testing docs, see: https://labix.org/gocheck // To test a specific test: go test -check.f MyTestSuite type retryTestScenario int32 const ( // Retry until success. Max reties hit. Operation time out prevents additional retries retryTestScenarioRetryUntilSuccess retryTestScenario = 1 retryTestScenarioRetryUntilOperationCancel retryTestScenario = 2 retryTestScenarioRetryUntilMaxRetries retryTestScenario = 3 ) func (s *aztestsSuite) TestRetryTestScenarioUntilSuccess(c *chk.C) { testRetryTestScenario(c, retryTestScenarioRetryUntilSuccess) } func (s *aztestsSuite) TestRetryTestScenarioUntilOperationCancel(c *chk.C) { testRetryTestScenario(c, retryTestScenarioRetryUntilOperationCancel) } func (s *aztestsSuite) TestRetryTestScenarioUntilMaxRetries(c *chk.C) { testRetryTestScenario(c, retryTestScenarioRetryUntilMaxRetries) } func newRetryTestPolicyFactory(c *chk.C, scenario retryTestScenario, maxRetries int32, cancel context.CancelFunc) *retryTestPolicyFactory { return &retryTestPolicyFactory{c: c, scenario: scenario, maxRetries: maxRetries, cancel: cancel} } type retryTestPolicyFactory struct { c *chk.C scenario retryTestScenario maxRetries int32 cancel context.CancelFunc try int32 } func (f *retryTestPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { f.try = 0 // Reset this for each test return &retryTestPolicy{factory: f, next: next} } type retryTestPolicy struct { next pipeline.Policy factory *retryTestPolicyFactory } type retryError struct { temporary, timeout bool } func (e *retryError) Temporary() bool { return e.temporary } func (e *retryError) Timeout() bool { return e.timeout } func (e *retryError) Error() string { return 
fmt.Sprintf("Temporary=%t, Timeout=%t", e.Temporary(), e.Timeout()) } type httpResponse struct { response *http.Response } func (r *httpResponse) Response() *http.Response { return r.response } func (p *retryTestPolicy) Do(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) { c := p.factory.c p.factory.try++ // Increment the try c.Assert(p.factory.try <= p.factory.maxRetries, chk.Equals, true) // Ensure # of tries < MaxRetries req := request.Request // Validate the expected pre-conditions for each try expectedHost := "PrimaryDC" if p.factory.try%2 == 0 { if p.factory.scenario != retryTestScenarioRetryUntilSuccess || p.factory.try <= 4 { expectedHost = "SecondaryDC" } } c.Assert(req.URL.Host, chk.Equals, expectedHost) // Ensure we got the expected primary/secondary DC // Ensure that any headers & query parameters this method adds (later) are removed/reset for each try c.Assert(req.Header.Get("TestHeader"), chk.Equals, "") // Ensure our "TestHeader" is not in the HTTP request values := req.URL.Query() c.Assert(len(values["TestQueryParam"]), chk.Equals, 0) // TestQueryParam shouldn't be in the HTTP request if seeker, ok := req.Body.(io.ReadSeeker); !ok { c.Fail() // Body must be an io.ReadSeeker } else { pos, err := seeker.Seek(0, io.SeekCurrent) c.Assert(err, chk.IsNil) // Ensure that body was seekable c.Assert(pos, chk.Equals, int64(0)) // Ensure body seeked back to position 0 } // Add a query param & header; these not be here on the next try values["TestQueryParam"] = []string{"TestQueryParamValue"} req.Header.Set("TestHeader", "TestValue") // Add a header this not exist with each try b := []byte{0} n, err := req.Body.Read(b) c.Assert(n, chk.Equals, 1) // Read failed switch p.factory.scenario { case retryTestScenarioRetryUntilSuccess: switch p.factory.try { case 1: if deadline, ok := ctx.Deadline(); ok { time.Sleep(time.Until(deadline) + time.Second) // Let the context timeout expire } err = ctx.Err() case 2: err = 
&retryError{temporary: true} case 3: err = &retryError{timeout: true} case 4: response = &httpResponse{response: &http.Response{StatusCode: http.StatusNotFound}} case 5: err = &retryError{temporary: true} // These attempts all fail but we're making sure we never see the secondary DC again case 6: response = &httpResponse{response: &http.Response{StatusCode: http.StatusOK}} // Stop retries with valid response default: c.Fail() // Retries should have stopped so we shouldn't get here } case retryTestScenarioRetryUntilOperationCancel: switch p.factory.try { case 1: p.factory.cancel() err = context.Canceled default: c.Fail() // Retries should have stopped so we shouldn't get here } case retryTestScenarioRetryUntilMaxRetries: err = &retryError{temporary: true} // Keep retrying until maxRetries is hit } return response, err // Return the response & err } func testRetryTestScenario(c *chk.C, scenario retryTestScenario) { u, _ := url.Parse("http://PrimaryDC") retryOptions := RetryOptions{ Policy: RetryPolicyExponential, MaxTries: 6, TryTimeout: 2 * time.Second, RetryDelay: 1 * time.Second, MaxRetryDelay: 4 * time.Second, RetryReadsFromSecondaryHost: "SecondaryDC", } minExpectedTimeToMaxRetries := (retryOptions.MaxRetryDelay * time.Duration(retryOptions.MaxTries-3)) / 2 // a very rough approximation, of a lower bound, given assumption that we hit the cap early in the retry count, and pessimistically assuming that all get halved by random jitter calcs ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 64 /*2^MaxTries(6)*/ *retryOptions.TryTimeout) retrytestPolicyFactory := newRetryTestPolicyFactory(c, scenario, retryOptions.MaxTries, cancel) factories := [...]pipeline.Factory{ NewRetryPolicyFactory(retryOptions), retrytestPolicyFactory, } p := pipeline.NewPipeline(factories[:], pipeline.Options{}) request, err := pipeline.NewRequest(http.MethodGet, *u, strings.NewReader("TestData")) start := time.Now() response, err := p.Do(ctx, nil, request) switch scenario 
{ case retryTestScenarioRetryUntilSuccess: if err != nil || response == nil || response.Response() == nil || response.Response().StatusCode != http.StatusOK { c.Fail() // Operation didn't run to success } case retryTestScenarioRetryUntilMaxRetries: c.Assert(err, chk.NotNil) // Ensure we ended with an error c.Assert(response, chk.IsNil) // Ensure we ended without a valid response c.Assert(retrytestPolicyFactory.try, chk.Equals, retryOptions.MaxTries) // Ensure the operation ends with the exact right number of tries c.Assert(time.Since(start) > minExpectedTimeToMaxRetries, chk.Equals, true) // Ensure it took about as long to get here as we expect (bearing in mind randomness in the jitter), as a basic sanity check of our delay duration calculations case retryTestScenarioRetryUntilOperationCancel: c.Assert(err, chk.Equals, context.Canceled) // Ensure we ended due to cancellation c.Assert(response, chk.IsNil) // Ensure we ended without a valid response c.Assert(retrytestPolicyFactory.try <= retryOptions.MaxTries, chk.Equals, true) // Ensure we didn't end due to reaching max tries } cancel() } /* Fail primary; retry should be on secondary URL - maybe do this twice Fail secondary; and never see primary again Make sure any mutations are lost on each retry Make sure body is reset on each retry Timeout a try; should retry (unless no more) timeout an operation; should not retry check timeout query param; should be try timeout Return Temporary() = true; should retry (unless max) Return Timeout() true; should retry (unless max) Secondary try returns 404; no more tries against secondary error where Temporary() and Timeout() return false; no retry error where Temporary() & Timeout don't exist; no retry no error; no retry; return success, nil */ azure-storage-blob-go-0.10.0/azblob/zt_retry_reader_test.go000066400000000000000000000323411367515646300237650ustar00rootroot00000000000000package azblob import ( "context" "crypto/rand" "errors" "fmt" "io" "net" "net/http" "time" chk 
"gopkg.in/check.v1" ) // Testings for RetryReader // This reader return one byte through each Read call type perByteReader struct { RandomBytes []byte // Random generated bytes byteCount int // Bytes can be returned before EOF currentByteIndex int // Bytes that have already been returned. doInjectError bool doInjectErrorByteIndex int doInjectTimes int injectedError error // sleepDuraion and closeChannel are only use in "forced cancellation" tests sleepDuration time.Duration closeChannel chan struct{} } func newPerByteReader(byteCount int) *perByteReader { perByteReader := perByteReader{ byteCount: byteCount, closeChannel: nil, } perByteReader.RandomBytes = make([]byte, byteCount) _, _ = rand.Read(perByteReader.RandomBytes) return &perByteReader } func newSingleUsePerByteReader(contents []byte) *perByteReader { perByteReader := perByteReader{ byteCount: len(contents), closeChannel: make(chan struct{}, 10), } perByteReader.RandomBytes = contents return &perByteReader } func (r *perByteReader) Read(b []byte) (n int, err error) { if r.doInjectError && r.doInjectErrorByteIndex == r.currentByteIndex && r.doInjectTimes > 0 { r.doInjectTimes-- return 0, r.injectedError } if r.currentByteIndex < r.byteCount { n = copy(b, r.RandomBytes[r.currentByteIndex:r.currentByteIndex+1]) r.currentByteIndex += n // simulate a delay, which may be successful or, if we're closed from another go-routine, may return an // error select { case <-r.closeChannel: return n, errors.New(ReadOnClosedBodyMessage) case <-time.After(r.sleepDuration): return n, nil } } return 0, io.EOF } func (r *perByteReader) Close() error { if r.closeChannel != nil { r.closeChannel <- struct{}{} } return nil } // Test normal retry succeed, note initial response not provided. 
// Tests both with and without notification of failures func (s *aztestsSuite) TestRetryReaderReadWithRetry(c *chk.C) { // Test twice, the second time using the optional "logging"/notification callback for failed tries // We must test both with and without the callback, since be testing without // we are testing that it is, indeed, optional to provide the callback for _, logThisRun := range []bool{false, true} { // Extra setup for testing notification of failures (i.e. of unsuccessful tries) failureMethodNumCalls := 0 failureWillRetryCount := 0 failureLastReportedFailureCount := -1 var failureLastReportedError error = nil failureMethod := func(failureCount int, lastError error, offset int64, count int64, willRetry bool) { failureMethodNumCalls++ if willRetry { failureWillRetryCount++ } failureLastReportedFailureCount = failureCount failureLastReportedError = lastError } // Main test setup byteCount := 1 body := newPerByteReader(byteCount) body.doInjectError = true body.doInjectErrorByteIndex = 0 body.doInjectTimes = 1 body.injectedError = &net.DNSError{IsTemporary: true} getter := func(ctx context.Context, info HTTPGetterInfo) (*http.Response, error) { r := http.Response{} body.currentByteIndex = int(info.Offset) r.Body = body return &r, nil } httpGetterInfo := HTTPGetterInfo{Offset: 0, Count: int64(byteCount)} initResponse, err := getter(context.Background(), httpGetterInfo) c.Assert(err, chk.IsNil) rrOptions := RetryReaderOptions{MaxRetryRequests: 1} if logThisRun { rrOptions.NotifyFailedRead = failureMethod } retryReader := NewRetryReader(context.Background(), initResponse, httpGetterInfo, rrOptions, getter) // should fail and succeed through retry can := make([]byte, 1) n, err := retryReader.Read(can) c.Assert(n, chk.Equals, 1) c.Assert(err, chk.IsNil) // check "logging", if it was enabled if logThisRun { // We only expect one failed try in this test // And the notification method is not called for successes c.Assert(failureMethodNumCalls, chk.Equals, 1) // 
this is the number of calls we counted c.Assert(failureWillRetryCount, chk.Equals, 1) // the sole failure was retried c.Assert(failureLastReportedFailureCount, chk.Equals, 1) // this is the number of failures reported by the notification method c.Assert(failureLastReportedError, chk.NotNil) } // should return EOF n, err = retryReader.Read(can) c.Assert(n, chk.Equals, 0) c.Assert(err, chk.Equals, io.EOF) } } // Test normal retry succeed, note initial response not provided. // Tests both with and without notification of failures func (s *aztestsSuite) TestRetryReaderWithRetryIoUnexpectedEOF(c *chk.C) { // Test twice, the second time using the optional "logging"/notification callback for failed tries // We must test both with and without the callback, since be testing without // we are testing that it is, indeed, optional to provide the callback for _, logThisRun := range []bool{false, true} { // Extra setup for testing notification of failures (i.e. of unsuccessful tries) failureMethodNumCalls := 0 failureWillRetryCount := 0 failureLastReportedFailureCount := -1 var failureLastReportedError error = nil failureMethod := func(failureCount int, lastError error, offset int64, count int64, willRetry bool) { failureMethodNumCalls++ if willRetry { failureWillRetryCount++ } failureLastReportedFailureCount = failureCount failureLastReportedError = lastError } // Main test setup byteCount := 1 body := newPerByteReader(byteCount) body.doInjectError = true body.doInjectErrorByteIndex = 0 body.doInjectTimes = 1 body.injectedError = io.ErrUnexpectedEOF getter := func(ctx context.Context, info HTTPGetterInfo) (*http.Response, error) { r := http.Response{} body.currentByteIndex = int(info.Offset) r.Body = body return &r, nil } httpGetterInfo := HTTPGetterInfo{Offset: 0, Count: int64(byteCount)} initResponse, err := getter(context.Background(), httpGetterInfo) c.Assert(err, chk.IsNil) rrOptions := RetryReaderOptions{MaxRetryRequests: 1} if logThisRun { rrOptions.NotifyFailedRead = 
failureMethod } retryReader := NewRetryReader(context.Background(), initResponse, httpGetterInfo, rrOptions, getter) // should fail and succeed through retry can := make([]byte, 1) n, err := retryReader.Read(can) c.Assert(n, chk.Equals, 1) c.Assert(err, chk.IsNil) // check "logging", if it was enabled if logThisRun { // We only expect one failed try in this test // And the notification method is not called for successes c.Assert(failureMethodNumCalls, chk.Equals, 1) // this is the number of calls we counted c.Assert(failureWillRetryCount, chk.Equals, 1) // the sole failure was retried c.Assert(failureLastReportedFailureCount, chk.Equals, 1) // this is the number of failures reported by the notification method c.Assert(failureLastReportedError, chk.NotNil) } // should return EOF n, err = retryReader.Read(can) c.Assert(n, chk.Equals, 0) c.Assert(err, chk.Equals, io.EOF) } } // Test normal retry fail as retry Count not enough. func (s *aztestsSuite) TestRetryReaderReadNegativeNormalFail(c *chk.C) { // Extra setup for testing notification of failures (i.e. 
of unsuccessful tries) failureMethodNumCalls := 0 failureWillRetryCount := 0 failureLastReportedFailureCount := -1 var failureLastReportedError error = nil failureMethod := func(failureCount int, lastError error, offset int64, count int64, willRetry bool) { failureMethodNumCalls++ if willRetry { failureWillRetryCount++ } failureLastReportedFailureCount = failureCount failureLastReportedError = lastError } // Main test setup byteCount := 1 body := newPerByteReader(byteCount) body.doInjectError = true body.doInjectErrorByteIndex = 0 body.doInjectTimes = 2 body.injectedError = &net.DNSError{IsTemporary: true} startResponse := http.Response{} startResponse.Body = body getter := func(ctx context.Context, info HTTPGetterInfo) (*http.Response, error) { r := http.Response{} body.currentByteIndex = int(info.Offset) r.Body = body return &r, nil } rrOptions := RetryReaderOptions{ MaxRetryRequests: 1, NotifyFailedRead: failureMethod} retryReader := NewRetryReader(context.Background(), &startResponse, HTTPGetterInfo{Offset: 0, Count: int64(byteCount)}, rrOptions, getter) // should fail can := make([]byte, 1) n, err := retryReader.Read(can) c.Assert(n, chk.Equals, 0) c.Assert(err, chk.Equals, body.injectedError) // Check that we recieved the right notification callbacks // We only expect two failed tries in this test, but only one // of the would have had willRetry = true c.Assert(failureMethodNumCalls, chk.Equals, 2) // this is the number of calls we counted c.Assert(failureWillRetryCount, chk.Equals, 1) // only the first failure was retried c.Assert(failureLastReportedFailureCount, chk.Equals, 2) // this is the number of failures reported by the notification method c.Assert(failureLastReportedError, chk.NotNil) } // Test boundary case when Count equals to 0 and fail. 
func (s *aztestsSuite) TestRetryReaderReadCount0(c *chk.C) { byteCount := 1 body := newPerByteReader(byteCount) body.doInjectError = true body.doInjectErrorByteIndex = 1 body.doInjectTimes = 1 body.injectedError = &net.DNSError{IsTemporary: true} startResponse := http.Response{} startResponse.Body = body getter := func(ctx context.Context, info HTTPGetterInfo) (*http.Response, error) { r := http.Response{} body.currentByteIndex = int(info.Offset) r.Body = body return &r, nil } retryReader := NewRetryReader(context.Background(), &startResponse, HTTPGetterInfo{Offset: 0, Count: int64(byteCount)}, RetryReaderOptions{MaxRetryRequests: 1}, getter) // should consume the only byte can := make([]byte, 1) n, err := retryReader.Read(can) c.Assert(n, chk.Equals, 1) c.Assert(err, chk.IsNil) // should not read when Count=0, and should return EOF n, err = retryReader.Read(can) c.Assert(n, chk.Equals, 0) c.Assert(err, chk.Equals, io.EOF) } func (s *aztestsSuite) TestRetryReaderReadNegativeNonRetriableError(c *chk.C) { byteCount := 1 body := newPerByteReader(byteCount) body.doInjectError = true body.doInjectErrorByteIndex = 0 body.doInjectTimes = 1 body.injectedError = fmt.Errorf("not retriable error") startResponse := http.Response{} startResponse.Body = body getter := func(ctx context.Context, info HTTPGetterInfo) (*http.Response, error) { r := http.Response{} body.currentByteIndex = int(info.Offset) r.Body = body return &r, nil } retryReader := NewRetryReader(context.Background(), &startResponse, HTTPGetterInfo{Offset: 0, Count: int64(byteCount)}, RetryReaderOptions{MaxRetryRequests: 2}, getter) dest := make([]byte, 1) _, err := retryReader.Read(dest) c.Assert(err, chk.Equals, body.injectedError) } // Test the case where we programmatically force a retry to happen, via closing the body early from another goroutine // Unlike the retries orchestrated elsewhere in this test file, which simulate network failures for the // purposes of unit testing, here we are testing the 
cancellation mechanism that is exposed to // consumers of the API, to allow programmatic forcing of retries (e.g. if the consumer deems // the read to be taking too long, they may force a retry in the hope of better performance next time). func (s *aztestsSuite) TestRetryReaderReadWithForcedRetry(c *chk.C) { for _, enableRetryOnEarlyClose := range []bool{false, true} { // use the notification callback, so we know that the retry really did happen failureMethodNumCalls := 0 failureMethod := func(failureCount int, lastError error, offset int64, count int64, willRetry bool) { failureMethodNumCalls++ } // Main test setup byteCount := 10 // so multiple passes through read loop will be required sleepDuration := 100 * time.Millisecond randBytes := make([]byte, byteCount) _, _ = rand.Read(randBytes) getter := func(ctx context.Context, info HTTPGetterInfo) (*http.Response, error) { body := newSingleUsePerByteReader(randBytes) // make new one every time, since we force closes in this test, and its unusable after a close body.sleepDuration = sleepDuration r := http.Response{} body.currentByteIndex = int(info.Offset) r.Body = body return &r, nil } httpGetterInfo := HTTPGetterInfo{Offset: 0, Count: int64(byteCount)} initResponse, err := getter(context.Background(), httpGetterInfo) c.Assert(err, chk.IsNil) rrOptions := RetryReaderOptions{MaxRetryRequests: 2, TreatEarlyCloseAsError: !enableRetryOnEarlyClose} rrOptions.NotifyFailedRead = failureMethod retryReader := NewRetryReader(context.Background(), initResponse, httpGetterInfo, rrOptions, getter) // set up timed cancellation from separate goroutine go func() { time.Sleep(sleepDuration * 5) retryReader.Close() }() // do the read (should fail, due to forced cancellation, and succeed through retry) output := make([]byte, byteCount) n, err := io.ReadFull(retryReader, output) if enableRetryOnEarlyClose { c.Assert(n, chk.Equals, byteCount) c.Assert(err, chk.IsNil) c.Assert(output, chk.DeepEquals, randBytes) 
c.Assert(failureMethodNumCalls, chk.Equals, 1) // assert that the cancellation did indeed happen } else { c.Assert(err, chk.NotNil) } } } // End testings for RetryReader azure-storage-blob-go-0.10.0/azblob/zt_sas_blob_snapshot_test.go000066400000000000000000000064651367515646300250110ustar00rootroot00000000000000package azblob import ( "bytes" "strings" "time" chk "gopkg.in/check.v1" ) func (s *aztestsSuite) TestSnapshotSAS(c *chk.C) { //Generate URLs ---------------------------------------------------------------------------------------------------- bsu := getBSU() containerURL, containerName := getContainerURL(c, bsu) blobURL, blobName := getBlockBlobURL(c, containerURL) _, err := containerURL.Create(ctx, Metadata{}, PublicAccessNone) defer containerURL.Delete(ctx, ContainerAccessConditions{}) if err != nil { c.Fatal(err) } //Create file in container, download from snapshot to test. -------------------------------------------------------- burl := containerURL.NewBlockBlobURL(blobName) data := "Hello world!" 
_, err = burl.Upload(ctx, strings.NewReader(data), BlobHTTPHeaders{ContentType: "text/plain"}, Metadata{}, BlobAccessConditions{}) if err != nil { c.Fatal(err) } //Create a snapshot & URL createSnapshot, err := burl.CreateSnapshot(ctx, Metadata{}, BlobAccessConditions{}) if err != nil { c.Fatal(err) } //Format snapshot time snapTime, err := time.Parse(SnapshotTimeFormat, createSnapshot.Snapshot()) if err != nil { c.Fatal(err) } //Get credentials & current time currentTime := time.Now().UTC() credential, err := getGenericCredential("") if err != nil { c.Fatal("Invalid credential") } //Create SAS query snapSASQueryParams, err := BlobSASSignatureValues{ StartTime: currentTime, ExpiryTime: currentTime.Add(48 * time.Hour), SnapshotTime: snapTime, Permissions: "racwd", ContainerName: containerName, BlobName: blobName, Protocol: SASProtocolHTTPS, }.NewSASQueryParameters(credential) if err != nil { c.Fatal(err) } //Attach SAS query to block blob URL p := NewPipeline(NewAnonymousCredential(), PipelineOptions{}) snapParts := NewBlobURLParts(blobURL.URL()) snapParts.SAS = snapSASQueryParams sburl := NewBlockBlobURL(snapParts.URL(), p) //Test the snapshot downloadResponse, err := sburl.Download(ctx, 0, 0, BlobAccessConditions{}, false) if err != nil { c.Fatal(err) } downloadedData := &bytes.Buffer{} reader := downloadResponse.Body(RetryReaderOptions{}) downloadedData.ReadFrom(reader) reader.Close() c.Assert(data, chk.Equals, downloadedData.String()) //Try to delete snapshot ------------------------------------------------------------------------------------------- _, err = sburl.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{}) if err != nil { //This shouldn't fail. c.Fatal(err) } //Create a normal blob and attempt to use the snapshot SAS against it (assuming failure) --------------------------- //If this succeeds, it means a normal SAS token was created. 
fsburl := containerURL.NewBlockBlobURL("failsnap") _, err = fsburl.Upload(ctx, strings.NewReader(data), BlobHTTPHeaders{ContentType: "text/plain"}, Metadata{}, BlobAccessConditions{}) if err != nil { c.Fatal(err) //should succeed to create the blob via normal auth means } fsburlparts := NewBlobURLParts(fsburl.URL()) fsburlparts.SAS = snapSASQueryParams fsburl = NewBlockBlobURL(fsburlparts.URL(), p) //re-use fsburl as we don't need the sharedkey version anymore resp, err := fsburl.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{}) if err == nil { c.Fatal(resp) //This SHOULD fail. Otherwise we have a normal SAS token... } } azure-storage-blob-go-0.10.0/azblob/zt_test.go000066400000000000000000000310661367515646300212210ustar00rootroot00000000000000package azblob import ( "bytes" "context" "encoding/json" "errors" "fmt" "io/ioutil" "math/rand" "net/url" "os" "reflect" "runtime" "strings" "testing" "time" chk "gopkg.in/check.v1" "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/go-autorest/autorest/adal" ) // For testing docs, see: https://labix.org/gocheck // To test a specific test: go test -check.f MyTestSuite // Hookup to the testing framework func Test(t *testing.T) { chk.TestingT(t) } type aztestsSuite struct{} var _ = chk.Suite(&aztestsSuite{}) func (s *aztestsSuite) TestRetryPolicyRetryReadsFromSecondaryHostField(c *chk.C) { _, found := reflect.TypeOf(RetryOptions{}).FieldByName("RetryReadsFromSecondaryHost") if !found { // Make sure the RetryOption was not erroneously overwritten c.Fatal("RetryOption's RetryReadsFromSecondaryHost field must exist in the Blob SDK - uncomment it and make sure the field is returned from the retryReadsFromSecondaryHost() method too!") } } const ( containerPrefix = "go" blobPrefix = "gotestblob" blockBlobDefaultData = "GoBlockBlobData" validationErrorSubstring = "validation failed" invalidHeaderErrorSubstring = "invalid header field" // error thrown by the http client ) var ctx = context.Background() var 
basicHeaders = BlobHTTPHeaders{ ContentType: "my_type", ContentDisposition: "my_disposition", CacheControl: "control", ContentMD5: nil, ContentLanguage: "my_language", ContentEncoding: "my_encoding", } var basicMetadata = Metadata{"foo": "bar"} type testPipeline struct{} const testPipelineMessage string = "Test factory invoked" func (tm testPipeline) Do(ctx context.Context, methodFactory pipeline.Factory, request pipeline.Request) (pipeline.Response, error) { return nil, errors.New(testPipelineMessage) } // This function generates an entity name by concatenating the passed prefix, // the name of the test requesting the entity name, and the minute, second, and nanoseconds of the call. // This should make it easy to associate the entities with their test, uniquely identify // them, and determine the order in which they were created. // Note that this imposes a restriction on the length of test names func generateName(prefix string) string { // These next lines up through the for loop are obtaining and walking up the stack // trace to extrat the test name, which is stored in name pc := make([]uintptr, 10) runtime.Callers(0, pc) frames := runtime.CallersFrames(pc) name := "" for f, next := frames.Next(); next; f, next = frames.Next() { name = f.Function if strings.Contains(name, "Suite") { break } } funcNameStart := strings.Index(name, "Test") name = name[funcNameStart+len("Test"):] // Just get the name of the test and not any of the garbage at the beginning name = strings.ToLower(name) // Ensure it is a valid resource name currentTime := time.Now() name = fmt.Sprintf("%s%s%d%d%d", prefix, strings.ToLower(name), currentTime.Minute(), currentTime.Second(), currentTime.Nanosecond()) return name } func generateContainerName() string { return generateName(containerPrefix) } func generateBlobName() string { return generateName(blobPrefix) } func getContainerURL(c *chk.C, bsu ServiceURL) (container ContainerURL, name string) { name = generateContainerName() container = 
bsu.NewContainerURL(name) return container, name } func getBlockBlobURL(c *chk.C, container ContainerURL) (blob BlockBlobURL, name string) { name = generateBlobName() blob = container.NewBlockBlobURL(name) return blob, name } func getAppendBlobURL(c *chk.C, container ContainerURL) (blob AppendBlobURL, name string) { name = generateBlobName() blob = container.NewAppendBlobURL(name) return blob, name } func getPageBlobURL(c *chk.C, container ContainerURL) (blob PageBlobURL, name string) { name = generateBlobName() blob = container.NewPageBlobURL(name) return } func getReaderToRandomBytes(n int) *bytes.Reader { r, _ := getRandomDataAndReader(n) return r } func getRandomDataAndReader(n int) (*bytes.Reader, []byte) { data := make([]byte, n, n) rand.Read(data) return bytes.NewReader(data), data } func createNewContainer(c *chk.C, bsu ServiceURL) (container ContainerURL, name string) { container, name = getContainerURL(c, bsu) cResp, err := container.Create(ctx, nil, PublicAccessNone) c.Assert(err, chk.IsNil) c.Assert(cResp.StatusCode(), chk.Equals, 201) return container, name } func createNewContainerWithSuffix(c *chk.C, bsu ServiceURL, suffix string) (container ContainerURL, name string) { // The goal of adding the suffix is to be able to predetermine what order the containers will be in when listed. // We still need the container prefix to come first, though, to ensure only containers as a part of this test // are listed at all. 
name = generateName(containerPrefix + suffix) container = bsu.NewContainerURL(name) cResp, err := container.Create(ctx, nil, PublicAccessNone) c.Assert(err, chk.IsNil) c.Assert(cResp.StatusCode(), chk.Equals, 201) return container, name } func createNewBlockBlob(c *chk.C, container ContainerURL) (blob BlockBlobURL, name string) { blob, name = getBlockBlobURL(c, container) cResp, err := blob.Upload(ctx, strings.NewReader(blockBlobDefaultData), BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(cResp.StatusCode(), chk.Equals, 201) return } func createNewAppendBlob(c *chk.C, container ContainerURL) (blob AppendBlobURL, name string) { blob, name = getAppendBlobURL(c, container) resp, err := blob.Create(ctx, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.StatusCode(), chk.Equals, 201) return } func createNewPageBlob(c *chk.C, container ContainerURL) (blob PageBlobURL, name string) { blob, name = getPageBlobURL(c, container) resp, err := blob.Create(ctx, PageBlobPageBytes*10, 0, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.StatusCode(), chk.Equals, 201) return } func createNewPageBlobWithSize(c *chk.C, container ContainerURL, sizeInBytes int64) (blob PageBlobURL, name string) { blob, name = getPageBlobURL(c, container) resp, err := blob.Create(ctx, sizeInBytes, 0, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.StatusCode(), chk.Equals, 201) return } func createBlockBlobWithPrefix(c *chk.C, container ContainerURL, prefix string) (blob BlockBlobURL, name string) { name = prefix + generateName(blobPrefix) blob = container.NewBlockBlobURL(name) cResp, err := blob.Upload(ctx, strings.NewReader(blockBlobDefaultData), BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(cResp.StatusCode(), chk.Equals, 201) return } func deleteContainer(c *chk.C, container ContainerURL) { resp, err := 
container.Delete(ctx, ContainerAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.StatusCode(), chk.Equals, 202) } func getGenericCredential(accountType string) (*SharedKeyCredential, error) { accountNameEnvVar := accountType + "ACCOUNT_NAME" accountKeyEnvVar := accountType + "ACCOUNT_KEY" accountName, accountKey := os.Getenv(accountNameEnvVar), os.Getenv(accountKeyEnvVar) if accountName == "" || accountKey == "" { return nil, errors.New(accountNameEnvVar + " and/or " + accountKeyEnvVar + " environment variables not specified.") } return NewSharedKeyCredential(accountName, accountKey) } //getOAuthCredential can intake a OAuth credential from environment variables in one of the following ways: //Direct: Supply a ADAL OAuth token in OAUTH_TOKEN and application ID in APPLICATION_ID to refresh the supplied token. //Client secret: Supply a client secret in CLIENT_SECRET and application ID in APPLICATION_ID for SPN auth. //TENANT_ID is optional and will be inferred as common if it is not explicitly defined. 
func getOAuthCredential(accountType string) (*TokenCredential, error) { oauthTokenEnvVar := accountType + "OAUTH_TOKEN" clientSecretEnvVar := accountType + "CLIENT_SECRET" applicationIdEnvVar := accountType + "APPLICATION_ID" tenantIdEnvVar := accountType + "TENANT_ID" oauthToken, appId, tenantId, clientSecret := []byte(os.Getenv(oauthTokenEnvVar)), os.Getenv(applicationIdEnvVar), os.Getenv(tenantIdEnvVar), os.Getenv(clientSecretEnvVar) if (len(oauthToken) == 0 && clientSecret == "") || appId == "" { return nil, errors.New("(" + oauthTokenEnvVar + " OR " + clientSecretEnvVar + ") and/or " + applicationIdEnvVar + " environment variables not specified.") } if tenantId == "" { tenantId = "common" } var Token adal.Token if len(oauthToken) != 0 { if err := json.Unmarshal(oauthToken, &Token); err != nil { return nil, err } } var spt *adal.ServicePrincipalToken oauthConfig, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantId) if err != nil { return nil, err } if len(oauthToken) == 0 { spt, err = adal.NewServicePrincipalToken( *oauthConfig, appId, clientSecret, "https://storage.azure.com") if err != nil { return nil, err } } else { spt, err = adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, appId, "https://storage.azure.com", Token, ) if err != nil { return nil, err } } err = spt.Refresh() if err != nil { return nil, err } tc := NewTokenCredential(spt.Token().AccessToken, func(tc TokenCredential) time.Duration { _ = spt.Refresh() return time.Until(spt.Token().Expires()) }) return &tc, nil } func getGenericBSU(accountType string) (ServiceURL, error) { credential, err := getGenericCredential(accountType) if err != nil { return ServiceURL{}, err } pipeline := NewPipeline(credential, PipelineOptions{}) blobPrimaryURL, _ := url.Parse("https://" + credential.AccountName() + ".blob.core.windows.net/") return NewServiceURL(*blobPrimaryURL, pipeline), nil } func getBSU() ServiceURL { bsu, _ := getGenericBSU("") return bsu } func getAlternateBSU() 
(ServiceURL, error) { return getGenericBSU("SECONDARY_") } func getPremiumBSU() (ServiceURL, error) { return getGenericBSU("PREMIUM_") } func getBlobStorageBSU() (ServiceURL, error) { return getGenericBSU("BLOB_STORAGE_") } func validateStorageError(c *chk.C, err error, code ServiceCodeType) { serr, _ := err.(StorageError) c.Assert(serr.ServiceCode(), chk.Equals, code) } func getRelativeTimeGMT(amount time.Duration) time.Time { currentTime := time.Now().In(time.FixedZone("GMT", 0)) currentTime = currentTime.Add(amount * time.Second) return currentTime } func generateCurrentTimeWithModerateResolution() time.Time { highResolutionTime := time.Now().UTC() return time.Date(highResolutionTime.Year(), highResolutionTime.Month(), highResolutionTime.Day(), highResolutionTime.Hour(), highResolutionTime.Minute(), highResolutionTime.Second(), 0, highResolutionTime.Location()) } // Some tests require setting service properties. It can take up to 30 seconds for the new properties to be reflected across all FEs. // We will enable the necessary property and try to run the test implementation. If it fails with an error that should be due to // those changes not being reflected yet, we will wait 30 seconds and try the test again. If it fails this time for any reason, // we fail the test. It is the responsibility of the the testImplFunc to determine which error string indicates the test should be retried. // There can only be one such string. All errors that cannot be due to this detail should be asserted and not returned as an error string. func runTestRequiringServiceProperties(c *chk.C, bsu ServiceURL, code string, enableServicePropertyFunc func(*chk.C, ServiceURL), testImplFunc func(*chk.C, ServiceURL) error, disableServicePropertyFunc func(*chk.C, ServiceURL)) { enableServicePropertyFunc(c, bsu) defer disableServicePropertyFunc(c, bsu) err := testImplFunc(c, bsu) // We cannot assume that the error indicative of slow update will necessarily be a StorageError. As in ListBlobs. 
if err != nil && err.Error() == code { time.Sleep(time.Second * 30) err = testImplFunc(c, bsu) c.Assert(err, chk.IsNil) } } func enableSoftDelete(c *chk.C, bsu ServiceURL) { days := int32(1) _, err := bsu.SetProperties(ctx, StorageServiceProperties{DeleteRetentionPolicy: &RetentionPolicy{Enabled: true, Days: &days}}) c.Assert(err, chk.IsNil) } func disableSoftDelete(c *chk.C, bsu ServiceURL) { _, err := bsu.SetProperties(ctx, StorageServiceProperties{DeleteRetentionPolicy: &RetentionPolicy{Enabled: false}}) c.Assert(err, chk.IsNil) } func validateUpload(c *chk.C, blobURL BlockBlobURL) { resp, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false) c.Assert(err, chk.IsNil) data, _ := ioutil.ReadAll(resp.Response().Body) c.Assert(data, chk.HasLen, 0) } azure-storage-blob-go-0.10.0/azblob/zt_url_append_blob_test.go000066400000000000000000000631301367515646300244250ustar00rootroot00000000000000package azblob import ( "context" "io/ioutil" "time" "crypto/md5" "bytes" "strings" chk "gopkg.in/check.v1" // go get gopkg.in/check.v1 ) func (s *aztestsSuite) TestAppendBlock(c *chk.C) { bsu := getBSU() container, _ := createNewContainer(c, bsu) defer delContainer(c, container) blob := container.NewAppendBlobURL(generateBlobName()) resp, err := blob.Create(context.Background(), BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.StatusCode(), chk.Equals, 201) appendResp, err := blob.AppendBlock(context.Background(), getReaderToRandomBytes(1024), AppendBlobAccessConditions{}, nil) c.Assert(err, chk.IsNil) c.Assert(appendResp.Response().StatusCode, chk.Equals, 201) c.Assert(appendResp.BlobAppendOffset(), chk.Equals, "0") c.Assert(appendResp.BlobCommittedBlockCount(), chk.Equals, int32(1)) c.Assert(appendResp.ETag(), chk.Not(chk.Equals), ETagNone) c.Assert(appendResp.LastModified().IsZero(), chk.Equals, false) c.Assert(appendResp.ContentMD5(), chk.Not(chk.Equals), "") c.Assert(appendResp.RequestID(), chk.Not(chk.Equals), "") 
c.Assert(appendResp.Version(), chk.Not(chk.Equals), "") c.Assert(appendResp.Date().IsZero(), chk.Equals, false) appendResp, err = blob.AppendBlock(context.Background(), getReaderToRandomBytes(1024), AppendBlobAccessConditions{}, nil) c.Assert(err, chk.IsNil) c.Assert(appendResp.BlobAppendOffset(), chk.Equals, "1024") c.Assert(appendResp.BlobCommittedBlockCount(), chk.Equals, int32(2)) } func (s *aztestsSuite) TestAppendBlockWithMD5(c *chk.C) { bsu := getBSU() container, _ := createNewContainer(c, bsu) defer delContainer(c, container) // set up blob to test blob := container.NewAppendBlobURL(generateBlobName()) resp, err := blob.Create(context.Background(), BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.StatusCode(), chk.Equals, 201) // test append block with valid MD5 value readerToBody, body := getRandomDataAndReader(1024) md5Value := md5.Sum(body) appendResp, err := blob.AppendBlock(context.Background(), readerToBody, AppendBlobAccessConditions{}, md5Value[:]) c.Assert(err, chk.IsNil) c.Assert(appendResp.Response().StatusCode, chk.Equals, 201) c.Assert(appendResp.BlobAppendOffset(), chk.Equals, "0") c.Assert(appendResp.BlobCommittedBlockCount(), chk.Equals, int32(1)) c.Assert(appendResp.ETag(), chk.Not(chk.Equals), ETagNone) c.Assert(appendResp.LastModified().IsZero(), chk.Equals, false) c.Assert(appendResp.ContentMD5(), chk.DeepEquals, md5Value[:]) c.Assert(appendResp.RequestID(), chk.Not(chk.Equals), "") c.Assert(appendResp.Version(), chk.Not(chk.Equals), "") c.Assert(appendResp.Date().IsZero(), chk.Equals, false) // test append block with bad MD5 value readerToBody, body = getRandomDataAndReader(1024) _, badMD5 := getRandomDataAndReader(16) appendResp, err = blob.AppendBlock(context.Background(), readerToBody, AppendBlobAccessConditions{}, badMD5[:]) validateStorageError(c, err, ServiceCodeMd5Mismatch) } func (s *aztestsSuite) TestAppendBlockFromURL(c *chk.C) { bsu := getBSU() credential, err := getGenericCredential("") 
if err != nil { c.Fatal("Invalid credential") } container, _ := createNewContainer(c, bsu) defer delContainer(c, container) testSize := 4 * 1024 * 1024 // 4MB r, sourceData := getRandomDataAndReader(testSize) ctx := context.Background() // Use default Background context srcBlob := container.NewAppendBlobURL(generateName("appendsrc")) destBlob := container.NewAppendBlobURL(generateName("appenddest")) // Prepare source blob for copy. cResp1, err := srcBlob.Create(context.Background(), BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(cResp1.StatusCode(), chk.Equals, 201) appendResp, err := srcBlob.AppendBlock(context.Background(), r, AppendBlobAccessConditions{}, nil) c.Assert(err, chk.IsNil) c.Assert(appendResp.Response().StatusCode, chk.Equals, 201) c.Assert(appendResp.BlobAppendOffset(), chk.Equals, "0") c.Assert(appendResp.BlobCommittedBlockCount(), chk.Equals, int32(1)) c.Assert(appendResp.ETag(), chk.Not(chk.Equals), ETagNone) c.Assert(appendResp.LastModified().IsZero(), chk.Equals, false) c.Assert(appendResp.ContentMD5(), chk.Not(chk.Equals), "") c.Assert(appendResp.RequestID(), chk.Not(chk.Equals), "") c.Assert(appendResp.Version(), chk.Not(chk.Equals), "") c.Assert(appendResp.Date().IsZero(), chk.Equals, false) // Get source blob URL with SAS for AppendBlockFromURL. srcBlobParts := NewBlobURLParts(srcBlob.URL()) srcBlobParts.SAS, err = BlobSASSignatureValues{ Protocol: SASProtocolHTTPS, // Users MUST use HTTPS (not HTTP) ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration ContainerName: srcBlobParts.ContainerName, BlobName: srcBlobParts.BlobName, Permissions: BlobSASPermissions{Read: true}.String(), }.NewSASQueryParameters(credential) if err != nil { c.Fatal(err) } srcBlobURLWithSAS := srcBlobParts.URL() // Append block from URL. 
cResp2, err := destBlob.Create(context.Background(), BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(cResp2.StatusCode(), chk.Equals, 201) appendFromURLResp, err := destBlob.AppendBlockFromURL(ctx, srcBlobURLWithSAS, 0, int64(testSize), AppendBlobAccessConditions{}, ModifiedAccessConditions{}, nil) c.Assert(err, chk.IsNil) c.Assert(appendFromURLResp.Response().StatusCode, chk.Equals, 201) c.Assert(appendFromURLResp.BlobAppendOffset(), chk.Equals, "0") c.Assert(appendFromURLResp.BlobCommittedBlockCount(), chk.Equals, int32(1)) c.Assert(appendFromURLResp.ETag(), chk.Not(chk.Equals), ETagNone) c.Assert(appendFromURLResp.LastModified().IsZero(), chk.Equals, false) c.Assert(appendFromURLResp.ContentMD5(), chk.Not(chk.Equals), "") c.Assert(appendFromURLResp.RequestID(), chk.Not(chk.Equals), "") c.Assert(appendFromURLResp.Version(), chk.Not(chk.Equals), "") c.Assert(appendFromURLResp.Date().IsZero(), chk.Equals, false) // Check data integrity through downloading. downloadResp, err := destBlob.BlobURL.Download(ctx, 0, CountToEnd, BlobAccessConditions{}, false) c.Assert(err, chk.IsNil) destData, err := ioutil.ReadAll(downloadResp.Body(RetryReaderOptions{})) c.Assert(err, chk.IsNil) c.Assert(destData, chk.DeepEquals, sourceData) } func (s *aztestsSuite) TestAppendBlockFromURLWithMD5(c *chk.C) { bsu := getBSU() credential, err := getGenericCredential("") if err != nil { c.Fatal("Invalid credential") } container, _ := createNewContainer(c, bsu) defer delContainer(c, container) testSize := 4 * 1024 * 1024 // 4MB r, sourceData := getRandomDataAndReader(testSize) md5Value := md5.Sum(sourceData) ctx := context.Background() // Use default Background context srcBlob := container.NewAppendBlobURL(generateName("appendsrc")) destBlob := container.NewAppendBlobURL(generateName("appenddest")) // Prepare source blob for copy. 
cResp1, err := srcBlob.Create(context.Background(), BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(cResp1.StatusCode(), chk.Equals, 201) appendResp, err := srcBlob.AppendBlock(context.Background(), r, AppendBlobAccessConditions{}, nil) c.Assert(err, chk.IsNil) c.Assert(appendResp.Response().StatusCode, chk.Equals, 201) c.Assert(appendResp.BlobAppendOffset(), chk.Equals, "0") c.Assert(appendResp.BlobCommittedBlockCount(), chk.Equals, int32(1)) c.Assert(appendResp.ETag(), chk.Not(chk.Equals), ETagNone) c.Assert(appendResp.LastModified().IsZero(), chk.Equals, false) c.Assert(appendResp.ContentMD5(), chk.Not(chk.Equals), "") c.Assert(appendResp.RequestID(), chk.Not(chk.Equals), "") c.Assert(appendResp.Version(), chk.Not(chk.Equals), "") c.Assert(appendResp.Date().IsZero(), chk.Equals, false) // Get source blob URL with SAS for AppendBlockFromURL. srcBlobParts := NewBlobURLParts(srcBlob.URL()) srcBlobParts.SAS, err = BlobSASSignatureValues{ Protocol: SASProtocolHTTPS, // Users MUST use HTTPS (not HTTP) ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration ContainerName: srcBlobParts.ContainerName, BlobName: srcBlobParts.BlobName, Permissions: BlobSASPermissions{Read: true}.String(), }.NewSASQueryParameters(credential) if err != nil { c.Fatal(err) } srcBlobURLWithSAS := srcBlobParts.URL() // Append block from URL. 
cResp2, err := destBlob.Create(context.Background(), BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(cResp2.StatusCode(), chk.Equals, 201) appendFromURLResp, err := destBlob.AppendBlockFromURL(ctx, srcBlobURLWithSAS, 0, int64(testSize), AppendBlobAccessConditions{}, ModifiedAccessConditions{}, md5Value[:]) c.Assert(err, chk.IsNil) c.Assert(appendFromURLResp.Response().StatusCode, chk.Equals, 201) c.Assert(appendFromURLResp.BlobAppendOffset(), chk.Equals, "0") c.Assert(appendFromURLResp.BlobCommittedBlockCount(), chk.Equals, int32(1)) c.Assert(appendFromURLResp.ETag(), chk.Not(chk.Equals), ETagNone) c.Assert(appendFromURLResp.LastModified().IsZero(), chk.Equals, false) c.Assert(appendFromURLResp.ContentMD5(), chk.DeepEquals, md5Value[:]) c.Assert(appendFromURLResp.RequestID(), chk.Not(chk.Equals), "") c.Assert(appendFromURLResp.Version(), chk.Not(chk.Equals), "") c.Assert(appendFromURLResp.Date().IsZero(), chk.Equals, false) // Check data integrity through downloading. 
downloadResp, err := destBlob.BlobURL.Download(ctx, 0, CountToEnd, BlobAccessConditions{}, false) c.Assert(err, chk.IsNil) destData, err := ioutil.ReadAll(downloadResp.Body(RetryReaderOptions{})) c.Assert(err, chk.IsNil) c.Assert(destData, chk.DeepEquals, sourceData) // Test append block from URL with bad MD5 value _, badMD5 := getRandomDataAndReader(16) _, err = destBlob.AppendBlockFromURL(ctx, srcBlobURLWithSAS, 0, int64(testSize), AppendBlobAccessConditions{}, ModifiedAccessConditions{}, badMD5) validateStorageError(c, err, ServiceCodeMd5Mismatch) } func (s *aztestsSuite) TestBlobCreateAppendMetadataNonEmpty(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getAppendBlobURL(c, containerURL) _, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobCreateAppendMetadataEmpty(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getAppendBlobURL(c, containerURL) _, err := blobURL.Create(ctx, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.HasLen, 0) } func (s *aztestsSuite) TestBlobCreateAppendMetadataInvalid(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getAppendBlobURL(c, containerURL) _, err := blobURL.Create(ctx, BlobHTTPHeaders{}, Metadata{"In valid!": "bar"}, BlobAccessConditions{}) c.Assert(strings.Contains(err.Error(), invalidHeaderErrorSubstring), chk.Equals, true) } func (s *aztestsSuite) TestBlobCreateAppendHTTPHeaders(c *chk.C) { bsu := 
getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getAppendBlobURL(c, containerURL) _, err := blobURL.Create(ctx, basicHeaders, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) h := resp.NewHTTPHeaders() c.Assert(h, chk.DeepEquals, basicHeaders) } func validateAppendBlobPut(c *chk.C, blobURL AppendBlobURL) { resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobCreateAppendIfModifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) currentTime := getRelativeTimeGMT(-10) _, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validateAppendBlobPut(c, blobURL) } func (s *aztestsSuite) TestBlobCreateAppendIfModifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobCreateAppendIfUnmodifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: 
ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validateAppendBlobPut(c, blobURL) } func (s *aztestsSuite) TestBlobCreateAppendIfUnmodifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) currentTime := getRelativeTimeGMT(-10) _, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobCreateAppendIfMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) _, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}}) c.Assert(err, chk.IsNil) validateAppendBlobPut(c, blobURL) } func (s *aztestsSuite) TestBlobCreateAppendIfMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) _, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobCreateAppendIfNoneMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) _, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}}) c.Assert(err, chk.IsNil) 
validateAppendBlobPut(c, blobURL) } func (s *aztestsSuite) TestBlobCreateAppendIfNoneMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) _, err := blobURL.Create(ctx, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobAppendBlockNilBody(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) _, err := blobURL.AppendBlock(ctx, bytes.NewReader(nil), AppendBlobAccessConditions{}, nil) c.Assert(err, chk.NotNil) validateStorageError(c, err, ServiceCodeInvalidHeaderValue) } func (s *aztestsSuite) TestBlobAppendBlockEmptyBody(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) _, err := blobURL.AppendBlock(ctx, strings.NewReader(""), AppendBlobAccessConditions{}, nil) validateStorageError(c, err, ServiceCodeInvalidHeaderValue) } func (s *aztestsSuite) TestBlobAppendBlockNonExistantBlob(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getAppendBlobURL(c, containerURL) _, err := blobURL.AppendBlock(ctx, strings.NewReader(blockBlobDefaultData), AppendBlobAccessConditions{}, nil) validateStorageError(c, err, ServiceCodeBlobNotFound) } func validateBlockAppended(c *chk.C, blobURL AppendBlobURL, expectedSize int) { resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.ContentLength(), chk.Equals, int64(expectedSize)) } func (s *aztestsSuite) TestBlobAppendBlockIfModifiedSinceTrue(c 
*chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) currentTime := getRelativeTimeGMT(-10) _, err := blobURL.AppendBlock(ctx, strings.NewReader(blockBlobDefaultData), AppendBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}, nil) c.Assert(err, chk.IsNil) validateBlockAppended(c, blobURL, len(blockBlobDefaultData)) } func (s *aztestsSuite) TestBlobAppendBlockIfModifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.AppendBlock(ctx, strings.NewReader(blockBlobDefaultData), AppendBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}, nil) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobAppendBlockIfUnmodifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.AppendBlock(ctx, strings.NewReader(blockBlobDefaultData), AppendBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}, nil) c.Assert(err, chk.IsNil) validateBlockAppended(c, blobURL, len(blockBlobDefaultData)) } func (s *aztestsSuite) TestBlobAppendBlockIfUnmodifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) currentTime := getRelativeTimeGMT(-10) _, err := blobURL.AppendBlock(ctx, strings.NewReader(blockBlobDefaultData), AppendBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}, 
nil) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobAppendBlockIfMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) _, err := blobURL.AppendBlock(ctx, strings.NewReader(blockBlobDefaultData), AppendBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}}, nil) c.Assert(err, chk.IsNil) validateBlockAppended(c, blobURL, len(blockBlobDefaultData)) } func (s *aztestsSuite) TestBlobAppendBlockIfMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) _, err := blobURL.AppendBlock(ctx, strings.NewReader(blockBlobDefaultData), AppendBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}}, nil) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobAppendBlockIfNoneMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) _, err := blobURL.AppendBlock(ctx, strings.NewReader(blockBlobDefaultData), AppendBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}}, nil) c.Assert(err, chk.IsNil) validateBlockAppended(c, blobURL, len(blockBlobDefaultData)) } func (s *aztestsSuite) TestBlobAppendBlockIfNoneMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) _, err := blobURL.AppendBlock(ctx, strings.NewReader(blockBlobDefaultData), AppendBlobAccessConditions{ModifiedAccessConditions: 
ModifiedAccessConditions{IfNoneMatch: resp.ETag()}}, nil) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobAppendBlockIfAppendPositionMatchTrueNegOne(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) _, err := blobURL.AppendBlock(ctx, strings.NewReader(blockBlobDefaultData), AppendBlobAccessConditions{AppendPositionAccessConditions: AppendPositionAccessConditions{IfAppendPositionEqual: -1}}, nil) // This will cause the library to set the value of the header to 0 c.Assert(err, chk.IsNil) validateBlockAppended(c, blobURL, len(blockBlobDefaultData)) } func (s *aztestsSuite) TestBlobAppendBlockIfAppendPositionMatchZero(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) _, err := blobURL.AppendBlock(ctx, strings.NewReader(blockBlobDefaultData), AppendBlobAccessConditions{}, nil) // The position will not match, but the condition should be ignored c.Assert(err, chk.IsNil) _, err = blobURL.AppendBlock(ctx, strings.NewReader(blockBlobDefaultData), AppendBlobAccessConditions{AppendPositionAccessConditions: AppendPositionAccessConditions{IfAppendPositionEqual: 0}}, nil) c.Assert(err, chk.IsNil) validateBlockAppended(c, blobURL, 2*len(blockBlobDefaultData)) } func (s *aztestsSuite) TestBlobAppendBlockIfAppendPositionMatchTrueNonZero(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) _, err := blobURL.AppendBlock(ctx, strings.NewReader(blockBlobDefaultData), AppendBlobAccessConditions{}, nil) c.Assert(err, chk.IsNil) _, err = blobURL.AppendBlock(ctx, strings.NewReader(blockBlobDefaultData), AppendBlobAccessConditions{AppendPositionAccessConditions: AppendPositionAccessConditions{IfAppendPositionEqual: 
int64(len(blockBlobDefaultData))}}, nil) c.Assert(err, chk.IsNil) validateBlockAppended(c, blobURL, len(blockBlobDefaultData)*2) } func (s *aztestsSuite) TestBlobAppendBlockIfAppendPositionMatchFalseNegOne(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) _, err := blobURL.AppendBlock(ctx, strings.NewReader(blockBlobDefaultData), AppendBlobAccessConditions{}, nil) c.Assert(err, chk.IsNil) _, err = blobURL.AppendBlock(ctx, strings.NewReader(blockBlobDefaultData), AppendBlobAccessConditions{AppendPositionAccessConditions: AppendPositionAccessConditions{IfAppendPositionEqual: -1}}, nil) // This will cause the library to set the value of the header to 0 validateStorageError(c, err, ServiceCodeAppendPositionConditionNotMet) } func (s *aztestsSuite) TestBlobAppendBlockIfAppendPositionMatchFalseNonZero(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) _, err := blobURL.AppendBlock(ctx, strings.NewReader(blockBlobDefaultData), AppendBlobAccessConditions{AppendPositionAccessConditions: AppendPositionAccessConditions{IfAppendPositionEqual: 12}}, nil) validateStorageError(c, err, ServiceCodeAppendPositionConditionNotMet) } func (s *aztestsSuite) TestBlobAppendBlockIfMaxSizeTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) _, err := blobURL.AppendBlock(ctx, strings.NewReader(blockBlobDefaultData), AppendBlobAccessConditions{AppendPositionAccessConditions: AppendPositionAccessConditions{IfMaxSizeLessThanOrEqual: int64(len(blockBlobDefaultData) + 1)}}, nil) c.Assert(err, chk.IsNil) validateBlockAppended(c, blobURL, len(blockBlobDefaultData)) } func (s *aztestsSuite) TestBlobAppendBlockIfMaxSizeFalse(c *chk.C) { bsu := getBSU() 
containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewAppendBlob(c, containerURL) _, err := blobURL.AppendBlock(ctx, strings.NewReader(blockBlobDefaultData), AppendBlobAccessConditions{AppendPositionAccessConditions: AppendPositionAccessConditions{IfMaxSizeLessThanOrEqual: int64(len(blockBlobDefaultData) - 1)}}, nil) validateStorageError(c, err, ServiceCodeMaxBlobSizeConditionNotMet) } azure-storage-blob-go-0.10.0/azblob/zt_url_blob_test.go000066400000000000000000002171031367515646300230770ustar00rootroot00000000000000package azblob import ( "crypto/md5" "io" "io/ioutil" "bytes" "errors" "net/url" "os" "strings" "time" chk "gopkg.in/check.v1" // go get gopkg.in/check.v1 ) func (s *aztestsSuite) TestCreateBlobURL(c *chk.C) { bsu := getBSU() containerURL, containerName := getContainerURL(c, bsu) testURL, testName := getBlockBlobURL(c, containerURL) parts := NewBlobURLParts(testURL.URL()) c.Assert(parts.BlobName, chk.Equals, testName) c.Assert(parts.ContainerName, chk.Equals, containerName) correctURL := "https://" + os.Getenv("ACCOUNT_NAME") + ".blob.core.windows.net/" + containerName + "/" + testName temp := testURL.URL() c.Assert(temp.String(), chk.Equals, correctURL) } func (s *aztestsSuite) TestCreateBlobURLWithSnapshotAndSAS(c *chk.C) { bsu := getBSU() containerURL, containerName := getContainerURL(c, bsu) blobURL, blobName := getBlockBlobURL(c, containerURL) currentTime := time.Now().UTC() credential, err := getGenericCredential("") if err != nil { c.Fatal("Invalid credential") } sasQueryParams, err := AccountSASSignatureValues{ Protocol: SASProtocolHTTPS, ExpiryTime: currentTime.Add(48 * time.Hour), Permissions: AccountSASPermissions{Read: true, List: true}.String(), Services: AccountSASServices{Blob: true}.String(), ResourceTypes: AccountSASResourceTypes{Container: true, Object: true}.String(), }.NewSASQueryParameters(credential) if err != nil { c.Fatal(err) } parts := NewBlobURLParts(blobURL.URL()) 
parts.SAS = sasQueryParams parts.Snapshot = currentTime.Format(SnapshotTimeFormat) testURL := parts.URL() // The snapshot format string is taken from the snapshotTimeFormat value in parsing_urls.go. The field is not public, so // it is copied here correctURL := "https://" + os.Getenv("ACCOUNT_NAME") + ".blob.core.windows.net/" + containerName + "/" + blobName + "?" + "snapshot=" + currentTime.Format("2006-01-02T15:04:05.0000000Z07:00") + "&" + sasQueryParams.Encode() c.Assert(testURL.String(), chk.Equals, correctURL) } func (s *aztestsSuite) TestBlobWithNewPipeline(c *chk.C) { bsu := getBSU() containerURL, _ := getContainerURL(c, bsu) blobURL := containerURL.NewBlockBlobURL(blobPrefix) newBlobURL := blobURL.WithPipeline(testPipeline{}) _, err := newBlobURL.GetBlockList(ctx, BlockListAll, LeaseAccessConditions{}) c.Assert(err, chk.NotNil) c.Assert(err.Error(), chk.Equals, testPipelineMessage) } func waitForCopy(c *chk.C, copyBlobURL BlockBlobURL, blobCopyResponse *BlobStartCopyFromURLResponse) { status := blobCopyResponse.CopyStatus() // Wait for the copy to finish. 
If the copy takes longer than a minute, we will fail start := time.Now() for status != CopyStatusSuccess { props, _ := copyBlobURL.GetProperties(ctx, BlobAccessConditions{}) status = props.CopyStatus() currentTime := time.Now() if currentTime.Sub(start) >= time.Minute { c.Fail() } } } func (s *aztestsSuite) TestBlobStartCopyDestEmpty(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) copyBlobURL, _ := getBlockBlobURL(c, containerURL) blobCopyResponse, err := copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) waitForCopy(c, copyBlobURL, blobCopyResponse) resp, err := copyBlobURL.Download(ctx, 0, 20, BlobAccessConditions{}, false) c.Assert(err, chk.IsNil) // Read the blob data to verify the copy data, err := ioutil.ReadAll(resp.Response().Body) c.Assert(resp.ContentLength(), chk.Equals, int64(len(blockBlobDefaultData))) c.Assert(string(data), chk.Equals, blockBlobDefaultData) resp.Body(RetryReaderOptions{}).Close() } func (s *aztestsSuite) TestBlobStartCopyMetadata(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) copyBlobURL, _ := getBlockBlobURL(c, containerURL) resp, err := copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) waitForCopy(c, copyBlobURL, resp) resp2, err := copyBlobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp2.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobStartCopyMetadataNil(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) copyBlobURL, _ := getBlockBlobURL(c, containerURL) // 
Have the destination start with metadata so we ensure the nil metadata passed later takes effect _, err := copyBlobURL.Upload(ctx, bytes.NewReader([]byte("data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) waitForCopy(c, copyBlobURL, resp) resp2, err := copyBlobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp2.NewMetadata(), chk.HasLen, 0) } func (s *aztestsSuite) TestBlobStartCopyMetadataEmpty(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) copyBlobURL, _ := getBlockBlobURL(c, containerURL) // Have the destination start with metadata so we ensure the empty metadata passed later takes effect _, err := copyBlobURL.Upload(ctx, bytes.NewReader([]byte("data")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) waitForCopy(c, copyBlobURL, resp) resp2, err := copyBlobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp2.NewMetadata(), chk.HasLen, 0) } func (s *aztestsSuite) TestBlobStartCopyMetadataInvalidField(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) copyBlobURL, _ := getBlockBlobURL(c, containerURL) _, err := copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), Metadata{"I nvalid.": "bar"}, ModifiedAccessConditions{}, BlobAccessConditions{}) c.Assert(err, chk.NotNil) c.Assert(strings.Contains(err.Error(), invalidHeaderErrorSubstring), chk.Equals, true) } func (s *aztestsSuite) 
TestBlobStartCopySourceNonExistant(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getBlockBlobURL(c, containerURL) copyBlobURL, _ := getBlockBlobURL(c, containerURL) _, err := copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{}) validateStorageError(c, err, ServiceCodeBlobNotFound) } func (s *aztestsSuite) TestBlobStartCopySourcePrivate(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) _, err := containerURL.SetAccessPolicy(ctx, PublicAccessNone, nil, ContainerAccessConditions{}) c.Assert(err, chk.IsNil) blobURL, _ := createNewBlockBlob(c, containerURL) bsu2, err := getAlternateBSU() if err != nil { c.Skip(err.Error()) return } copyContainerURL, _ := createNewContainer(c, bsu2) defer deleteContainer(c, copyContainerURL) copyBlobURL, _ := getBlockBlobURL(c, copyContainerURL) if bsu.String() == bsu2.String() { c.Skip("Test not valid because primary and secondary accounts are the same") } _, err = copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{}) validateStorageError(c, err, ServiceCodeCannotVerifyCopySource) } func (s *aztestsSuite) TestBlobStartCopyUsingSASSrc(c *chk.C) { bsu := getBSU() containerURL, containerName := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) _, err := containerURL.SetAccessPolicy(ctx, PublicAccessNone, nil, ContainerAccessConditions{}) c.Assert(err, chk.IsNil) blobURL, blobName := createNewBlockBlob(c, containerURL) // Create sas values for the source blob credential, err := getGenericCredential("") if err != nil { c.Fatal("Invalid credential") } serviceSASValues := BlobSASSignatureValues{StartTime: time.Now().Add(-1 * time.Hour).UTC(), ExpiryTime: time.Now().Add(time.Hour).UTC(), Permissions: BlobSASPermissions{Read: true, Write: true}.String(), ContainerName: containerName, 
BlobName: blobName} queryParams, err := serviceSASValues.NewSASQueryParameters(credential) if err != nil { c.Fatal(err) } // Create URLs to the destination blob with sas parameters sasURL := blobURL.URL() sasURL.RawQuery = queryParams.Encode() // Create a new container for the destination bsu2, err := getAlternateBSU() if err != nil { c.Skip(err.Error()) return } copyContainerURL, _ := createNewContainer(c, bsu2) defer deleteContainer(c, copyContainerURL) copyBlobURL, _ := getBlockBlobURL(c, copyContainerURL) resp, err := copyBlobURL.StartCopyFromURL(ctx, sasURL, nil, ModifiedAccessConditions{}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) waitForCopy(c, copyBlobURL, resp) resp2, err := copyBlobURL.Download(ctx, 0, int64(len(blockBlobDefaultData)), BlobAccessConditions{}, false) c.Assert(err, chk.IsNil) data, err := ioutil.ReadAll(resp2.Response().Body) c.Assert(resp2.ContentLength(), chk.Equals, int64(len(blockBlobDefaultData))) c.Assert(string(data), chk.Equals, blockBlobDefaultData) resp2.Body(RetryReaderOptions{}).Close() } func (s *aztestsSuite) TestBlobStartCopyUsingSASDest(c *chk.C) { bsu := getBSU() containerURL, containerName := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) _, err := containerURL.SetAccessPolicy(ctx, PublicAccessNone, nil, ContainerAccessConditions{}) c.Assert(err, chk.IsNil) blobURL, blobName := createNewBlockBlob(c, containerURL) _ = blobURL // Generate SAS on the source serviceSASValues := BlobSASSignatureValues{ExpiryTime: time.Now().Add(time.Hour).UTC(), Permissions: BlobSASPermissions{Read: true, Write: true, Create: true}.String(), ContainerName: containerName, BlobName: blobName} credential, err := getGenericCredential("") if err != nil { c.Fatal("Invalid credential") } queryParams, err := serviceSASValues.NewSASQueryParameters(credential) if err != nil { c.Fatal(err) } // Create destination container bsu2, err := getAlternateBSU() if err != nil { c.Skip(err.Error()) return } copyContainerURL, 
copyContainerName := createNewContainer(c, bsu2) defer deleteContainer(c, copyContainerURL) copyBlobURL, copyBlobName := getBlockBlobURL(c, copyContainerURL) // Generate Sas for the destination credential, err = getGenericCredential("SECONDARY_") if err != nil { c.Fatal("Invalid secondary credential") } copyServiceSASvalues := BlobSASSignatureValues{StartTime: time.Now().Add(-1 * time.Hour).UTC(), ExpiryTime: time.Now().Add(time.Hour).UTC(), Permissions: BlobSASPermissions{Read: true, Write: true}.String(), ContainerName: copyContainerName, BlobName: copyBlobName} copyQueryParams, err := copyServiceSASvalues.NewSASQueryParameters(credential) if err != nil { c.Fatal(err) } // Generate anonymous URL to destination with SAS anonURL := bsu2.URL() anonURL.RawQuery = copyQueryParams.Encode() anonPipeline := NewPipeline(NewAnonymousCredential(), PipelineOptions{}) anonBSU := NewServiceURL(anonURL, anonPipeline) anonContainerURL := anonBSU.NewContainerURL(copyContainerName) anonBlobURL := anonContainerURL.NewBlockBlobURL(copyBlobName) // Apply sas to source srcBlobWithSasURL := blobURL.URL() srcBlobWithSasURL.RawQuery = queryParams.Encode() resp, err := anonBlobURL.StartCopyFromURL(ctx, srcBlobWithSasURL, nil, ModifiedAccessConditions{}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) // Allow copy to happen waitForCopy(c, anonBlobURL, resp) resp2, err := copyBlobURL.Download(ctx, 0, int64(len(blockBlobDefaultData)), BlobAccessConditions{}, false) c.Assert(err, chk.IsNil) data, err := ioutil.ReadAll(resp2.Response().Body) _, err = resp2.Body(RetryReaderOptions{}).Read(data) c.Assert(resp2.ContentLength(), chk.Equals, int64(len(blockBlobDefaultData))) c.Assert(string(data), chk.Equals, blockBlobDefaultData) resp2.Body(RetryReaderOptions{}).Close() } func (s *aztestsSuite) TestBlobStartCopySourceIfModifiedSinceTrue(c *chk.C) { currentTime := getRelativeTimeGMT(-10) bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, 
_ := createNewBlockBlob(c, containerURL) destBlobURL, _ := getBlockBlobURL(c, containerURL) _, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{IfModifiedSince: currentTime}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := destBlobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobStartCopySourceIfModifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) destBlobURL, _ := getBlockBlobURL(c, containerURL) _, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{IfModifiedSince: currentTime}, BlobAccessConditions{}) validateStorageError(c, err, ServiceCodeSourceConditionNotMet) } func (s *aztestsSuite) TestBlobStartCopySourceIfUnmodifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) destBlobURL, _ := getBlockBlobURL(c, containerURL) _, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{IfUnmodifiedSince: currentTime}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := destBlobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobStartCopySourceIfUnmodifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) currentTime := getRelativeTimeGMT(-10) destBlobURL, _ := getBlockBlobURL(c, containerURL) _, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, 
ModifiedAccessConditions{IfUnmodifiedSince: currentTime}, BlobAccessConditions{}) validateStorageError(c, err, ServiceCodeSourceConditionNotMet) } func (s *aztestsSuite) TestBlobStartCopySourceIfMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) etag := resp.ETag() destBlobURL, _ := getBlockBlobURL(c, containerURL) _, err = destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{IfMatch: etag}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp2, err := destBlobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp2.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobStartCopySourceIfMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) destBlobURL, _ := getBlockBlobURL(c, containerURL) _, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{IfMatch: "a"}, BlobAccessConditions{}) validateStorageError(c, err, ServiceCodeSourceConditionNotMet) } func (s *aztestsSuite) TestBlobStartCopySourceIfNoneMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) destBlobURL, _ := getBlockBlobURL(c, containerURL) _, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{IfNoneMatch: "a"}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp2, err := destBlobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp2.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobStartCopySourceIfNoneMatchFalse(c *chk.C) { 
bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) etag := resp.ETag() destBlobURL, _ := getBlockBlobURL(c, containerURL) _, err = destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{IfNoneMatch: etag}, BlobAccessConditions{}) validateStorageError(c, err, ServiceCodeSourceConditionNotMet) } func (s *aztestsSuite) TestBlobStartCopyDestIfModifiedSinceTrue(c *chk.C) { currentTime := getRelativeTimeGMT(-10) bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) destBlobURL, _ := createNewBlockBlob(c, containerURL) // The blob must exist to have a last-modified time _, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) c.Assert(err, chk.IsNil) resp, err := destBlobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobStartCopyDestIfModifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) destBlobURL, _ := createNewBlockBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeTargetConditionNotMet) } func (s *aztestsSuite) TestBlobStartCopyDestIfUnmodifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer 
deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) destBlobURL, _ := createNewBlockBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) c.Assert(err, chk.IsNil) resp, err := destBlobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobStartCopyDestIfUnmodifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) currentTime := getRelativeTimeGMT(-10) destBlobURL, _ := createNewBlockBlob(c, containerURL) _, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeTargetConditionNotMet) } func (s *aztestsSuite) TestBlobStartCopyDestIfMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) destBlobURL, _ := createNewBlockBlob(c, containerURL) resp, _ := destBlobURL.GetProperties(ctx, BlobAccessConditions{}) etag := resp.ETag() _, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: etag}}) c.Assert(err, chk.IsNil) resp, err = destBlobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobStartCopyDestIfMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := 
createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) destBlobURL, _ := createNewBlockBlob(c, containerURL) resp, _ := destBlobURL.GetProperties(ctx, BlobAccessConditions{}) etag := resp.ETag() destBlobURL.SetMetadata(ctx, nil, BlobAccessConditions{}) // SetMetadata chances the blob's etag _, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: etag}}) validateStorageError(c, err, ServiceCodeTargetConditionNotMet) } func (s *aztestsSuite) TestBlobStartCopyDestIfNoneMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) destBlobURL, _ := createNewBlockBlob(c, containerURL) resp, _ := destBlobURL.GetProperties(ctx, BlobAccessConditions{}) etag := resp.ETag() destBlobURL.SetMetadata(ctx, nil, BlobAccessConditions{}) // SetMetadata chances the blob's etag _, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), basicMetadata, ModifiedAccessConditions{}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: etag}}) c.Assert(err, chk.IsNil) resp, err = destBlobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobStartCopyDestIfNoneMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) destBlobURL, _ := createNewBlockBlob(c, containerURL) resp, _ := destBlobURL.GetProperties(ctx, BlobAccessConditions{}) etag := resp.ETag() _, err := destBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: etag}}) 
validateStorageError(c, err, ServiceCodeTargetConditionNotMet) } func (s *aztestsSuite) TestBlobAbortCopyInProgress(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getBlockBlobURL(c, containerURL) // Create a large blob that takes time to copy blobSize := 8 * 1024 * 1024 blobData := make([]byte, blobSize, blobSize) for i := range blobData { blobData[i] = byte('a' + i%26) } _, err := blobURL.Upload(ctx, bytes.NewReader(blobData), BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) containerURL.SetAccessPolicy(ctx, PublicAccessBlob, nil, ContainerAccessConditions{}) // So that we don't have to create a SAS // Must copy across accounts so it takes time to copy bsu2, err := getAlternateBSU() if err != nil { c.Skip(err.Error()) return } copyContainerURL, _ := createNewContainer(c, bsu2) copyBlobURL, _ := getBlockBlobURL(c, copyContainerURL) defer deleteContainer(c, copyContainerURL) resp, err := copyBlobURL.StartCopyFromURL(ctx, blobURL.URL(), nil, ModifiedAccessConditions{}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.CopyStatus(), chk.Equals, CopyStatusPending) _, err = copyBlobURL.AbortCopyFromURL(ctx, resp.CopyID(), LeaseAccessConditions{}) if err != nil { // If the error is nil, the test continues as normal. // If the error is not nil, we want to check if it's because the copy is finished and send a message indicating this. 
validateStorageError(c, err, ServiceCodeNoPendingCopyOperation) c.Error("The test failed because the copy completed because it was aborted") } resp2, _ := copyBlobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(resp2.CopyStatus(), chk.Equals, CopyStatusAborted) } func (s *aztestsSuite) TestBlobAbortCopyNoCopyStarted(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) copyBlobURL, _ := getBlockBlobURL(c, containerURL) _, err := copyBlobURL.AbortCopyFromURL(ctx, "copynotstarted", LeaseAccessConditions{}) validateStorageError(c, err, ServiceCodeInvalidQueryParameterValue) } func (s *aztestsSuite) TestBlobSnapshotMetadata(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.CreateSnapshot(ctx, basicMetadata, BlobAccessConditions{}) c.Assert(err, chk.IsNil) // Since metadata is specified on the snapshot, the snapshot should have its own metadata different from the (empty) metadata on the source snapshotURL := blobURL.WithSnapshot(resp.Snapshot()) resp2, err := snapshotURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp2.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobSnapshotMetadataEmpty(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.SetMetadata(ctx, basicMetadata, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.CreateSnapshot(ctx, Metadata{}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) // In this case, because no metadata was specified, it should copy the basicMetadata from the source snapshotURL := blobURL.WithSnapshot(resp.Snapshot()) resp2, err := snapshotURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) 
c.Assert(resp2.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobSnapshotMetadataNil(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.SetMetadata(ctx, basicMetadata, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.CreateSnapshot(ctx, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) snapshotURL := blobURL.WithSnapshot(resp.Snapshot()) resp2, err := snapshotURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp2.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobSnapshotMetadataInvalid(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.CreateSnapshot(ctx, Metadata{"Invalid Field!": "value"}, BlobAccessConditions{}) c.Assert(err, chk.NotNil) c.Assert(strings.Contains(err.Error(), invalidHeaderErrorSubstring), chk.Equals, true) } func (s *aztestsSuite) TestBlobSnapshotBlobNotExist(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getBlockBlobURL(c, containerURL) _, err := blobURL.CreateSnapshot(ctx, nil, BlobAccessConditions{}) validateStorageError(c, err, ServiceCodeBlobNotFound) } func (s *aztestsSuite) TestBlobSnapshotOfSnapshot(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) snapshotURL := blobURL.WithSnapshot(time.Now().UTC().Format(SnapshotTimeFormat)) // The library allows the server to handle the snapshot of snapshot error _, err := snapshotURL.CreateSnapshot(ctx, nil, BlobAccessConditions{}) validateStorageError(c, err, ServiceCodeInvalidQueryParameterValue) } func (s *aztestsSuite) 
TestBlobSnapshotIfModifiedSinceTrue(c *chk.C) { currentTime := getRelativeTimeGMT(-10) bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.CreateSnapshot(ctx, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) c.Assert(err, chk.IsNil) c.Assert(resp.Snapshot() != "", chk.Equals, true) // i.e. The snapshot time is not zero. If the service gives us back a snapshot time, it successfully created a snapshot } func (s *aztestsSuite) TestBlobSnapshotIfModifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.CreateSnapshot(ctx, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobSnapshotIfUnmodifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) resp, err := blobURL.CreateSnapshot(ctx, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) c.Assert(err, chk.IsNil) c.Assert(resp.Snapshot() == "", chk.Equals, false) } func (s *aztestsSuite) TestBlobSnapshotIfUnmodifiedSinceFalse(c *chk.C) { currentTime := getRelativeTimeGMT(-10) bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.CreateSnapshot(ctx, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) validateStorageError(c, err, 
ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobSnapshotIfMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) resp2, err := blobURL.CreateSnapshot(ctx, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}}) c.Assert(err, chk.IsNil) c.Assert(resp2.Snapshot() == "", chk.Equals, false) } func (s *aztestsSuite) TestBlobSnapshotIfMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.CreateSnapshot(ctx, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: "garbage"}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobSnapshotIfNoneMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.CreateSnapshot(ctx, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: "garbage"}}) c.Assert(err, chk.IsNil) c.Assert(resp.Snapshot() == "", chk.Equals, false) } func (s *aztestsSuite) TestBlobSnapshotIfNoneMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) _, err = blobURL.CreateSnapshot(ctx, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobDownloadDataNonExistantBlob(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) 
defer deleteContainer(c, containerURL) blobURL, _ := getBlockBlobURL(c, containerURL) _, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false) validateStorageError(c, err, ServiceCodeBlobNotFound) } func (s *aztestsSuite) TestBlobDownloadDataNegativeOffset(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.Download(ctx, -1, 0, BlobAccessConditions{}, false) c.Assert(err, chk.IsNil) } func (s *aztestsSuite) TestBlobDownloadDataOffsetOutOfRange(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.Download(ctx, int64(len(blockBlobDefaultData)), CountToEnd, BlobAccessConditions{}, false) validateStorageError(c, err, ServiceCodeInvalidRange) } func (s *aztestsSuite) TestBlobDownloadDataCountNegative(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.Download(ctx, 0, -2, BlobAccessConditions{}, false) c.Assert(err, chk.IsNil) } func (s *aztestsSuite) TestBlobDownloadDataCountZero(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false) c.Assert(err, chk.IsNil) // Specifying a count of 0 results in the value being ignored data, err := ioutil.ReadAll(resp.Response().Body) c.Assert(err, chk.IsNil) c.Assert(string(data), chk.Equals, blockBlobDefaultData) } func (s *aztestsSuite) TestBlobDownloadDataCountExact(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.Download(ctx, 0, 
int64(len(blockBlobDefaultData)), BlobAccessConditions{}, false) c.Assert(err, chk.IsNil) data, err := ioutil.ReadAll(resp.Response().Body) c.Assert(err, chk.IsNil) c.Assert(string(data), chk.Equals, blockBlobDefaultData) } func (s *aztestsSuite) TestBlobDownloadDataCountOutOfRange(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.Download(ctx, 0, int64(len(blockBlobDefaultData))*2, BlobAccessConditions{}, false) c.Assert(err, chk.IsNil) data, err := ioutil.ReadAll(resp.Response().Body) c.Assert(err, chk.IsNil) c.Assert(string(data), chk.Equals, blockBlobDefaultData) } func (s *aztestsSuite) TestBlobDownloadDataEmptyRangeStruct(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false) c.Assert(err, chk.IsNil) data, err := ioutil.ReadAll(resp.Response().Body) c.Assert(err, chk.IsNil) c.Assert(string(data), chk.Equals, blockBlobDefaultData) } func (s *aztestsSuite) TestBlobDownloadDataContentMD5(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.Download(ctx, 10, 3, BlobAccessConditions{}, true) c.Assert(err, chk.IsNil) mdf := md5.Sum([]byte(blockBlobDefaultData)[10:13]) c.Assert(resp.ContentMD5(), chk.DeepEquals, mdf[:]) } func (s *aztestsSuite) TestBlobDownloadDataIfModifiedSinceTrue(c *chk.C) { currentTime := getRelativeTimeGMT(-10) bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}, 
false) c.Assert(err, chk.IsNil) c.Assert(resp.ContentLength(), chk.Equals, int64(len(blockBlobDefaultData))) } func (s *aztestsSuite) TestBlobDownloadDataIfModifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}, false) serr := err.(StorageError) c.Assert(serr.Response().StatusCode, chk.Equals, 304) // The server does not return the error in the body even though it is a GET } func (s *aztestsSuite) TestBlobDownloadDataIfUnmodifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) resp, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}, false) c.Assert(err, chk.IsNil) c.Assert(resp.ContentLength(), chk.Equals, int64(len(blockBlobDefaultData))) } func (s *aztestsSuite) TestBlobDownloadDataIfUnmodifiedSinceFalse(c *chk.C) { currentTime := getRelativeTimeGMT(-10) bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}, false) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobDownloadDataIfMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) etag 
:= resp.ETag() resp2, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: etag}}, false) c.Assert(err, chk.IsNil) c.Assert(resp2.ContentLength(), chk.Equals, int64(len(blockBlobDefaultData))) } func (s *aztestsSuite) TestBlobDownloadDataIfMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) etag := resp.ETag() blobURL.SetMetadata(ctx, nil, BlobAccessConditions{}) _, err = blobURL.Download(ctx, 0, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: etag}}, false) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobDownloadDataIfNoneMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) etag := resp.ETag() blobURL.SetMetadata(ctx, nil, BlobAccessConditions{}) resp2, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: etag}}, false) c.Assert(err, chk.IsNil) c.Assert(resp2.ContentLength(), chk.Equals, int64(len(blockBlobDefaultData))) } func (s *aztestsSuite) TestBlobDownloadDataIfNoneMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) etag := resp.ETag() _, err = blobURL.Download(ctx, 0, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: etag}}, false) serr := err.(StorageError) c.Assert(serr.Response().StatusCode, 
chk.Equals, 304) // The server does not return the error in the body even though it is a GET } func (s *aztestsSuite) TestBlobDeleteNonExistant(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getBlockBlobURL(c, containerURL) _, err := blobURL.Delete(ctx, DeleteSnapshotsOptionInclude, BlobAccessConditions{}) validateStorageError(c, err, ServiceCodeBlobNotFound) } func (s *aztestsSuite) TestBlobDeleteSnapshot(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.CreateSnapshot(ctx, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) snapshotURL := blobURL.WithSnapshot(resp.Snapshot()) _, err = snapshotURL.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{}) c.Assert(err, chk.IsNil) validateBlobDeleted(c, snapshotURL) } func (s *aztestsSuite) TestBlobDeleteSnapshotsInclude(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.CreateSnapshot(ctx, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.Delete(ctx, DeleteSnapshotsOptionInclude, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, _ := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{Details: BlobListingDetails{Snapshots: true}}) c.Assert(resp.Segment.BlobItems, chk.HasLen, 0) } func (s *aztestsSuite) TestBlobDeleteSnapshotsOnly(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.CreateSnapshot(ctx, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.Delete(ctx, DeleteSnapshotsOptionOnly, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, _ := 
containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{Details: BlobListingDetails{Snapshots: true}}) c.Assert(resp.Segment.BlobItems, chk.HasLen, 1) c.Assert(resp.Segment.BlobItems[0].Snapshot == "", chk.Equals, true) } func (s *aztestsSuite) TestBlobDeleteSnapshotsNoneWithSnapshots(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.CreateSnapshot(ctx, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{}) validateStorageError(c, err, ServiceCodeSnapshotsPresent) } func validateBlobDeleted(c *chk.C, blobURL BlockBlobURL) { _, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.NotNil) serr := err.(StorageError) // Delete blob is a HEAD request and does not return a ServiceCode in the body c.Assert(serr.Response().StatusCode, chk.Equals, 404) } func (s *aztestsSuite) TestBlobDeleteIfModifiedSinceTrue(c *chk.C) { currentTime := getRelativeTimeGMT(-10) bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validateBlobDeleted(c, blobURL) } func (s *aztestsSuite) TestBlobDeleteIfModifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) 
TestBlobDeleteIfUnmodifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validateBlobDeleted(c, blobURL) } func (s *aztestsSuite) TestBlobDeleteIfUnmodifiedSinceFalse(c *chk.C) { currentTime := getRelativeTimeGMT(-10) bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobDeleteIfMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) etag := resp.ETag() _, err := blobURL.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: etag}}) c.Assert(err, chk.IsNil) validateBlobDeleted(c, blobURL) } func (s *aztestsSuite) TestBlobDeleteIfMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) etag := resp.ETag() blobURL.SetMetadata(ctx, nil, BlobAccessConditions{}) _, err := blobURL.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: etag}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } 
func (s *aztestsSuite) TestBlobDeleteIfNoneMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) etag := resp.ETag() blobURL.SetMetadata(ctx, nil, BlobAccessConditions{}) _, err := blobURL.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: etag}}) c.Assert(err, chk.IsNil) validateBlobDeleted(c, blobURL) } func (s *aztestsSuite) TestBlobDeleteIfNoneMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) etag := resp.ETag() _, err := blobURL.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: etag}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobGetPropsAndMetadataIfModifiedSinceTrue(c *chk.C) { currentTime := getRelativeTimeGMT(-10) bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.SetMetadata(ctx, basicMetadata, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobGetPropsAndMetadataIfModifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.SetMetadata(ctx, basicMetadata, BlobAccessConditions{}) c.Assert(err, 
chk.IsNil) currentTime := getRelativeTimeGMT(10) _, err = blobURL.GetProperties(ctx, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) c.Assert(err, chk.NotNil) serr := err.(StorageError) c.Assert(serr.Response().StatusCode, chk.Equals, 304) // No service code returned for a HEAD } func (s *aztestsSuite) TestBlobGetPropsAndMetadataIfUnmodifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.SetMetadata(ctx, basicMetadata, BlobAccessConditions{}) c.Assert(err, chk.IsNil) currentTime := getRelativeTimeGMT(10) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobGetPropsAndMetadataIfUnmodifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) currentTime := getRelativeTimeGMT(-10) _, err := blobURL.SetMetadata(ctx, basicMetadata, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.GetProperties(ctx, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) c.Assert(err, chk.NotNil) serr := err.(StorageError) c.Assert(serr.Response().StatusCode, chk.Equals, 412) } func (s *aztestsSuite) TestBlobGetPropsAndMetadataIfMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.SetMetadata(ctx, basicMetadata, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp2, err := blobURL.GetProperties(ctx, BlobAccessConditions{ModifiedAccessConditions: 
ModifiedAccessConditions{IfMatch: resp.ETag()}}) c.Assert(err, chk.IsNil) c.Assert(resp2.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobGetPropsOnMissingBlob(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL := containerURL.NewBlobURL("MISSING") _, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.NotNil) serr := err.(StorageError) c.Assert(serr.Response().StatusCode, chk.Equals, 404) c.Assert(serr.ServiceCode(), chk.Equals, ServiceCodeBlobNotFound) } func (s *aztestsSuite) TestBlobGetPropsAndMetadataIfMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.GetProperties(ctx, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}}) c.Assert(err, chk.NotNil) serr := err.(StorageError) c.Assert(serr.Response().StatusCode, chk.Equals, 412) } func (s *aztestsSuite) TestBlobGetPropsAndMetadataIfNoneMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.SetMetadata(ctx, basicMetadata, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobGetPropsAndMetadataIfNoneMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.SetMetadata(ctx, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.GetProperties(ctx, 
BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}}) c.Assert(err, chk.NotNil) serr := err.(StorageError) c.Assert(serr.Response().StatusCode, chk.Equals, 304) } func (s *aztestsSuite) TestBlobSetPropertiesBasic(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.SetHTTPHeaders(ctx, basicHeaders, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) h := resp.NewHTTPHeaders() c.Assert(h, chk.DeepEquals, basicHeaders) } func (s *aztestsSuite) TestBlobSetPropertiesEmptyValue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.SetHTTPHeaders(ctx, BlobHTTPHeaders{ContentType: "my_type"}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.SetHTTPHeaders(ctx, BlobHTTPHeaders{}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.ContentType(), chk.Equals, "") } func validatePropertiesSet(c *chk.C, blobURL BlockBlobURL, disposition string) { resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.ContentDisposition(), chk.Equals, disposition) } func (s *aztestsSuite) TestBlobSetPropertiesIfModifiedSinceTrue(c *chk.C) { currentTime := getRelativeTimeGMT(-10) bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.SetHTTPHeaders(ctx, BlobHTTPHeaders{ContentDisposition: "my_disposition"}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validatePropertiesSet(c, blobURL, 
"my_disposition") } func (s *aztestsSuite) TestBlobSetPropertiesIfModifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.SetHTTPHeaders(ctx, BlobHTTPHeaders{ContentDisposition: "my_disposition"}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobSetPropertiesIfUnmodifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.SetHTTPHeaders(ctx, BlobHTTPHeaders{ContentDisposition: "my_disposition"}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validatePropertiesSet(c, blobURL, "my_disposition") } func (s *aztestsSuite) TestBlobSetPropertiesIfUnmodifiedSinceFalse(c *chk.C) { currentTime := getRelativeTimeGMT(-10) bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.SetHTTPHeaders(ctx, BlobHTTPHeaders{ContentDisposition: "my_disposition"}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobSetPropertiesIfMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.SetHTTPHeaders(ctx, BlobHTTPHeaders{ContentDisposition: 
"my_disposition"}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}}) c.Assert(err, chk.IsNil) validatePropertiesSet(c, blobURL, "my_disposition") } func (s *aztestsSuite) TestBlobSetPropertiesIfMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.SetHTTPHeaders(ctx, BlobHTTPHeaders{ContentDisposition: "my_disposition"}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobSetPropertiesIfNoneMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.SetHTTPHeaders(ctx, BlobHTTPHeaders{ContentDisposition: "my_disposition"}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}}) c.Assert(err, chk.IsNil) validatePropertiesSet(c, blobURL, "my_disposition") } func (s *aztestsSuite) TestBlobSetPropertiesIfNoneMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.SetHTTPHeaders(ctx, BlobHTTPHeaders{ContentDisposition: "my_disposition"}, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobSetMetadataNil(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.SetMetadata(ctx, Metadata{"not": "nil"}, 
BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.SetMetadata(ctx, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.HasLen, 0) } func (s *aztestsSuite) TestBlobSetMetadataEmpty(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.SetMetadata(ctx, Metadata{"not": "nil"}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.SetMetadata(ctx, Metadata{}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.HasLen, 0) } func (s *aztestsSuite) TestBlobSetMetadataInvalidField(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.SetMetadata(ctx, Metadata{"Invalid field!": "value"}, BlobAccessConditions{}) c.Assert(err, chk.NotNil) c.Assert(strings.Contains(err.Error(), invalidHeaderErrorSubstring), chk.Equals, true) } func validateMetadataSet(c *chk.C, blobURL BlockBlobURL) { resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobSetMetadataIfModifiedSinceTrue(c *chk.C) { currentTime := getRelativeTimeGMT(-10) bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.SetMetadata(ctx, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validateMetadataSet(c, blobURL) } func (s *aztestsSuite) 
TestBlobSetMetadataIfModifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.SetMetadata(ctx, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobSetMetadataIfUnmodifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.SetMetadata(ctx, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validateMetadataSet(c, blobURL) } func (s *aztestsSuite) TestBlobSetMetadataIfUnmodifiedSinceFalse(c *chk.C) { currentTime := getRelativeTimeGMT(-10) bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.SetMetadata(ctx, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobSetMetadataIfMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.SetMetadata(ctx, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}}) c.Assert(err, chk.IsNil) validateMetadataSet(c, blobURL) } func (s *aztestsSuite) TestBlobSetMetadataIfMatchFalse(c *chk.C) { bsu := 
getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.SetMetadata(ctx, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobSetMetadataIfNoneMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.SetMetadata(ctx, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}}) c.Assert(err, chk.IsNil) validateMetadataSet(c, blobURL) } func (s *aztestsSuite) TestBlobSetMetadataIfNoneMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.SetMetadata(ctx, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func testBlobsUndeleteImpl(c *chk.C, bsu ServiceURL) error { containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{}) c.Assert(err, chk.IsNil) // This call will not have errors related to slow update of service properties, so we assert. _, err = blobURL.Undelete(ctx) if err != nil { // We want to give the wrapper method a chance to check if it was an error related to the service properties update. 
return err } resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) if err != nil { return errors.New(string(err.(StorageError).ServiceCode())) } c.Assert(resp.BlobType(), chk.Equals, BlobBlockBlob) // We could check any property. This is just to double check it was undeleted. return nil } func (s *aztestsSuite) TestBlobsUndelete(c *chk.C) { bsu := getBSU() runTestRequiringServiceProperties(c, bsu, string(ServiceCodeBlobNotFound), enableSoftDelete, testBlobsUndeleteImpl, disableSoftDelete) } func setAndCheckBlobTier(c *chk.C, containerURL ContainerURL, blobURL BlobURL, tier AccessTierType) { _, err := blobURL.SetTier(ctx, tier, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.AccessTier(), chk.Equals, string(tier)) resp2, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{}) c.Assert(err, chk.IsNil) c.Assert(resp2.Segment.BlobItems[0].Properties.AccessTier, chk.Equals, tier) } func (s *aztestsSuite) TestBlobSetTierAllTiers(c *chk.C) { bsu, err := getBlobStorageBSU() if err != nil { c.Skip(err.Error()) } containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) setAndCheckBlobTier(c, containerURL, blobURL.BlobURL, AccessTierHot) setAndCheckBlobTier(c, containerURL, blobURL.BlobURL, AccessTierCool) setAndCheckBlobTier(c, containerURL, blobURL.BlobURL, AccessTierArchive) bsu, err = getPremiumBSU() if err != nil { c.Skip(err.Error()) } containerURL, _ = createNewContainer(c, bsu) defer deleteContainer(c, containerURL) pageBlobURL, _ := createNewPageBlob(c, containerURL) setAndCheckBlobTier(c, containerURL, pageBlobURL.BlobURL, AccessTierP4) setAndCheckBlobTier(c, containerURL, pageBlobURL.BlobURL, AccessTierP6) setAndCheckBlobTier(c, containerURL, pageBlobURL.BlobURL, AccessTierP10) setAndCheckBlobTier(c, containerURL, pageBlobURL.BlobURL, AccessTierP20) 
setAndCheckBlobTier(c, containerURL, pageBlobURL.BlobURL, AccessTierP30) setAndCheckBlobTier(c, containerURL, pageBlobURL.BlobURL, AccessTierP40) setAndCheckBlobTier(c, containerURL, pageBlobURL.BlobURL, AccessTierP50) } func (s *aztestsSuite) TestBlobTierInferred(c *chk.C) { bsu, err := getPremiumBSU() if err != nil { c.Skip(err.Error()) } containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.AccessTierInferred(), chk.Equals, "true") resp2, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{}) c.Assert(err, chk.IsNil) c.Assert(resp2.Segment.BlobItems[0].Properties.AccessTierInferred, chk.NotNil) c.Assert(resp2.Segment.BlobItems[0].Properties.AccessTier, chk.Not(chk.Equals), "") _, err = blobURL.SetTier(ctx, AccessTierP4, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) resp, err = blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.AccessTierInferred(), chk.Equals, "") resp2, err = containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{}) c.Assert(err, chk.IsNil) c.Assert(resp2.Segment.BlobItems[0].Properties.AccessTierInferred, chk.IsNil) // AccessTierInferred never returned if false } func (s *aztestsSuite) TestBlobArchiveStatus(c *chk.C) { bsu, err := getBlobStorageBSU() if err != nil { c.Skip(err.Error()) } containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err = blobURL.SetTier(ctx, AccessTierArchive, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.SetTier(ctx, AccessTierCool, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.ArchiveStatus(), chk.Equals, 
string(ArchiveStatusRehydratePendingToCool)) resp2, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{}) c.Assert(err, chk.IsNil) c.Assert(resp2.Segment.BlobItems[0].Properties.ArchiveStatus, chk.Equals, ArchiveStatusRehydratePendingToCool) // delete first blob _, err = blobURL.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{}) c.Assert(err, chk.IsNil) blobURL, _ = createNewBlockBlob(c, containerURL) _, err = blobURL.SetTier(ctx, AccessTierArchive, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.SetTier(ctx, AccessTierHot, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) resp, err = blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.ArchiveStatus(), chk.Equals, string(ArchiveStatusRehydratePendingToHot)) resp2, err = containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{}) c.Assert(err, chk.IsNil) c.Assert(resp2.Segment.BlobItems[0].Properties.ArchiveStatus, chk.Equals, ArchiveStatusRehydratePendingToHot) } func (s *aztestsSuite) TestBlobTierInvalidValue(c *chk.C) { bsu, err := getBlobStorageBSU() if err != nil { c.Skip(err.Error()) } containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err = blobURL.SetTier(ctx, AccessTierType("garbage"), LeaseAccessConditions{}) validateStorageError(c, err, ServiceCodeInvalidHeaderValue) } func (s *aztestsSuite) TestBlobURLPartsSASQueryTimes(c *chk.C) { StartTimesInputs := []string{ "2020-04-20", "2020-04-20T07:00Z", "2020-04-20T07:15:00Z", "2020-04-20T07:30:00.1234567Z", } StartTimesExpected := []time.Time{ time.Date(2020, time.April, 20, 0, 0, 0, 0, time.UTC), time.Date(2020, time.April, 20, 7, 0, 0, 0, time.UTC), time.Date(2020, time.April, 20, 7, 15, 0, 0, time.UTC), time.Date(2020, time.April, 20, 7, 30, 0, 123456700, time.UTC), } ExpiryTimesInputs := []string{ "2020-04-21", "2020-04-20T08:00Z", "2020-04-20T08:15:00Z", 
"2020-04-20T08:30:00.2345678Z", } ExpiryTimesExpected := []time.Time{ time.Date(2020, time.April, 21, 0, 0, 0, 0, time.UTC), time.Date(2020, time.April, 20, 8, 0, 0, 0, time.UTC), time.Date(2020, time.April, 20, 8, 15, 0, 0, time.UTC), time.Date(2020, time.April, 20, 8, 30, 0, 234567800, time.UTC), } for i := 0; i < len(StartTimesInputs); i++ { urlString := "https://myaccount.blob.core.windows.net/mycontainer/mydirectory/myfile.txt?" + "se=" + url.QueryEscape(ExpiryTimesInputs[i]) + "&" + "sig=NotASignature&" + "sp=r&" + "spr=https&" + "sr=b&" + "st=" + url.QueryEscape(StartTimesInputs[i]) + "&" + "sv=2019-10-10" url, _ := url.Parse(urlString) parts := NewBlobURLParts(*url) c.Assert(parts.Scheme, chk.Equals, "https") c.Assert(parts.Host, chk.Equals, "myaccount.blob.core.windows.net") c.Assert(parts.ContainerName, chk.Equals, "mycontainer") c.Assert(parts.BlobName, chk.Equals, "mydirectory/myfile.txt") sas := parts.SAS c.Assert(sas.StartTime(), chk.Equals, StartTimesExpected[i]) c.Assert(sas.ExpiryTime(), chk.Equals, ExpiryTimesExpected[i]) uResult := parts.URL() c.Assert(uResult.String(), chk.Equals, urlString) } } func (s *aztestsSuite) TestDownloadBlockBlobUnexpectedEOF(c *chk.C) { bsu := getBSU() cURL, _ := createNewContainer(c, bsu) bURL, _ := createNewBlockBlob(c, cURL) // This uploads for us. resp, err := bURL.Download(ctx, 0, 0, BlobAccessConditions{}, false) c.Assert(err, chk.IsNil) // Verify that we can inject errors first. reader := resp.Body(InjectErrorInRetryReaderOptions(errors.New("unrecoverable error"))) _, err = ioutil.ReadAll(reader) c.Assert(err, chk.NotNil) c.Assert(err.Error(), chk.Equals, "unrecoverable error") // Then inject the retryable error. 
reader = resp.Body(InjectErrorInRetryReaderOptions(io.ErrUnexpectedEOF)) buf, err := ioutil.ReadAll(reader) c.Assert(err, chk.IsNil) c.Assert(buf, chk.DeepEquals, []byte(blockBlobDefaultData)) } azure-storage-blob-go-0.10.0/azblob/zt_url_block_blob_test.go000066400000000000000000001101141367515646300242430ustar00rootroot00000000000000package azblob import ( "context" "encoding/base64" "encoding/binary" "fmt" "io/ioutil" "math" "time" "crypto/md5" "bytes" "strings" guuid "github.com/google/uuid" chk "gopkg.in/check.v1" // go get gopkg.in/check.v1 ) func (s *aztestsSuite) TestStageGetBlocks(c *chk.C) { bsu := getBSU() container, _ := createNewContainer(c, bsu) defer delContainer(c, container) blob := container.NewBlockBlobURL(generateBlobName()) blockID := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%6d", 0))) putResp, err := blob.StageBlock(context.Background(), blockID, getReaderToRandomBytes(1024), LeaseAccessConditions{}, nil) c.Assert(err, chk.IsNil) c.Assert(putResp.Response().StatusCode, chk.Equals, 201) c.Assert(putResp.ContentMD5(), chk.Not(chk.Equals), "") c.Assert(putResp.RequestID(), chk.Not(chk.Equals), "") c.Assert(putResp.Version(), chk.Not(chk.Equals), "") c.Assert(putResp.Date().IsZero(), chk.Equals, false) blockList, err := blob.GetBlockList(context.Background(), BlockListAll, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(blockList.Response().StatusCode, chk.Equals, 200) c.Assert(blockList.LastModified().IsZero(), chk.Equals, true) c.Assert(blockList.ETag(), chk.Equals, ETagNone) c.Assert(blockList.ContentType(), chk.Not(chk.Equals), "") c.Assert(blockList.BlobContentLength(), chk.Equals, int64(-1)) c.Assert(blockList.RequestID(), chk.Not(chk.Equals), "") c.Assert(blockList.Version(), chk.Not(chk.Equals), "") c.Assert(blockList.Date().IsZero(), chk.Equals, false) c.Assert(blockList.CommittedBlocks, chk.HasLen, 0) c.Assert(blockList.UncommittedBlocks, chk.HasLen, 1) listResp, err := blob.CommitBlockList(context.Background(), 
[]string{blockID}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(listResp.Response().StatusCode, chk.Equals, 201) c.Assert(listResp.LastModified().IsZero(), chk.Equals, false) c.Assert(listResp.ETag(), chk.Not(chk.Equals), ETagNone) c.Assert(listResp.ContentMD5(), chk.Not(chk.Equals), "") c.Assert(listResp.RequestID(), chk.Not(chk.Equals), "") c.Assert(listResp.Version(), chk.Not(chk.Equals), "") c.Assert(listResp.Date().IsZero(), chk.Equals, false) blockList, err = blob.GetBlockList(context.Background(), BlockListAll, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(blockList.Response().StatusCode, chk.Equals, 200) c.Assert(blockList.LastModified().IsZero(), chk.Equals, false) c.Assert(blockList.ETag(), chk.Not(chk.Equals), ETagNone) c.Assert(blockList.ContentType(), chk.Not(chk.Equals), "") c.Assert(blockList.BlobContentLength(), chk.Equals, int64(1024)) c.Assert(blockList.RequestID(), chk.Not(chk.Equals), "") c.Assert(blockList.Version(), chk.Not(chk.Equals), "") c.Assert(blockList.Date().IsZero(), chk.Equals, false) c.Assert(blockList.CommittedBlocks, chk.HasLen, 1) c.Assert(blockList.UncommittedBlocks, chk.HasLen, 0) } func (s *aztestsSuite) TestStageBlockFromURL(c *chk.C) { bsu := getBSU() credential, err := getGenericCredential("") if err != nil { c.Fatal("Invalid credential") } container, _ := createNewContainer(c, bsu) defer delContainer(c, container) testSize := 8 * 1024 * 1024 // 8MB r, sourceData := getRandomDataAndReader(testSize) ctx := context.Background() // Use default Background context srcBlob := container.NewBlockBlobURL(generateBlobName()) destBlob := container.NewBlockBlobURL(generateBlobName()) // Prepare source blob for copy. uploadSrcResp, err := srcBlob.Upload(ctx, r, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(uploadSrcResp.Response().StatusCode, chk.Equals, 201) // Get source blob URL with SAS for StageFromURL. 
srcBlobParts := NewBlobURLParts(srcBlob.URL()) srcBlobParts.SAS, err = BlobSASSignatureValues{ Protocol: SASProtocolHTTPS, // Users MUST use HTTPS (not HTTP) ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration ContainerName: srcBlobParts.ContainerName, BlobName: srcBlobParts.BlobName, Permissions: BlobSASPermissions{Read: true}.String(), }.NewSASQueryParameters(credential) if err != nil { c.Fatal(err) } srcBlobURLWithSAS := srcBlobParts.URL() // Stage blocks from URL. blockID1, blockID2 := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%6d", 0))), base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%6d", 1))) stageResp1, err := destBlob.StageBlockFromURL(ctx, blockID1, srcBlobURLWithSAS, 0, 4*1024*1024, LeaseAccessConditions{}, ModifiedAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(stageResp1.Response().StatusCode, chk.Equals, 201) c.Assert(stageResp1.ContentMD5(), chk.Not(chk.Equals), "") c.Assert(stageResp1.RequestID(), chk.Not(chk.Equals), "") c.Assert(stageResp1.Version(), chk.Not(chk.Equals), "") c.Assert(stageResp1.Date().IsZero(), chk.Equals, false) stageResp2, err := destBlob.StageBlockFromURL(ctx, blockID2, srcBlobURLWithSAS, 4*1024*1024, CountToEnd, LeaseAccessConditions{}, ModifiedAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(stageResp2.Response().StatusCode, chk.Equals, 201) c.Assert(stageResp2.ContentMD5(), chk.Not(chk.Equals), "") c.Assert(stageResp2.RequestID(), chk.Not(chk.Equals), "") c.Assert(stageResp2.Version(), chk.Not(chk.Equals), "") c.Assert(stageResp2.Date().IsZero(), chk.Equals, false) // Check block list. blockList, err := destBlob.GetBlockList(context.Background(), BlockListAll, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(blockList.Response().StatusCode, chk.Equals, 200) c.Assert(blockList.CommittedBlocks, chk.HasLen, 0) c.Assert(blockList.UncommittedBlocks, chk.HasLen, 2) // Commit block list. 
listResp, err := destBlob.CommitBlockList(context.Background(), []string{blockID1, blockID2}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(listResp.Response().StatusCode, chk.Equals, 201) // Check data integrity through downloading. downloadResp, err := destBlob.BlobURL.Download(ctx, 0, CountToEnd, BlobAccessConditions{}, false) c.Assert(err, chk.IsNil) destData, err := ioutil.ReadAll(downloadResp.Body(RetryReaderOptions{})) c.Assert(err, chk.IsNil) c.Assert(destData, chk.DeepEquals, sourceData) } func (s *aztestsSuite) TestCopyBlockBlobFromURL(c *chk.C) { bsu := getBSU() credential, err := getGenericCredential("") if err != nil { c.Fatal("Invalid credential") } container, _ := createNewContainer(c, bsu) defer delContainer(c, container) testSize := 8 * 1024 * 1024 // 8MB r, sourceData := getRandomDataAndReader(testSize) sourceDataMD5Value := md5.Sum(sourceData) ctx := context.Background() // Use default Background context srcBlob := container.NewBlockBlobURL(generateBlobName()) destBlob := container.NewBlockBlobURL(generateBlobName()) // Prepare source blob for copy. uploadSrcResp, err := srcBlob.Upload(ctx, r, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(uploadSrcResp.Response().StatusCode, chk.Equals, 201) // Get source blob URL with SAS for StageFromURL. srcBlobParts := NewBlobURLParts(srcBlob.URL()) srcBlobParts.SAS, err = BlobSASSignatureValues{ Protocol: SASProtocolHTTPS, // Users MUST use HTTPS (not HTTP) ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration ContainerName: srcBlobParts.ContainerName, BlobName: srcBlobParts.BlobName, Permissions: BlobSASPermissions{Read: true}.String(), }.NewSASQueryParameters(credential) if err != nil { c.Fatal(err) } srcBlobURLWithSAS := srcBlobParts.URL() // Invoke copy blob from URL. 
resp, err := destBlob.CopyFromURL(ctx, srcBlobURLWithSAS, Metadata{"foo": "bar"}, ModifiedAccessConditions{}, BlobAccessConditions{}, sourceDataMD5Value[:]) c.Assert(err, chk.IsNil) c.Assert(resp.Response().StatusCode, chk.Equals, 202) c.Assert(resp.ETag(), chk.Not(chk.Equals), "") c.Assert(resp.RequestID(), chk.Not(chk.Equals), "") c.Assert(resp.Version(), chk.Not(chk.Equals), "") c.Assert(resp.Date().IsZero(), chk.Equals, false) c.Assert(resp.CopyID(), chk.Not(chk.Equals), "") c.Assert(resp.ContentMD5(), chk.DeepEquals, sourceDataMD5Value[:]) c.Assert(string(resp.CopyStatus()), chk.DeepEquals, "success") // Check data integrity through downloading. downloadResp, err := destBlob.BlobURL.Download(ctx, 0, CountToEnd, BlobAccessConditions{}, false) c.Assert(err, chk.IsNil) destData, err := ioutil.ReadAll(downloadResp.Body(RetryReaderOptions{})) c.Assert(err, chk.IsNil) c.Assert(destData, chk.DeepEquals, sourceData) // Make sure the metadata got copied over c.Assert(len(downloadResp.NewMetadata()), chk.Equals, 1) // Edge case 1: Provide bad MD5 and make sure the copy fails _, badMD5 := getRandomDataAndReader(16) _, err = destBlob.CopyFromURL(ctx, srcBlobURLWithSAS, Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{}, badMD5) c.Assert(err, chk.NotNil) // Edge case 2: Not providing any source MD5 should see the CRC getting returned instead resp, err = destBlob.CopyFromURL(ctx, srcBlobURLWithSAS, Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{}, nil) c.Assert(err, chk.IsNil) c.Assert(resp.Response().StatusCode, chk.Equals, 202) c.Assert(resp.XMsContentCrc64(), chk.Not(chk.Equals), "") } func (s *aztestsSuite) TestBlobSASQueryParamOverrideResponseHeaders(c *chk.C) { bsu := getBSU() credential, err := getGenericCredential("") if err != nil { c.Fatal("Invalid credential") } container, _ := createNewContainer(c, bsu) defer delContainer(c, container) testSize := 8 * 1024 * 1024 // 8MB r, _ := getRandomDataAndReader(testSize) ctx := 
context.Background() // Use default Background context blob := container.NewBlockBlobURL(generateBlobName()) uploadResp, err := blob.Upload(ctx, r, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(uploadResp.Response().StatusCode, chk.Equals, 201) // Get blob URL with SAS. blobParts := NewBlobURLParts(blob.URL()) cacheControlVal := "cache-control-override" contentDispositionVal := "content-disposition-override" contentEncodingVal := "content-encoding-override" contentLanguageVal := "content-language-override" contentTypeVal := "content-type-override" blobParts.SAS, err = BlobSASSignatureValues{ Protocol: SASProtocolHTTPS, // Users MUST use HTTPS (not HTTP) ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration ContainerName: blobParts.ContainerName, BlobName: blobParts.BlobName, Permissions: BlobSASPermissions{Read: true}.String(), CacheControl: cacheControlVal, ContentDisposition: contentDispositionVal, ContentEncoding: contentEncodingVal, ContentLanguage: contentLanguageVal, ContentType: contentTypeVal, }.NewSASQueryParameters(credential) if err != nil { c.Fatal(err) } blobURL := NewBlobURL(blobParts.URL(), NewPipeline(NewAnonymousCredential(), PipelineOptions{})) gResp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(gResp.CacheControl(), chk.Equals, cacheControlVal) c.Assert(gResp.ContentDisposition(), chk.Equals, contentDispositionVal) c.Assert(gResp.ContentEncoding(), chk.Equals, contentEncodingVal) c.Assert(gResp.ContentLanguage(), chk.Equals, contentLanguageVal) c.Assert(gResp.ContentType(), chk.Equals, contentTypeVal) } func (s *aztestsSuite) TestStageBlockWithMD5(c *chk.C) { bsu := getBSU() container, _ := createNewContainer(c, bsu) defer delContainer(c, container) blob := container.NewBlockBlobURL(generateBlobName()) blockID := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%6d", 0))) // test put block with valid MD5 value readerToBody, 
body := getRandomDataAndReader(1024) md5Value := md5.Sum(body) putResp, err := blob.StageBlock(context.Background(), blockID, readerToBody, LeaseAccessConditions{}, md5Value[:]) c.Assert(err, chk.IsNil) c.Assert(putResp.Response().StatusCode, chk.Equals, 201) c.Assert(putResp.ContentMD5(), chk.DeepEquals, md5Value[:]) c.Assert(putResp.RequestID(), chk.Not(chk.Equals), "") c.Assert(putResp.Version(), chk.Not(chk.Equals), "") c.Assert(putResp.Date().IsZero(), chk.Equals, false) // test put block with bad MD5 value readerToBody, body = getRandomDataAndReader(1024) _, badMD5 := getRandomDataAndReader(16) putResp, err = blob.StageBlock(context.Background(), blockID, readerToBody, LeaseAccessConditions{}, badMD5[:]) validateStorageError(c, err, ServiceCodeMd5Mismatch) } func (s *aztestsSuite) TestBlobPutBlobNonEmptyBody(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getBlockBlobURL(c, containerURL) _, err := blobURL.Upload(ctx, strings.NewReader(blockBlobDefaultData), BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false) c.Assert(err, chk.IsNil) data, err := ioutil.ReadAll(resp.Response().Body) c.Assert(string(data), chk.Equals, blockBlobDefaultData) } func (s *aztestsSuite) TestBlobPutBlobHTTPHeaders(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getBlockBlobURL(c, containerURL) _, err := blobURL.Upload(ctx, bytes.NewReader(nil), basicHeaders, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) h := resp.NewHTTPHeaders() h.ContentMD5 = nil // the service generates a MD5 value, omit before comparing c.Assert(h, chk.DeepEquals, basicHeaders) } func (s *aztestsSuite) TestBlobPutBlobMetadataNotEmpty(c *chk.C) { bsu := getBSU() 
containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getBlockBlobURL(c, containerURL) _, err := blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobPutBlobMetadataEmpty(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getBlockBlobURL(c, containerURL) _, err := blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.HasLen, 0) } func (s *aztestsSuite) TestBlobPutBlobMetadataInvalid(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getBlockBlobURL(c, containerURL) _, err := blobURL.Upload(ctx, nil, BlobHTTPHeaders{}, Metadata{"In valid!": "bar"}, BlobAccessConditions{}) c.Assert(strings.Contains(err.Error(), validationErrorSubstring), chk.Equals, true) } func (s *aztestsSuite) TestBlobPutBlobIfModifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) currentTime := getRelativeTimeGMT(-10) _, err := blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validateUpload(c, blobURL) } func (s *aztestsSuite) TestBlobPutBlobIfModifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := 
createNewBlockBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobPutBlobIfUnmodifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validateUpload(c, blobURL) } func (s *aztestsSuite) TestBlobPutBlobIfUnmodifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) currentTime := getRelativeTimeGMT(-10) _, err := blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobPutBlobIfMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}}) c.Assert(err, chk.IsNil) validateUpload(c, blobURL) } func (s *aztestsSuite) TestBlobPutBlobIfMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, 
containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobPutBlobIfNoneMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}}) c.Assert(err, chk.IsNil) validateUpload(c, blobURL) } func (s *aztestsSuite) TestBlobPutBlobIfNoneMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.Upload(ctx, bytes.NewReader(nil), BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } var blockID string // a single blockID used in tests when only a single ID is needed func init() { u := [64]byte{} binary.BigEndian.PutUint32((u[len(guuid.UUID{}):]), math.MaxUint32) blockID = base64.StdEncoding.EncodeToString(u[:]) } func (s *aztestsSuite) TestBlobGetBlockListNone(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getBlockBlobURL(c, containerURL) _, err := blobURL.StageBlock(ctx, blockID, strings.NewReader(blockBlobDefaultData), 
LeaseAccessConditions{}, nil) c.Assert(err, chk.IsNil) resp, err := blobURL.GetBlockList(ctx, BlockListNone, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.CommittedBlocks, chk.HasLen, 0) c.Assert(resp.UncommittedBlocks, chk.HasLen, 0) // Not specifying a block list type should default to only returning committed blocks } func (s *aztestsSuite) TestBlobGetBlockListUncommitted(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getBlockBlobURL(c, containerURL) _, err := blobURL.StageBlock(ctx, blockID, strings.NewReader(blockBlobDefaultData), LeaseAccessConditions{}, nil) c.Assert(err, chk.IsNil) resp, err := blobURL.GetBlockList(ctx, BlockListUncommitted, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.CommittedBlocks, chk.HasLen, 0) c.Assert(resp.UncommittedBlocks, chk.HasLen, 1) } func (s *aztestsSuite) TestBlobGetBlockListCommitted(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getBlockBlobURL(c, containerURL) _, err := blobURL.StageBlock(ctx, blockID, strings.NewReader(blockBlobDefaultData), LeaseAccessConditions{}, nil) c.Assert(err, chk.IsNil) _, err = blobURL.CommitBlockList(ctx, []string{blockID}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) resp, err := blobURL.GetBlockList(ctx, BlockListCommitted, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.CommittedBlocks, chk.HasLen, 1) c.Assert(resp.UncommittedBlocks, chk.HasLen, 0) } func (s *aztestsSuite) TestBlobGetBlockListCommittedEmpty(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getBlockBlobURL(c, containerURL) _, err := blobURL.StageBlock(ctx, blockID, strings.NewReader(blockBlobDefaultData), LeaseAccessConditions{}, nil) c.Assert(err, chk.IsNil) resp, err := blobURL.GetBlockList(ctx, BlockListCommitted, 
LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.CommittedBlocks, chk.HasLen, 0) c.Assert(resp.UncommittedBlocks, chk.HasLen, 0) } func (s *aztestsSuite) TestBlobGetBlockListBothEmpty(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getBlockBlobURL(c, containerURL) _, err := blobURL.GetBlockList(ctx, BlockListAll, LeaseAccessConditions{}) validateStorageError(c, err, ServiceCodeBlobNotFound) } func (s *aztestsSuite) TestBlobGetBlockListBothNotEmpty(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getBlockBlobURL(c, containerURL) id := newID() // Put and commit two blocks _, err := blobURL.StageBlock(ctx, id.next(), strings.NewReader(blockBlobDefaultData), LeaseAccessConditions{}, nil) c.Assert(err, chk.IsNil) _, err = blobURL.StageBlock(ctx, id.next(), strings.NewReader(blockBlobDefaultData), LeaseAccessConditions{}, nil) c.Assert(err, chk.IsNil) _, err = blobURL.CommitBlockList(ctx, id.issued(), BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) // Put two uncommitted blocks _, err = blobURL.StageBlock(ctx, id.next(), strings.NewReader(blockBlobDefaultData), LeaseAccessConditions{}, nil) c.Assert(err, chk.IsNil) _, err = blobURL.StageBlock(ctx, id.next(), strings.NewReader(blockBlobDefaultData), LeaseAccessConditions{}, nil) c.Assert(err, chk.IsNil) resp, err := blobURL.GetBlockList(ctx, BlockListAll, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.CommittedBlocks[0].Name, chk.Equals, id.issued()[0]) c.Assert(resp.CommittedBlocks[1].Name, chk.Equals, id.issued()[1]) // Committed blocks are returned in the order they are committed (in the commit list) c.Assert(resp.UncommittedBlocks[0].Name, chk.Equals, id.issued()[2]) // Uncommitted blocks are returned in alphabetical order c.Assert(resp.UncommittedBlocks[1].Name, chk.Equals, id.issued()[3]) } func (s 
*aztestsSuite) TestBlobGetBlockListInvalidType(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getBlockBlobURL(c, containerURL) _, err := blobURL.StageBlock(ctx, blockID, strings.NewReader(blockBlobDefaultData), LeaseAccessConditions{}, nil) c.Assert(err, chk.IsNil) _, err = blobURL.GetBlockList(ctx, BlockListType("garbage"), LeaseAccessConditions{}) validateStorageError(c, err, ServiceCodeInvalidQueryParameterValue) } func (s *aztestsSuite) TestBlobGetBlockListSnapshot(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getBlockBlobURL(c, containerURL) _, err := blobURL.StageBlock(ctx, blockID, strings.NewReader(blockBlobDefaultData), LeaseAccessConditions{}, nil) c.Assert(err, chk.IsNil) _, err = blobURL.CommitBlockList(ctx, []string{blockID}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.CreateSnapshot(ctx, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) snapshotURL := blobURL.WithSnapshot(resp.Snapshot()) resp2, err := snapshotURL.GetBlockList(ctx, BlockListAll, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp2.CommittedBlocks, chk.HasLen, 1) } func (s *aztestsSuite) TestBlobPutBlockIDInvalidCharacters(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getBlockBlobURL(c, containerURL) _, err := blobURL.StageBlock(ctx, "!!", strings.NewReader(blockBlobDefaultData), LeaseAccessConditions{}, nil) validateStorageError(c, err, ServiceCodeInvalidQueryParameterValue) } func (s *aztestsSuite) TestBlobPutBlockIDInvalidLength(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getBlockBlobURL(c, containerURL) _, err := blobURL.StageBlock(ctx, blockID, strings.NewReader(blockBlobDefaultData), 
LeaseAccessConditions{}, nil) c.Assert(err, chk.IsNil) _, err = blobURL.StageBlock(ctx, "00000000", strings.NewReader(blockBlobDefaultData), LeaseAccessConditions{}, nil) validateStorageError(c, err, ServiceCodeInvalidBlobOrBlock) } func (s *aztestsSuite) TestBlobPutBlockEmptyBody(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getBlockBlobURL(c, containerURL) _, err := blobURL.StageBlock(ctx, blockID, strings.NewReader(""), LeaseAccessConditions{}, nil) validateStorageError(c, err, ServiceCodeInvalidHeaderValue) } func setupPutBlockListTest(c *chk.C) (containerURL ContainerURL, blobURL BlockBlobURL, id string) { bsu := getBSU() containerURL, _ = createNewContainer(c, bsu) blobURL, _ = getBlockBlobURL(c, containerURL) _, err := blobURL.StageBlock(ctx, blockID, strings.NewReader(blockBlobDefaultData), LeaseAccessConditions{}, nil) c.Assert(err, chk.IsNil) return containerURL, blobURL, blockID } func (s *aztestsSuite) TestBlobPutBlockListInvalidID(c *chk.C) { containerURL, blobURL, id := setupPutBlockListTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.CommitBlockList(ctx, []string{id[:2]}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) validateStorageError(c, err, ServiceCodeInvalidBlockID) } func (s *aztestsSuite) TestBlobPutBlockListDuplicateBlocks(c *chk.C) { containerURL, blobURL, id := setupPutBlockListTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.CommitBlockList(ctx, []string{id, id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.GetBlockList(ctx, BlockListAll, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.CommittedBlocks, chk.HasLen, 2) } func (s *aztestsSuite) TestBlobPutBlockListEmptyList(c *chk.C) { containerURL, blobURL, _ := setupPutBlockListTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.CommitBlockList(ctx, []string{}, BlobHTTPHeaders{}, Metadata{}, 
BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.GetBlockList(ctx, BlockListAll, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.CommittedBlocks, chk.HasLen, 0) } func (s *aztestsSuite) TestBlobPutBlockListMetadataEmpty(c *chk.C) { containerURL, blobURL, id := setupPutBlockListTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.HasLen, 0) } func (s *aztestsSuite) TestBlobPutBlockListMetadataNonEmpty(c *chk.C) { containerURL, blobURL, id := setupPutBlockListTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobPutBlockListHTTPHeaders(c *chk.C) { containerURL, blobURL, id := setupPutBlockListTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.CommitBlockList(ctx, []string{id}, basicHeaders, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) h := resp.NewHTTPHeaders() c.Assert(h, chk.DeepEquals, basicHeaders) } func (s *aztestsSuite) TestBlobPutBlockListHTTPHeadersEmpty(c *chk.C) { containerURL, blobURL, id := setupPutBlockListTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{ContentDisposition: "my_disposition"}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.GetProperties(ctx, 
BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.ContentDisposition(), chk.Equals, "") } func validateBlobCommitted(c *chk.C, blobURL BlockBlobURL) { resp, err := blobURL.GetBlockList(ctx, BlockListAll, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.CommittedBlocks, chk.HasLen, 1) } func (s *aztestsSuite) TestBlobPutBlockListIfModifiedSinceTrue(c *chk.C) { containerURL, blobURL, id := setupPutBlockListTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) // The blob must actually exist to have a modifed time c.Assert(err, chk.IsNil) currentTime := getRelativeTimeGMT(-10) _, err = blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validateBlobCommitted(c, blobURL) } func (s *aztestsSuite) TestBlobPutBlockListIfModifiedSinceFalse(c *chk.C) { containerURL, blobURL, id := setupPutBlockListTest(c) defer deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobPutBlockListIfUnmodifiedSinceTrue(c *chk.C) { containerURL, blobURL, id := setupPutBlockListTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) // The blob must actually exist to have a modifed time c.Assert(err, chk.IsNil) currentTime := getRelativeTimeGMT(10) _, err = blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) c.Assert(err, chk.IsNil) 
validateBlobCommitted(c, blobURL) } func (s *aztestsSuite) TestBlobPutBlockListIfUnmodifiedSinceFalse(c *chk.C) { containerURL, blobURL, id := setupPutBlockListTest(c) blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) // The blob must actually exist to have a modifed time defer deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(-10) _, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobPutBlockListIfMatchTrue(c *chk.C) { containerURL, blobURL, id := setupPutBlockListTest(c) defer deleteContainer(c, containerURL) resp, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) // The blob must actually exist to have a modifed time c.Assert(err, chk.IsNil) _, err = blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}}) c.Assert(err, chk.IsNil) validateBlobCommitted(c, blobURL) } func (s *aztestsSuite) TestBlobPutBlockListIfMatchFalse(c *chk.C) { containerURL, blobURL, id := setupPutBlockListTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) // The blob must actually exist to have a modifed time c.Assert(err, chk.IsNil) _, err = blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobPutBlockListIfNoneMatchTrue(c *chk.C) { containerURL, blobURL, id := setupPutBlockListTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.CommitBlockList(ctx, 
[]string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) // The blob must actually exist to have a modifed time c.Assert(err, chk.IsNil) _, err = blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}}) c.Assert(err, chk.IsNil) validateBlobCommitted(c, blobURL) } func (s *aztestsSuite) TestBlobPutBlockListIfNoneMatchFalse(c *chk.C) { containerURL, blobURL, id := setupPutBlockListTest(c) defer deleteContainer(c, containerURL) resp, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) // The blob must actually exist to have a modifed time c.Assert(err, chk.IsNil) _, err = blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobPutBlockListValidateData(c *chk.C) { containerURL, blobURL, id := setupPutBlockListTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) resp, err := blobURL.Download(ctx, 0, 0, BlobAccessConditions{}, false) c.Assert(err, chk.IsNil) data, _ := ioutil.ReadAll(resp.Response().Body) c.Assert(string(data), chk.Equals, blockBlobDefaultData) } func (s *aztestsSuite) TestBlobPutBlockListModifyBlob(c *chk.C) { containerURL, blobURL, id := setupPutBlockListTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.StageBlock(ctx, "0001", bytes.NewReader([]byte("new data")), LeaseAccessConditions{}, nil) c.Assert(err, chk.IsNil) _, err = blobURL.StageBlock(ctx, "0010", bytes.NewReader([]byte("new data")), LeaseAccessConditions{}, nil) c.Assert(err, chk.IsNil) _, err = 
blobURL.StageBlock(ctx, "0011", bytes.NewReader([]byte("new data")), LeaseAccessConditions{}, nil) c.Assert(err, chk.IsNil) _, err = blobURL.StageBlock(ctx, "0100", bytes.NewReader([]byte("new data")), LeaseAccessConditions{}, nil) c.Assert(err, chk.IsNil) _, err = blobURL.CommitBlockList(ctx, []string{"0001", "0011"}, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.GetBlockList(ctx, BlockListAll, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.CommittedBlocks, chk.HasLen, 2) c.Assert(resp.CommittedBlocks[0].Name, chk.Equals, "0001") c.Assert(resp.CommittedBlocks[1].Name, chk.Equals, "0011") c.Assert(resp.UncommittedBlocks, chk.HasLen, 0) } azure-storage-blob-go-0.10.0/azblob/zt_url_container_test.go000066400000000000000000001131601367515646300241410ustar00rootroot00000000000000package azblob import ( "bytes" "context" "errors" "os" "strconv" "strings" "time" chk "gopkg.in/check.v1" // go get gopkg.in/check.v1 ) func delContainer(c *chk.C, container ContainerURL) { resp, err := container.Delete(context.Background(), ContainerAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.Response().StatusCode, chk.Equals, 202) } func (s *aztestsSuite) TestNewContainerURLValidName(c *chk.C) { bsu := getBSU() testURL := bsu.NewContainerURL(containerPrefix) correctURL := "https://" + os.Getenv("ACCOUNT_NAME") + ".blob.core.windows.net/" + containerPrefix temp := testURL.URL() c.Assert(temp.String(), chk.Equals, correctURL) } func (s *aztestsSuite) TestCreateRootContainerURL(c *chk.C) { bsu := getBSU() testURL := bsu.NewContainerURL(ContainerNameRoot) correctURL := "https://" + os.Getenv("ACCOUNT_NAME") + ".blob.core.windows.net/$root" temp := testURL.URL() c.Assert(temp.String(), chk.Equals, correctURL) } func (s *aztestsSuite) TestAccountWithPipeline(c *chk.C) { bsu := getBSU() bsu = bsu.WithPipeline(testPipeline{}) // testPipeline returns an identifying message as an error containerURL := 
bsu.NewContainerURL("name") _, err := containerURL.Create(ctx, Metadata{}, PublicAccessBlob) c.Assert(err.Error(), chk.Equals, testPipelineMessage) } func (s *aztestsSuite) TestContainerCreateInvalidName(c *chk.C) { bsu := getBSU() containerURL := bsu.NewContainerURL("foo bar") _, err := containerURL.Create(ctx, Metadata{}, PublicAccessBlob) validateStorageError(c, err, ServiceCodeInvalidResourceName) } func (s *aztestsSuite) TestContainerCreateEmptyName(c *chk.C) { bsu := getBSU() containerURL := bsu.NewContainerURL("") _, err := containerURL.Create(ctx, Metadata{}, PublicAccessBlob) validateStorageError(c, err, ServiceCodeInvalidQueryParameterValue) } func (s *aztestsSuite) TestContainerCreateNameCollision(c *chk.C) { bsu := getBSU() containerURL, containerName := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) containerURL = bsu.NewContainerURL(containerName) _, err := containerURL.Create(ctx, Metadata{}, PublicAccessBlob) validateStorageError(c, err, ServiceCodeContainerAlreadyExists) } func (s *aztestsSuite) TestContainerCreateInvalidMetadata(c *chk.C) { bsu := getBSU() containerURL, _ := getContainerURL(c, bsu) _, err := containerURL.Create(ctx, Metadata{"1 foo": "bar"}, PublicAccessBlob) c.Assert(err, chk.NotNil) c.Assert(strings.Contains(err.Error(), invalidHeaderErrorSubstring), chk.Equals, true) } func (s *aztestsSuite) TestContainerCreateNilMetadata(c *chk.C) { bsu := getBSU() containerURL, _ := getContainerURL(c, bsu) _, err := containerURL.Create(ctx, nil, PublicAccessBlob) defer deleteContainer(c, containerURL) c.Assert(err, chk.IsNil) response, err := containerURL.GetProperties(ctx, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(response.NewMetadata(), chk.HasLen, 0) } func (s *aztestsSuite) TestContainerCreateEmptyMetadata(c *chk.C) { bsu := getBSU() containerURL, _ := getContainerURL(c, bsu) _, err := containerURL.Create(ctx, Metadata{}, PublicAccessBlob) defer deleteContainer(c, containerURL) c.Assert(err, chk.IsNil) 
response, err := containerURL.GetProperties(ctx, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(response.NewMetadata(), chk.HasLen, 0) } // Note that for all tests that create blobs, deleting the container also deletes any blobs within that container, thus we // simply delete the whole container after the test func (s *aztestsSuite) TestContainerCreateAccessContainer(c *chk.C) { bsu := getBSU() containerURL, _ := getContainerURL(c, bsu) _, err := containerURL.Create(ctx, nil, PublicAccessContainer) defer deleteContainer(c, containerURL) c.Assert(err, chk.IsNil) blobURL := containerURL.NewBlockBlobURL(blobPrefix) blobURL.Upload(ctx, bytes.NewReader([]byte("Content")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}) // Anonymous enumeration should be valid with container access containerURL2 := NewContainerURL(containerURL.URL(), NewPipeline(NewAnonymousCredential(), PipelineOptions{})) response, err := containerURL2.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{}) c.Assert(err, chk.IsNil) c.Assert(response.Segment.BlobItems[0].Name, chk.Equals, blobPrefix) // Getting blob data anonymously should still be valid with container access blobURL2 := containerURL2.NewBlockBlobURL(blobPrefix) resp, err := blobURL2.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestContainerCreateAccessBlob(c *chk.C) { bsu := getBSU() containerURL, _ := getContainerURL(c, bsu) _, err := containerURL.Create(ctx, nil, PublicAccessBlob) defer deleteContainer(c, containerURL) c.Assert(err, chk.IsNil) blobURL := containerURL.NewBlockBlobURL(blobPrefix) blobURL.Upload(ctx, bytes.NewReader([]byte("Content")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}) // Reference the same container URL but with anonymous credentials containerURL2 := NewContainerURL(containerURL.URL(), NewPipeline(NewAnonymousCredential(), PipelineOptions{})) _, err = 
containerURL2.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{}) validateStorageError(c, err, ServiceCodeResourceNotFound) // Listing blobs is not publicly accessible // Accessing blob specific data should be public blobURL2 := containerURL2.NewBlockBlobURL(blobPrefix) resp, err := blobURL2.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestContainerCreateAccessNone(c *chk.C) { bsu := getBSU() containerURL, _ := getContainerURL(c, bsu) _, err := containerURL.Create(ctx, nil, PublicAccessNone) defer deleteContainer(c, containerURL) blobURL := containerURL.NewBlockBlobURL(blobPrefix) blobURL.Upload(ctx, bytes.NewReader([]byte("Content")), BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}) // Reference the same container URL but with anonymous credentials containerURL2 := NewContainerURL(containerURL.URL(), NewPipeline(NewAnonymousCredential(), PipelineOptions{})) // Listing blobs is not public _, err = containerURL2.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{}) validateStorageError(c, err, ServiceCodeResourceNotFound) // Blob data is not public blobURL2 := containerURL2.NewBlockBlobURL(blobPrefix) _, err = blobURL2.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.NotNil) serr := err.(StorageError) c.Assert(serr.Response().StatusCode, chk.Equals, 404) // HEAD request does not return a status code } func validateContainerDeleted(c *chk.C, containerURL ContainerURL) { _, err := containerURL.GetAccessPolicy(ctx, LeaseAccessConditions{}) validateStorageError(c, err, ServiceCodeContainerNotFound) } func (s *aztestsSuite) TestContainerDelete(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) _, err := containerURL.Delete(ctx, ContainerAccessConditions{}) c.Assert(err, chk.IsNil) validateContainerDeleted(c, containerURL) } func (s *aztestsSuite) TestContainerDeleteNonExistant(c *chk.C) { bsu := 
getBSU() containerURL, _ := getContainerURL(c, bsu) _, err := containerURL.Delete(ctx, ContainerAccessConditions{}) validateStorageError(c, err, ServiceCodeContainerNotFound) } func (s *aztestsSuite) TestContainerDeleteIfModifiedSinceTrue(c *chk.C) { currentTime := getRelativeTimeGMT(-10) // Ensure the requests occur at different times bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) _, err := containerURL.Delete(ctx, ContainerAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validateContainerDeleted(c, containerURL) } func (s *aztestsSuite) TestContainerDeleteIfModifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := containerURL.Delete(ctx, ContainerAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestContainerDeleteIfUnModifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) currentTime := getRelativeTimeGMT(10) _, err := containerURL.Delete(ctx, ContainerAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validateContainerDeleted(c, containerURL) } func (s *aztestsSuite) TestContainerDeleteIfUnModifiedSinceFalse(c *chk.C) { currentTime := getRelativeTimeGMT(-10) // Ensure the requests occur at different times bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) _, err := containerURL.Delete(ctx, ContainerAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestContainerAccessConditionsUnsupportedConditions(c *chk.C) { // This test defines 
that the library will panic if the user specifies conditional headers // that will be ignored by the service bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) invalidEtag := ETag("invalid") _, err := containerURL.SetMetadata(ctx, basicMetadata, ContainerAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: invalidEtag}}) c.Assert(err, chk.Not(chk.Equals), nil) } func (s *aztestsSuite) TestContainerListBlobsNonexistantPrefix(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) createNewBlockBlob(c, containerURL) resp, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{Prefix: blobPrefix + blobPrefix}) c.Assert(err, chk.IsNil) c.Assert(resp.Segment.BlobItems, chk.HasLen, 0) } func (s *aztestsSuite) TestContainerListBlobsSpecificValidPrefix(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) _, blobName := createNewBlockBlob(c, containerURL) resp, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{Prefix: blobPrefix}) c.Assert(err, chk.IsNil) c.Assert(resp.Segment.BlobItems, chk.HasLen, 1) c.Assert(resp.Segment.BlobItems[0].Name, chk.Equals, blobName) } func (s *aztestsSuite) TestContainerListBlobsValidDelimiter(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) createBlockBlobWithPrefix(c, containerURL, "a/1") createBlockBlobWithPrefix(c, containerURL, "a/2") createBlockBlobWithPrefix(c, containerURL, "b/1") _, blobName := createBlockBlobWithPrefix(c, containerURL, "blob") resp, err := containerURL.ListBlobsHierarchySegment(ctx, Marker{}, "/", ListBlobsSegmentOptions{}) c.Assert(err, chk.IsNil) c.Assert(len(resp.Segment.BlobItems), chk.Equals, 1) c.Assert(len(resp.Segment.BlobPrefixes), chk.Equals, 2) c.Assert(resp.Segment.BlobPrefixes[0].Name, chk.Equals, 
"a/") c.Assert(resp.Segment.BlobPrefixes[1].Name, chk.Equals, "b/") c.Assert(resp.Segment.BlobItems[0].Name, chk.Equals, blobName) } func (s *aztestsSuite) TestContainerListBlobsWithSnapshots(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) _, err := containerURL.ListBlobsHierarchySegment(ctx, Marker{}, "/", ListBlobsSegmentOptions{Details: BlobListingDetails{Snapshots: true}}) c.Assert(err, chk.Not(chk.Equals), nil) } func (s *aztestsSuite) TestContainerListBlobsInvalidDelimiter(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) createBlockBlobWithPrefix(c, containerURL, "a/1") createBlockBlobWithPrefix(c, containerURL, "a/2") createBlockBlobWithPrefix(c, containerURL, "b/1") createBlockBlobWithPrefix(c, containerURL, "blob") resp, err := containerURL.ListBlobsHierarchySegment(ctx, Marker{}, "^", ListBlobsSegmentOptions{}) c.Assert(err, chk.IsNil) c.Assert(resp.Segment.BlobItems, chk.HasLen, 4) } func (s *aztestsSuite) TestContainerListBlobsIncludeTypeMetadata(c *chk.C) { bsu := getBSU() container, _ := createNewContainer(c, bsu) defer deleteContainer(c, container) _, blobNameNoMetadata := createBlockBlobWithPrefix(c, container, "a") blobMetadata, blobNameMetadata := createBlockBlobWithPrefix(c, container, "b") _, err := blobMetadata.SetMetadata(ctx, Metadata{"field": "value"}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := container.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{Details: BlobListingDetails{Metadata: true}}) c.Assert(err, chk.IsNil) c.Assert(resp.Segment.BlobItems[0].Name, chk.Equals, blobNameNoMetadata) c.Assert(resp.Segment.BlobItems[0].Metadata, chk.HasLen, 0) c.Assert(resp.Segment.BlobItems[1].Name, chk.Equals, blobNameMetadata) c.Assert(resp.Segment.BlobItems[1].Metadata["field"], chk.Equals, "value") } func (s *aztestsSuite) TestContainerListBlobsIncludeTypeSnapshots(c *chk.C) { bsu := 
getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blob, blobName := createNewBlockBlob(c, containerURL) _, err := blob.CreateSnapshot(ctx, Metadata{}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{Details: BlobListingDetails{Snapshots: true}}) c.Assert(err, chk.IsNil) c.Assert(resp.Segment.BlobItems, chk.HasLen, 2) c.Assert(resp.Segment.BlobItems[0].Name, chk.Equals, blobName) c.Assert(resp.Segment.BlobItems[0].Snapshot, chk.NotNil) c.Assert(resp.Segment.BlobItems[1].Name, chk.Equals, blobName) c.Assert(resp.Segment.BlobItems[1].Snapshot, chk.Equals, "") } func (s *aztestsSuite) TestContainerListBlobsIncludeTypeCopy(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, blobName := createNewBlockBlob(c, containerURL) blobCopyURL, blobCopyName := createBlockBlobWithPrefix(c, containerURL, "copy") _, err := blobCopyURL.StartCopyFromURL(ctx, blobURL.URL(), Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{Details: BlobListingDetails{Copy: true}}) // These are sufficient to show that the blob copy was in fact included c.Assert(err, chk.IsNil) c.Assert(resp.Segment.BlobItems, chk.HasLen, 2) c.Assert(resp.Segment.BlobItems[1].Name, chk.Equals, blobName) c.Assert(resp.Segment.BlobItems[0].Name, chk.Equals, blobCopyName) c.Assert(*resp.Segment.BlobItems[0].Properties.ContentLength, chk.Equals, int64(len(blockBlobDefaultData))) temp := blobURL.URL() c.Assert(*resp.Segment.BlobItems[0].Properties.CopySource, chk.Equals, temp.String()) c.Assert(resp.Segment.BlobItems[0].Properties.CopyStatus, chk.Equals, CopyStatusSuccess) } func (s *aztestsSuite) TestContainerListBlobsIncludeTypeUncommitted(c *chk.C) { bsu := getBSU() containerURL, _ := 
createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, blobName := getBlockBlobURL(c, containerURL) _, err := blobURL.StageBlock(ctx, blockID, strings.NewReader(blockBlobDefaultData), LeaseAccessConditions{}, nil) c.Assert(err, chk.IsNil) resp, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{Details: BlobListingDetails{UncommittedBlobs: true}}) c.Assert(err, chk.IsNil) c.Assert(resp.Segment.BlobItems, chk.HasLen, 1) c.Assert(resp.Segment.BlobItems[0].Name, chk.Equals, blobName) } func testContainerListBlobsIncludeTypeDeletedImpl(c *chk.C, bsu ServiceURL) error { containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewBlockBlob(c, containerURL) _, err := blobURL.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{Details: BlobListingDetails{Deleted: true}}) c.Assert(err, chk.IsNil) if len(resp.Segment.BlobItems) != 1 { return errors.New("DeletedBlobNotFound") } c.Assert(resp.Segment.BlobItems[0].Deleted, chk.Equals, true) return nil } func (s *aztestsSuite) TestContainerListBlobsIncludeTypeDeleted(c *chk.C) { bsu := getBSU() runTestRequiringServiceProperties(c, bsu, "DeletedBlobNotFound", enableSoftDelete, testContainerListBlobsIncludeTypeDeletedImpl, disableSoftDelete) } func testContainerListBlobsIncludeMultipleImpl(c *chk.C, bsu ServiceURL) error { containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, blobName := createBlockBlobWithPrefix(c, containerURL, "z") _, err := blobURL.CreateSnapshot(ctx, Metadata{}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) blobURL2, blobName2 := createBlockBlobWithPrefix(c, containerURL, "copy") resp2, err := blobURL2.StartCopyFromURL(ctx, blobURL.URL(), Metadata{}, ModifiedAccessConditions{}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) waitForCopy(c, blobURL2, 
resp2) blobURL3, blobName3 := createBlockBlobWithPrefix(c, containerURL, "deleted") _, err = blobURL3.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{}) resp, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{Details: BlobListingDetails{Snapshots: true, Copy: true, Deleted: true}}) c.Assert(err, chk.IsNil) if len(resp.Segment.BlobItems) != 5 { // If there are fewer blobs in the container than there should be, it will be because one was permanently deleted. return errors.New("DeletedBlobNotFound") } c.Assert(resp.Segment.BlobItems[0].Name, chk.Equals, blobName2) c.Assert(resp.Segment.BlobItems[1].Name, chk.Equals, blobName2) // With soft delete, the overwritten blob will have a backup snapshot c.Assert(resp.Segment.BlobItems[2].Name, chk.Equals, blobName3) c.Assert(resp.Segment.BlobItems[3].Name, chk.Equals, blobName) c.Assert(resp.Segment.BlobItems[3].Snapshot, chk.NotNil) c.Assert(resp.Segment.BlobItems[4].Name, chk.Equals, blobName) return nil } func (s *aztestsSuite) TestContainerListBlobsIncludeMultiple(c *chk.C) { bsu := getBSU() runTestRequiringServiceProperties(c, bsu, "DeletedBlobNotFound", enableSoftDelete, testContainerListBlobsIncludeMultipleImpl, disableSoftDelete) } func (s *aztestsSuite) TestContainerListBlobsMaxResultsNegative(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) _, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{MaxResults: -2}) c.Assert(err, chk.Not(chk.IsNil)) } func (s *aztestsSuite) TestContainerListBlobsMaxResultsZero(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) createNewBlockBlob(c, containerURL) resp, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{MaxResults: 0}) c.Assert(err, chk.IsNil) c.Assert(resp.Segment.BlobItems, chk.HasLen, 1) } func (s *aztestsSuite) 
TestContainerListBlobsMaxResultsInsufficient(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) _, blobName := createBlockBlobWithPrefix(c, containerURL, "a") createBlockBlobWithPrefix(c, containerURL, "b") resp, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{MaxResults: 1}) c.Assert(err, chk.IsNil) c.Assert(resp.Segment.BlobItems, chk.HasLen, 1) c.Assert(resp.Segment.BlobItems[0].Name, chk.Equals, blobName) } func (s *aztestsSuite) TestContainerListBlobsMaxResultsExact(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) _, blobName := createBlockBlobWithPrefix(c, containerURL, "a") _, blobName2 := createBlockBlobWithPrefix(c, containerURL, "b") resp, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{MaxResults: 2}) c.Assert(err, chk.IsNil) c.Assert(resp.Segment.BlobItems, chk.HasLen, 2) c.Assert(resp.Segment.BlobItems[0].Name, chk.Equals, blobName) c.Assert(resp.Segment.BlobItems[1].Name, chk.Equals, blobName2) } func (s *aztestsSuite) TestContainerListBlobsMaxResultsSufficient(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) _, blobName := createBlockBlobWithPrefix(c, containerURL, "a") _, blobName2 := createBlockBlobWithPrefix(c, containerURL, "b") resp, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{MaxResults: 3}) c.Assert(err, chk.IsNil) c.Assert(resp.Segment.BlobItems, chk.HasLen, 2) c.Assert(resp.Segment.BlobItems[0].Name, chk.Equals, blobName) c.Assert(resp.Segment.BlobItems[1].Name, chk.Equals, blobName2) } func (s *aztestsSuite) TestContainerListBlobsNonExistentContainer(c *chk.C) { bsu := getBSU() containerURL, _ := getContainerURL(c, bsu) _, err := containerURL.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{}) c.Assert(err, chk.NotNil) } func (s *aztestsSuite) 
TestContainerWithNewPipeline(c *chk.C) { bsu := getBSU() pipeline := testPipeline{} containerURL, _ := getContainerURL(c, bsu) containerURL = containerURL.WithPipeline(pipeline) _, err := containerURL.Create(ctx, Metadata{}, PublicAccessBlob) c.Assert(err, chk.NotNil) c.Assert(err.Error(), chk.Equals, testPipelineMessage) } func (s *aztestsSuite) TestContainerGetSetPermissionsMultiplePolicies(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) // Define the policies start := generateCurrentTimeWithModerateResolution() expiry := start.Add(5 * time.Minute) expiry2 := start.Add(time.Minute) permissions := []SignedIdentifier{ {ID: "0000", AccessPolicy: AccessPolicy{ Start: start, Expiry: expiry, Permission: AccessPolicyPermission{Read: true, Write: true}.String(), }, }, {ID: "0001", AccessPolicy: AccessPolicy{ Start: start, Expiry: expiry2, Permission: AccessPolicyPermission{Read: true}.String(), }, }, } _, err := containerURL.SetAccessPolicy(ctx, PublicAccessNone, permissions, ContainerAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := containerURL.GetAccessPolicy(ctx, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.Items, chk.DeepEquals, permissions) } func (s *aztestsSuite) TestContainerGetPermissionsPublicAccessNotNone(c *chk.C) { bsu := getBSU() containerURL, _ := getContainerURL(c, bsu) containerURL.Create(ctx, nil, PublicAccessBlob) // We create the container explicitly so we can be sure the access policy is not empty defer deleteContainer(c, containerURL) resp, err := containerURL.GetAccessPolicy(ctx, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.BlobPublicAccess(), chk.Equals, PublicAccessBlob) } func (s *aztestsSuite) TestContainerSetPermissionsPublicAccessNone(c *chk.C) { // Test the basic one by making an anonymous request to ensure it's actually doing it and also with GetPermissions // For all the others, can just use GetPermissions since we've validated 
that it at least registers on the server correctly bsu := getBSU() containerURL, containerName := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) _, blobName := createNewBlockBlob(c, containerURL) // Container is created with PublicAccessBlob, so setting it to None will actually test that it is changed through this method _, err := containerURL.SetAccessPolicy(ctx, PublicAccessNone, nil, ContainerAccessConditions{}) c.Assert(err, chk.IsNil) pipeline := NewPipeline(NewAnonymousCredential(), PipelineOptions{}) bsu2 := NewServiceURL(bsu.URL(), pipeline) containerURL2 := bsu2.NewContainerURL(containerName) blobURL2 := containerURL2.NewBlockBlobURL(blobName) _, err = blobURL2.Download(ctx, 0, 0, BlobAccessConditions{}, false) // Get permissions via the original container URL so the request succeeds resp, _ := containerURL.GetAccessPolicy(ctx, LeaseAccessConditions{}) // If we cannot access a blob's data, we will also not be able to enumerate blobs validateStorageError(c, err, ServiceCodeResourceNotFound) c.Assert(resp.BlobPublicAccess(), chk.Equals, PublicAccessNone) } func (s *aztestsSuite) TestContainerSetPermissionsPublicAccessBlob(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) _, err := containerURL.SetAccessPolicy(ctx, PublicAccessBlob, nil, ContainerAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := containerURL.GetAccessPolicy(ctx, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.BlobPublicAccess(), chk.Equals, PublicAccessBlob) } func (s *aztestsSuite) TestContainerSetPermissionsPublicAccessContainer(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) _, err := containerURL.SetAccessPolicy(ctx, PublicAccessContainer, nil, ContainerAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := containerURL.GetAccessPolicy(ctx, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) 
c.Assert(resp.BlobPublicAccess(), chk.Equals, PublicAccessContainer) } func (s *aztestsSuite) TestContainerSetPermissionsACLSinglePolicy(c *chk.C) { bsu := getBSU() credential, err := getGenericCredential("") if err != nil { c.Fatal("Invalid credential") } containerURL, containerName := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) _, blobName := createNewBlockBlob(c, containerURL) start := time.Now().UTC().Add(-15 * time.Second) expiry := start.Add(5 * time.Minute).UTC() permissions := []SignedIdentifier{{ ID: "0000", AccessPolicy: AccessPolicy{ Start: start, Expiry: expiry, Permission: AccessPolicyPermission{List: true}.String(), }, }} _, err = containerURL.SetAccessPolicy(ctx, PublicAccessNone, permissions, ContainerAccessConditions{}) c.Assert(err, chk.IsNil) serviceSASValues := BlobSASSignatureValues{Identifier: "0000", ContainerName: containerName} queryParams, err := serviceSASValues.NewSASQueryParameters(credential) if err != nil { c.Fatal(err) } sasURL := bsu.URL() sasURL.RawQuery = queryParams.Encode() sasPipeline := NewPipeline(NewAnonymousCredential(), PipelineOptions{}) sasBlobServiceURL := NewServiceURL(sasURL, sasPipeline) // Verifies that the SAS can access the resource sasContainer := sasBlobServiceURL.NewContainerURL(containerName) resp, err := sasContainer.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{}) c.Assert(err, chk.IsNil) c.Assert(resp.Segment.BlobItems[0].Name, chk.Equals, blobName) // Verifies that successful sas access is not just because it's public anonymousBlobService := NewServiceURL(bsu.URL(), sasPipeline) anonymousContainer := anonymousBlobService.NewContainerURL(containerName) _, err = anonymousContainer.ListBlobsFlatSegment(ctx, Marker{}, ListBlobsSegmentOptions{}) validateStorageError(c, err, ServiceCodeResourceNotFound) } func (s *aztestsSuite) TestContainerSetPermissionsACLMoreThanFive(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, 
containerURL) start := time.Now().UTC() expiry := start.Add(5 * time.Minute).UTC() permissions := make([]SignedIdentifier, 6, 6) for i := 0; i < 6; i++ { permissions[i] = SignedIdentifier{ ID: "000" + strconv.Itoa(i), AccessPolicy: AccessPolicy{ Start: start, Expiry: expiry, Permission: AccessPolicyPermission{List: true}.String(), }, } } _, err := containerURL.SetAccessPolicy(ctx, PublicAccessBlob, permissions, ContainerAccessConditions{}) validateStorageError(c, err, ServiceCodeInvalidXMLDocument) } func (s *aztestsSuite) TestContainerSetPermissionsDeleteAndModifyACL(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) start := generateCurrentTimeWithModerateResolution() expiry := start.Add(5 * time.Minute).UTC() permissions := make([]SignedIdentifier, 2, 2) for i := 0; i < 2; i++ { permissions[i] = SignedIdentifier{ ID: "000" + strconv.Itoa(i), AccessPolicy: AccessPolicy{ Start: start, Expiry: expiry, Permission: AccessPolicyPermission{List: true}.String(), }, } } _, err := containerURL.SetAccessPolicy(ctx, PublicAccessBlob, permissions, ContainerAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := containerURL.GetAccessPolicy(ctx, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.Items, chk.DeepEquals, permissions) permissions = resp.Items[:1] // Delete the first policy by removing it from the slice permissions[0].ID = "0004" // Modify the remaining policy which is at index 0 in the new slice _, err = containerURL.SetAccessPolicy(ctx, PublicAccessBlob, permissions, ContainerAccessConditions{}) resp, err = containerURL.GetAccessPolicy(ctx, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.Items, chk.HasLen, 1) c.Assert(resp.Items, chk.DeepEquals, permissions) } func (s *aztestsSuite) TestContainerSetPermissionsDeleteAllPolicies(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) start := time.Now().UTC() expiry 
:= start.Add(5 * time.Minute).UTC() permissions := make([]SignedIdentifier, 2, 2) for i := 0; i < 2; i++ { permissions[i] = SignedIdentifier{ ID: "000" + strconv.Itoa(i), AccessPolicy: AccessPolicy{ Start: start, Expiry: expiry, Permission: AccessPolicyPermission{List: true}.String(), }, } } _, err := containerURL.SetAccessPolicy(ctx, PublicAccessBlob, permissions, ContainerAccessConditions{}) c.Assert(err, chk.IsNil) _, err = containerURL.SetAccessPolicy(ctx, PublicAccessBlob, []SignedIdentifier{}, ContainerAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := containerURL.GetAccessPolicy(ctx, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.Items, chk.HasLen, 0) } func (s *aztestsSuite) TestContainerSetPermissionsInvalidPolicyTimes(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) // Swap start and expiry expiry := time.Now().UTC() start := expiry.Add(5 * time.Minute).UTC() permissions := make([]SignedIdentifier, 2, 2) for i := 0; i < 2; i++ { permissions[i] = SignedIdentifier{ ID: "000" + strconv.Itoa(i), AccessPolicy: AccessPolicy{ Start: start, Expiry: expiry, Permission: AccessPolicyPermission{List: true}.String(), }, } } _, err := containerURL.SetAccessPolicy(ctx, PublicAccessBlob, permissions, ContainerAccessConditions{}) c.Assert(err, chk.IsNil) } func (s *aztestsSuite) TestContainerSetPermissionsNilPolicySlice(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) _, err := containerURL.SetAccessPolicy(ctx, PublicAccessBlob, nil, ContainerAccessConditions{}) c.Assert(err, chk.IsNil) } func (s *aztestsSuite) TestContainerSetPermissionsSignedIdentifierTooLong(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) id := "" for i := 0; i < 65; i++ { id += "a" } expiry := time.Now().UTC() start := expiry.Add(5 * time.Minute).UTC() permissions := 
make([]SignedIdentifier, 2, 2) for i := 0; i < 2; i++ { permissions[i] = SignedIdentifier{ ID: id, AccessPolicy: AccessPolicy{ Start: start, Expiry: expiry, Permission: AccessPolicyPermission{List: true}.String(), }, } } _, err := containerURL.SetAccessPolicy(ctx, PublicAccessBlob, permissions, ContainerAccessConditions{}) validateStorageError(c, err, ServiceCodeInvalidXMLDocument) } func (s *aztestsSuite) TestContainerSetPermissionsIfModifiedSinceTrue(c *chk.C) { currentTime := getRelativeTimeGMT(-10) bsu := getBSU() container, _ := createNewContainer(c, bsu) defer deleteContainer(c, container) _, err := container.SetAccessPolicy(ctx, PublicAccessNone, nil, ContainerAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) c.Assert(err, chk.IsNil) resp, err := container.GetAccessPolicy(ctx, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.BlobPublicAccess(), chk.Equals, PublicAccessNone) } func (s *aztestsSuite) TestContainerSetPermissionsIfModifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := containerURL.SetAccessPolicy(ctx, PublicAccessNone, nil, ContainerAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestContainerSetPermissionsIfUnModifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := containerURL.SetAccessPolicy(ctx, PublicAccessNone, nil, ContainerAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) c.Assert(err, chk.IsNil) resp, err := containerURL.GetAccessPolicy(ctx, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.BlobPublicAccess(), chk.Equals, 
PublicAccessNone) } func (s *aztestsSuite) TestContainerSetPermissionsIfUnModifiedSinceFalse(c *chk.C) { currentTime := getRelativeTimeGMT(-10) bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) _, err := containerURL.SetAccessPolicy(ctx, PublicAccessNone, nil, ContainerAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestContainerGetPropertiesAndMetadataNoMetadata(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) resp, err := containerURL.GetProperties(ctx, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.HasLen, 0) } func (s *aztestsSuite) TestContainerGetPropsAndMetaNonExistantContainer(c *chk.C) { bsu := getBSU() containerURL, _ := getContainerURL(c, bsu) _, err := containerURL.GetProperties(ctx, LeaseAccessConditions{}) validateStorageError(c, err, ServiceCodeContainerNotFound) } func (s *aztestsSuite) TestContainerSetMetadataEmpty(c *chk.C) { bsu := getBSU() containerURL, _ := getContainerURL(c, bsu) _, err := containerURL.Create(ctx, basicMetadata, PublicAccessBlob) defer deleteContainer(c, containerURL) _, err = containerURL.SetMetadata(ctx, Metadata{}, ContainerAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := containerURL.GetProperties(ctx, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.HasLen, 0) } func (*aztestsSuite) TestContainerSetMetadataNil(c *chk.C) { bsu := getBSU() containerURL, _ := getContainerURL(c, bsu) _, err := containerURL.Create(ctx, basicMetadata, PublicAccessBlob) defer deleteContainer(c, containerURL) _, err = containerURL.SetMetadata(ctx, nil, ContainerAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := containerURL.GetProperties(ctx, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) 
c.Assert(resp.NewMetadata(), chk.HasLen, 0) } func (*aztestsSuite) TestContainerSetMetadataInvalidField(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) _, err := containerURL.SetMetadata(ctx, Metadata{"!nval!d Field!@#%": "value"}, ContainerAccessConditions{}) c.Assert(err, chk.NotNil) c.Assert(strings.Contains(err.Error(), invalidHeaderErrorSubstring), chk.Equals, true) } func (*aztestsSuite) TestContainerSetMetadataNonExistant(c *chk.C) { bsu := getBSU() containerURL, _ := getContainerURL(c, bsu) _, err := containerURL.SetMetadata(ctx, nil, ContainerAccessConditions{}) validateStorageError(c, err, ServiceCodeContainerNotFound) } func (s *aztestsSuite) TestContainerSetMetadataIfModifiedSinceTrue(c *chk.C) { currentTime := getRelativeTimeGMT(-10) bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) _, err := containerURL.SetMetadata(ctx, basicMetadata, ContainerAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) c.Assert(err, chk.IsNil) resp, err := containerURL.GetProperties(ctx, LeaseAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestContainerSetMetadataIfModifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := containerURL.SetMetadata(ctx, basicMetadata, ContainerAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestContainerNewBlobURL(c *chk.C) { bsu := getBSU() containerURL, _ := getContainerURL(c, bsu) blobURL := containerURL.NewBlobURL(blobPrefix) tempBlob := blobURL.URL() tempContainer := containerURL.URL() c.Assert(tempBlob.String(), chk.Equals, 
tempContainer.String()+"/"+blobPrefix) c.Assert(blobURL, chk.FitsTypeOf, BlobURL{}) } func (s *aztestsSuite) TestContainerNewBlockBlobURL(c *chk.C) { bsu := getBSU() containerURL, _ := getContainerURL(c, bsu) blobURL := containerURL.NewBlockBlobURL(blobPrefix) tempBlob := blobURL.URL() tempContainer := containerURL.URL() c.Assert(tempBlob.String(), chk.Equals, tempContainer.String()+"/"+blobPrefix) c.Assert(blobURL, chk.FitsTypeOf, BlockBlobURL{}) } azure-storage-blob-go-0.10.0/azblob/zt_url_page_blob_test.go000066400000000000000000002030171367515646300240720ustar00rootroot00000000000000package azblob import ( "context" "crypto/md5" "io/ioutil" "bytes" "strings" "time" chk "gopkg.in/check.v1" ) func (s *aztestsSuite) TestPutGetPages(c *chk.C) { bsu := getBSU() container, _ := createNewContainer(c, bsu) defer delContainer(c, container) blob, _ := createNewPageBlob(c, container) pageRange := PageRange{Start: 0, End: 1023} putResp, err := blob.UploadPages(context.Background(), 0, getReaderToRandomBytes(1024), PageBlobAccessConditions{}, nil) c.Assert(err, chk.IsNil) c.Assert(putResp.Response().StatusCode, chk.Equals, 201) c.Assert(putResp.LastModified().IsZero(), chk.Equals, false) c.Assert(putResp.ETag(), chk.Not(chk.Equals), ETagNone) c.Assert(putResp.ContentMD5(), chk.Not(chk.Equals), "") c.Assert(putResp.BlobSequenceNumber(), chk.Equals, int64(0)) c.Assert(putResp.RequestID(), chk.Not(chk.Equals), "") c.Assert(putResp.Version(), chk.Not(chk.Equals), "") c.Assert(putResp.Date().IsZero(), chk.Equals, false) pageList, err := blob.GetPageRanges(context.Background(), 0, 1023, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(pageList.Response().StatusCode, chk.Equals, 200) c.Assert(pageList.LastModified().IsZero(), chk.Equals, false) c.Assert(pageList.ETag(), chk.Not(chk.Equals), ETagNone) c.Assert(pageList.BlobContentLength(), chk.Equals, int64(512*10)) c.Assert(pageList.RequestID(), chk.Not(chk.Equals), "") c.Assert(pageList.Version(), chk.Not(chk.Equals), 
"") c.Assert(pageList.Date().IsZero(), chk.Equals, false) c.Assert(pageList.PageRange, chk.HasLen, 1) c.Assert(pageList.PageRange[0], chk.DeepEquals, pageRange) } func (s *aztestsSuite) TestUploadPagesFromURL(c *chk.C) { bsu := getBSU() credential, err := getGenericCredential("") if err != nil { c.Fatal("Invalid credential") } container, _ := createNewContainer(c, bsu) defer delContainer(c, container) testSize := 4 * 1024 * 1024 // 4MB r, sourceData := getRandomDataAndReader(testSize) ctx := context.Background() // Use default Background context srcBlob, _ := createNewPageBlobWithSize(c, container, int64(testSize)) destBlob, _ := createNewPageBlobWithSize(c, container, int64(testSize)) // Prepare source blob for copy. uploadSrcResp1, err := srcBlob.UploadPages(ctx, 0, r, PageBlobAccessConditions{}, nil) c.Assert(err, chk.IsNil) c.Assert(uploadSrcResp1.Response().StatusCode, chk.Equals, 201) // Get source blob URL with SAS for UploadPagesFromURL. srcBlobParts := NewBlobURLParts(srcBlob.URL()) srcBlobParts.SAS, err = BlobSASSignatureValues{ Protocol: SASProtocolHTTPS, // Users MUST use HTTPS (not HTTP) ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration ContainerName: srcBlobParts.ContainerName, BlobName: srcBlobParts.BlobName, Permissions: BlobSASPermissions{Read: true}.String(), }.NewSASQueryParameters(credential) if err != nil { c.Fatal(err) } srcBlobURLWithSAS := srcBlobParts.URL() // Upload page from URL. 
pResp1, err := destBlob.UploadPagesFromURL(ctx, srcBlobURLWithSAS, 0, 0, int64(testSize), nil, PageBlobAccessConditions{}, ModifiedAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(pResp1.ETag(), chk.NotNil) c.Assert(pResp1.LastModified(), chk.NotNil) c.Assert(pResp1.Response().StatusCode, chk.Equals, 201) c.Assert(pResp1.ContentMD5(), chk.Not(chk.Equals), "") c.Assert(pResp1.RequestID(), chk.Not(chk.Equals), "") c.Assert(pResp1.Version(), chk.Not(chk.Equals), "") c.Assert(pResp1.Date().IsZero(), chk.Equals, false) // Check data integrity through downloading. downloadResp, err := destBlob.BlobURL.Download(ctx, 0, CountToEnd, BlobAccessConditions{}, false) c.Assert(err, chk.IsNil) destData, err := ioutil.ReadAll(downloadResp.Body(RetryReaderOptions{})) c.Assert(err, chk.IsNil) c.Assert(destData, chk.DeepEquals, sourceData) } func (s *aztestsSuite) TestUploadPagesFromURLWithMD5(c *chk.C) { bsu := getBSU() credential, err := getGenericCredential("") if err != nil { c.Fatal("Invalid credential") } container, _ := createNewContainer(c, bsu) defer delContainer(c, container) testSize := 4 * 1024 * 1024 // 4MB r, sourceData := getRandomDataAndReader(testSize) md5Value := md5.Sum(sourceData) ctx := context.Background() // Use default Background context srcBlob, _ := createNewPageBlobWithSize(c, container, int64(testSize)) destBlob, _ := createNewPageBlobWithSize(c, container, int64(testSize)) // Prepare source blob for copy. uploadSrcResp1, err := srcBlob.UploadPages(ctx, 0, r, PageBlobAccessConditions{}, nil) c.Assert(err, chk.IsNil) c.Assert(uploadSrcResp1.Response().StatusCode, chk.Equals, 201) // Get source blob URL with SAS for UploadPagesFromURL. 
srcBlobParts := NewBlobURLParts(srcBlob.URL()) srcBlobParts.SAS, err = BlobSASSignatureValues{ Protocol: SASProtocolHTTPS, // Users MUST use HTTPS (not HTTP) ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration ContainerName: srcBlobParts.ContainerName, BlobName: srcBlobParts.BlobName, Permissions: BlobSASPermissions{Read: true}.String(), }.NewSASQueryParameters(credential) if err != nil { c.Fatal(err) } srcBlobURLWithSAS := srcBlobParts.URL() // Upload page from URL with MD5. pResp1, err := destBlob.UploadPagesFromURL(ctx, srcBlobURLWithSAS, 0, 0, int64(testSize), md5Value[:], PageBlobAccessConditions{}, ModifiedAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(pResp1.ETag(), chk.NotNil) c.Assert(pResp1.LastModified(), chk.NotNil) c.Assert(pResp1.Response().StatusCode, chk.Equals, 201) c.Assert(pResp1.RequestID(), chk.Not(chk.Equals), "") c.Assert(pResp1.Version(), chk.Not(chk.Equals), "") c.Assert(pResp1.Date().IsZero(), chk.Equals, false) c.Assert(pResp1.ContentMD5(), chk.DeepEquals, md5Value[:]) c.Assert(pResp1.BlobSequenceNumber(), chk.Equals, int64(0)) // Check data integrity through downloading. 
downloadResp, err := destBlob.BlobURL.Download(ctx, 0, CountToEnd, BlobAccessConditions{}, false) c.Assert(err, chk.IsNil) destData, err := ioutil.ReadAll(downloadResp.Body(RetryReaderOptions{})) c.Assert(err, chk.IsNil) c.Assert(destData, chk.DeepEquals, sourceData) // Upload page from URL with bad MD5 _, badMD5 := getRandomDataAndReader(16) _, err = destBlob.UploadPagesFromURL(ctx, srcBlobURLWithSAS, 0, 0, int64(testSize), badMD5[:], PageBlobAccessConditions{}, ModifiedAccessConditions{}) validateStorageError(c, err, ServiceCodeMd5Mismatch) } func (s *aztestsSuite) TestClearDiffPages(c *chk.C) { bsu := getBSU() container, _ := createNewContainer(c, bsu) defer delContainer(c, container) blob, _ := createNewPageBlob(c, container) _, err := blob.UploadPages(context.Background(), 0, getReaderToRandomBytes(2048), PageBlobAccessConditions{}, nil) c.Assert(err, chk.IsNil) snapshotResp, err := blob.CreateSnapshot(context.Background(), nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blob.UploadPages(context.Background(), 2048, getReaderToRandomBytes(2048), PageBlobAccessConditions{}, nil) c.Assert(err, chk.IsNil) pageList, err := blob.GetPageRangesDiff(context.Background(), 0, 4096, snapshotResp.Snapshot(), BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(pageList.PageRange, chk.HasLen, 1) c.Assert(pageList.PageRange[0].Start, chk.Equals, int64(2048)) c.Assert(pageList.PageRange[0].End, chk.Equals, int64(4095)) clearResp, err := blob.ClearPages(context.Background(), 2048, 2048, PageBlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(clearResp.Response().StatusCode, chk.Equals, 201) pageList, err = blob.GetPageRangesDiff(context.Background(), 0, 4095, snapshotResp.Snapshot(), BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(pageList.PageRange, chk.HasLen, 0) } func (s *aztestsSuite) TestIncrementalCopy(c *chk.C) { bsu := getBSU() container, _ := createNewContainer(c, bsu) defer delContainer(c, container) _, err := 
container.SetAccessPolicy(context.Background(), PublicAccessBlob, nil, ContainerAccessConditions{}) c.Assert(err, chk.IsNil) srcBlob, _ := createNewPageBlob(c, container) _, err = srcBlob.UploadPages(context.Background(), 0, getReaderToRandomBytes(1024), PageBlobAccessConditions{}, nil) c.Assert(err, chk.IsNil) snapshotResp, err := srcBlob.CreateSnapshot(context.Background(), nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) dstBlob := container.NewPageBlobURL(generateBlobName()) resp, err := dstBlob.StartCopyIncremental(context.Background(), srcBlob.URL(), snapshotResp.Snapshot(), BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.Response().StatusCode, chk.Equals, 202) c.Assert(resp.LastModified().IsZero(), chk.Equals, false) c.Assert(resp.ETag(), chk.Not(chk.Equals), ETagNone) c.Assert(resp.RequestID(), chk.Not(chk.Equals), "") c.Assert(resp.Version(), chk.Not(chk.Equals), "") c.Assert(resp.Date().IsZero(), chk.Equals, false) c.Assert(resp.CopyID(), chk.Not(chk.Equals), "") c.Assert(resp.CopyStatus(), chk.Equals, CopyStatusPending) waitForIncrementalCopy(c, dstBlob, resp) } func (s *aztestsSuite) TestResizePageBlob(c *chk.C) { bsu := getBSU() container, _ := createNewContainer(c, bsu) defer delContainer(c, container) blob, _ := createNewPageBlob(c, container) resp, err := blob.Resize(context.Background(), 2048, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.Response().StatusCode, chk.Equals, 200) resp, err = blob.Resize(context.Background(), 8192, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.Response().StatusCode, chk.Equals, 200) resp2, err := blob.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp2.ContentLength(), chk.Equals, int64(8192)) } func (s *aztestsSuite) TestPageSequenceNumbers(c *chk.C) { bsu := getBSU() container, _ := createNewContainer(c, bsu) blob, _ := createNewPageBlob(c, container) defer delContainer(c, container) resp, err := 
blob.UpdateSequenceNumber(context.Background(), SequenceNumberActionIncrement, 0, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.Response().StatusCode, chk.Equals, 200) resp, err = blob.UpdateSequenceNumber(context.Background(), SequenceNumberActionMax, 7, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.Response().StatusCode, chk.Equals, 200) resp, err = blob.UpdateSequenceNumber(context.Background(), SequenceNumberActionUpdate, 11, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.Response().StatusCode, chk.Equals, 200) } func (s *aztestsSuite) TestPutPagesWithMD5(c *chk.C) { bsu := getBSU() container, _ := createNewContainer(c, bsu) defer delContainer(c, container) blob, _ := createNewPageBlob(c, container) // put page with valid MD5 readerToBody, body := getRandomDataAndReader(1024) md5Value := md5.Sum(body) putResp, err := blob.UploadPages(context.Background(), 0, readerToBody, PageBlobAccessConditions{}, md5Value[:]) c.Assert(err, chk.IsNil) c.Assert(putResp.Response().StatusCode, chk.Equals, 201) c.Assert(putResp.LastModified().IsZero(), chk.Equals, false) c.Assert(putResp.ETag(), chk.Not(chk.Equals), ETagNone) c.Assert(putResp.ContentMD5(), chk.DeepEquals, md5Value[:]) c.Assert(putResp.BlobSequenceNumber(), chk.Equals, int64(0)) c.Assert(putResp.RequestID(), chk.Not(chk.Equals), "") c.Assert(putResp.Version(), chk.Not(chk.Equals), "") c.Assert(putResp.Date().IsZero(), chk.Equals, false) // put page with bad MD5 readerToBody, body = getRandomDataAndReader(1024) _, badMD5 := getRandomDataAndReader(16) putResp, err = blob.UploadPages(context.Background(), 0, readerToBody, PageBlobAccessConditions{}, badMD5[:]) validateStorageError(c, err, ServiceCodeMd5Mismatch) } func (s *aztestsSuite) TestBlobCreatePageSizeInvalid(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getPageBlobURL(c, containerURL) _, err := blobURL.Create(ctx, 1, 0, 
BlobHTTPHeaders{}, nil, BlobAccessConditions{}) validateStorageError(c, err, ServiceCodeInvalidHeaderValue) } func (s *aztestsSuite) TestBlobCreatePageSequenceInvalid(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getPageBlobURL(c, containerURL) _, err := blobURL.Create(ctx, PageBlobPageBytes, -1, BlobHTTPHeaders{}, nil, BlobAccessConditions{}) c.Assert(err, chk.Not(chk.IsNil)) } func (s *aztestsSuite) TestBlobCreatePageMetadataNonEmpty(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getPageBlobURL(c, containerURL) _, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{}) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobCreatePageMetadataEmpty(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getPageBlobURL(c, containerURL) _, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.HasLen, 0) } func (s *aztestsSuite) TestBlobCreatePageMetadataInvalid(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getPageBlobURL(c, containerURL) _, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, Metadata{"In valid1": "bar"}, BlobAccessConditions{}) c.Assert(strings.Contains(err.Error(), invalidHeaderErrorSubstring), chk.Equals, true) } func (s *aztestsSuite) TestBlobCreatePageHTTPHeaders(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) 
blobURL, _ := getPageBlobURL(c, containerURL) _, err := blobURL.Create(ctx, PageBlobPageBytes, 0, basicHeaders, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) h := resp.NewHTTPHeaders() c.Assert(h, chk.DeepEquals, basicHeaders) } func validatePageBlobPut(c *chk.C, blobURL PageBlobURL) { resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.NewMetadata(), chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestBlobCreatePageIfModifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) // Originally created without metadata currentTime := getRelativeTimeGMT(-10) _, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validatePageBlobPut(c, blobURL) } func (s *aztestsSuite) TestBlobCreatePageIfModifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) // Originally created without metadata currentTime := getRelativeTimeGMT(10) _, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobCreatePageIfUnmodifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) // Originally created without metadata currentTime := getRelativeTimeGMT(10) _, err := blobURL.Create(ctx, PageBlobPageBytes, 0, 
BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validatePageBlobPut(c, blobURL) } func (s *aztestsSuite) TestBlobCreatePageIfUnmodifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) // Originally created without metadata currentTime := getRelativeTimeGMT(-10) _, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobCreatePageIfMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) // Originally created without metadata resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) _, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}}) c.Assert(err, chk.IsNil) validatePageBlobPut(c, blobURL) } func (s *aztestsSuite) TestBlobCreatePageIfMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) // Originally created without metadata _, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobCreatePageIfNoneMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := 
createNewPageBlob(c, containerURL) // Originally created without metadata _, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}}) c.Assert(err, chk.IsNil) validatePageBlobPut(c, blobURL) } func (s *aztestsSuite) TestBlobCreatePageIfNoneMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) // Originally created without metadata resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) _, err := blobURL.Create(ctx, PageBlobPageBytes, 0, BlobHTTPHeaders{}, basicMetadata, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobPutPagesInvalidRange(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) _, err := blobURL.UploadPages(ctx, 0, strings.NewReader(blockBlobDefaultData), PageBlobAccessConditions{}, nil) c.Assert(err, chk.Not(chk.IsNil)) } func (s *aztestsSuite) TestBlobPutPagesNilBody(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) _, err := blobURL.UploadPages(ctx, 0, nil, PageBlobAccessConditions{}, nil) c.Assert(err, chk.Not(chk.IsNil)) } func (s *aztestsSuite) TestBlobPutPagesEmptyBody(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) _, err := blobURL.UploadPages(ctx, 0, bytes.NewReader([]byte{}), PageBlobAccessConditions{}, nil) c.Assert(err, chk.Not(chk.IsNil)) } func (s *aztestsSuite) TestBlobPutPagesNonExistantBlob(c *chk.C) { bsu := getBSU() 
containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := getPageBlobURL(c, containerURL) _, err := blobURL.UploadPages(ctx, 0, getReaderToRandomBytes(PageBlobPageBytes), PageBlobAccessConditions{}, nil) validateStorageError(c, err, ServiceCodeBlobNotFound) } func validateUploadPages(c *chk.C, blobURL PageBlobURL) { // This will only validate a single put page at 0-PageBlobPageBytes-1 resp, err := blobURL.GetPageRanges(ctx, 0, 0, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.PageRange[0], chk.Equals, PageRange{Start: 0, End: PageBlobPageBytes - 1}) } func (s *aztestsSuite) TestBlobPutPagesIfModifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) currentTime := getRelativeTimeGMT(-10) _, err := blobURL.UploadPages(ctx, 0, getReaderToRandomBytes(PageBlobPageBytes), PageBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}, nil) c.Assert(err, chk.IsNil) validateUploadPages(c, blobURL) } func (s *aztestsSuite) TestBlobPutPagesIfModifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.UploadPages(ctx, 0, getReaderToRandomBytes(PageBlobPageBytes), PageBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}, nil) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobPutPagesIfUnmodifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.UploadPages(ctx, 0, getReaderToRandomBytes(PageBlobPageBytes), 
PageBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}, nil) c.Assert(err, chk.IsNil) validateUploadPages(c, blobURL) } func (s *aztestsSuite) TestBlobPutPagesIfUnmodifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) currentTime := getRelativeTimeGMT(-10) _, err := blobURL.UploadPages(ctx, 0, getReaderToRandomBytes(PageBlobPageBytes), PageBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}, nil) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobPutPagesIfMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) _, err := blobURL.UploadPages(ctx, 0, getReaderToRandomBytes(PageBlobPageBytes), PageBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}}, nil) c.Assert(err, chk.IsNil) validateUploadPages(c, blobURL) } func (s *aztestsSuite) TestBlobPutPagesIfMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) _, err := blobURL.UploadPages(ctx, 0, getReaderToRandomBytes(PageBlobPageBytes), PageBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}}, nil) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobPutPagesIfNoneMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) _, err := blobURL.UploadPages(ctx, 0, getReaderToRandomBytes(PageBlobPageBytes), 
PageBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}}, nil) c.Assert(err, chk.IsNil) validateUploadPages(c, blobURL) } func (s *aztestsSuite) TestBlobPutPagesIfNoneMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) _, err := blobURL.UploadPages(ctx, 0, getReaderToRandomBytes(PageBlobPageBytes), PageBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}}, nil) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobPutPagesIfSequenceNumberLessThanTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) _, err := blobURL.UploadPages(ctx, 0, getReaderToRandomBytes(PageBlobPageBytes), PageBlobAccessConditions{SequenceNumberAccessConditions: SequenceNumberAccessConditions{IfSequenceNumberLessThan: 10}}, nil) c.Assert(err, chk.IsNil) validateUploadPages(c, blobURL) } func (s *aztestsSuite) TestBlobPutPagesIfSequenceNumberLessThanFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) blobURL.UpdateSequenceNumber(ctx, SequenceNumberActionUpdate, 10, BlobAccessConditions{}) _, err := blobURL.UploadPages(ctx, 0, getReaderToRandomBytes(PageBlobPageBytes), PageBlobAccessConditions{SequenceNumberAccessConditions: SequenceNumberAccessConditions{IfSequenceNumberLessThan: 1}}, nil) validateStorageError(c, err, ServiceCodeSequenceNumberConditionNotMet) } func (s *aztestsSuite) TestBlobPutPagesIfSequenceNumberLessThanNegOne(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := 
createNewPageBlob(c, containerURL) _, err := blobURL.UploadPages(ctx, 0, getReaderToRandomBytes(PageBlobPageBytes), PageBlobAccessConditions{SequenceNumberAccessConditions: SequenceNumberAccessConditions{IfSequenceNumberLessThan: -1}}, nil) // This will cause the library to set the value of the header to 0 validateStorageError(c, err, ServiceCodeSequenceNumberConditionNotMet) } func (s *aztestsSuite) TestBlobPutPagesIfSequenceNumberLTETrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) blobURL.UpdateSequenceNumber(ctx, SequenceNumberActionUpdate, 1, BlobAccessConditions{}) _, err := blobURL.UploadPages(ctx, 0, getReaderToRandomBytes(PageBlobPageBytes), PageBlobAccessConditions{SequenceNumberAccessConditions: SequenceNumberAccessConditions{IfSequenceNumberLessThanOrEqual: 1}}, nil) c.Assert(err, chk.IsNil) validateUploadPages(c, blobURL) } func (s *aztestsSuite) TestBlobPutPagesIfSequenceNumberLTEqualFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) blobURL.UpdateSequenceNumber(ctx, SequenceNumberActionUpdate, 10, BlobAccessConditions{}) _, err := blobURL.UploadPages(ctx, 0, getReaderToRandomBytes(PageBlobPageBytes), PageBlobAccessConditions{SequenceNumberAccessConditions: SequenceNumberAccessConditions{IfSequenceNumberLessThanOrEqual: 1}}, nil) validateStorageError(c, err, ServiceCodeSequenceNumberConditionNotMet) } func (s *aztestsSuite) TestBlobPutPagesIfSequenceNumberLTENegOne(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) _, err := blobURL.UploadPages(ctx, 0, getReaderToRandomBytes(PageBlobPageBytes), PageBlobAccessConditions{SequenceNumberAccessConditions: SequenceNumberAccessConditions{IfSequenceNumberLessThanOrEqual: 
-1}}, nil) // This will cause the library to set the value of the header to 0 c.Assert(err, chk.IsNil) validateUploadPages(c, blobURL) } func (s *aztestsSuite) TestBlobPutPagesIfSequenceNumberEqualTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) blobURL.UpdateSequenceNumber(ctx, SequenceNumberActionUpdate, 1, BlobAccessConditions{}) _, err := blobURL.UploadPages(ctx, 0, getReaderToRandomBytes(PageBlobPageBytes), PageBlobAccessConditions{SequenceNumberAccessConditions: SequenceNumberAccessConditions{IfSequenceNumberEqual: 1}}, nil) c.Assert(err, chk.IsNil) validateUploadPages(c, blobURL) } func (s *aztestsSuite) TestBlobPutPagesIfSequenceNumberEqualFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) _, err := blobURL.UploadPages(ctx, 0, getReaderToRandomBytes(PageBlobPageBytes), PageBlobAccessConditions{SequenceNumberAccessConditions: SequenceNumberAccessConditions{IfSequenceNumberEqual: 1}}, nil) validateStorageError(c, err, ServiceCodeSequenceNumberConditionNotMet) } func (s *aztestsSuite) TestBlobPutPagesIfSequenceNumberEqualNegOne(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) _, err := blobURL.UploadPages(ctx, 0, getReaderToRandomBytes(PageBlobPageBytes), PageBlobAccessConditions{SequenceNumberAccessConditions: SequenceNumberAccessConditions{IfSequenceNumberEqual: -1}}, nil) // This will cause the library to set the value of the header to 0 c.Assert(err, chk.IsNil) validateUploadPages(c, blobURL) } func setupClearPagesTest(c *chk.C) (ContainerURL, PageBlobURL) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) blobURL, _ := createNewPageBlob(c, containerURL) _, err := blobURL.UploadPages(ctx, 0, 
getReaderToRandomBytes(PageBlobPageBytes), PageBlobAccessConditions{}, nil) c.Assert(err, chk.IsNil) return containerURL, blobURL } func validateClearPagesTest(c *chk.C, blobURL PageBlobURL) { resp, err := blobURL.GetPageRanges(ctx, 0, 0, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.PageRange, chk.HasLen, 0) } func (s *aztestsSuite) TestBlobClearPagesInvalidRange(c *chk.C) { containerURL, blobURL := setupClearPagesTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.ClearPages(ctx, 0, PageBlobPageBytes+1, PageBlobAccessConditions{}) c.Assert(err, chk.Not(chk.IsNil)) } func (s *aztestsSuite) TestBlobClearPagesIfModifiedSinceTrue(c *chk.C) { containerURL, blobURL := setupClearPagesTest(c) defer deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(-10) _, err := blobURL.ClearPages(ctx, 0, PageBlobPageBytes, PageBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validateClearPagesTest(c, blobURL) } func (s *aztestsSuite) TestBlobClearPagesIfModifiedSinceFalse(c *chk.C) { containerURL, blobURL := setupClearPagesTest(c) defer deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.ClearPages(ctx, 0, PageBlobPageBytes, PageBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobClearPagesIfUnmodifiedSinceTrue(c *chk.C) { containerURL, blobURL := setupClearPagesTest(c) defer deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.ClearPages(ctx, 0, PageBlobPageBytes, PageBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validateClearPagesTest(c, blobURL) } func (s *aztestsSuite) TestBlobClearPagesIfUnmodifiedSinceFalse(c *chk.C) { containerURL, blobURL := 
setupClearPagesTest(c) defer deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(-10) _, err := blobURL.ClearPages(ctx, 0, PageBlobPageBytes, PageBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobClearPagesIfMatchTrue(c *chk.C) { containerURL, blobURL := setupClearPagesTest(c) defer deleteContainer(c, containerURL) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) _, err := blobURL.ClearPages(ctx, 0, PageBlobPageBytes, PageBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}}) c.Assert(err, chk.IsNil) validateClearPagesTest(c, blobURL) } func (s *aztestsSuite) TestBlobClearPagesIfMatchFalse(c *chk.C) { containerURL, blobURL := setupClearPagesTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.ClearPages(ctx, 0, PageBlobPageBytes, PageBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobClearPagesIfNoneMatchTrue(c *chk.C) { containerURL, blobURL := setupClearPagesTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.ClearPages(ctx, 0, PageBlobPageBytes, PageBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}}) c.Assert(err, chk.IsNil) validateClearPagesTest(c, blobURL) } func (s *aztestsSuite) TestBlobClearPagesIfNoneMatchFalse(c *chk.C) { containerURL, blobURL := setupClearPagesTest(c) defer deleteContainer(c, containerURL) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) _, err := blobURL.ClearPages(ctx, 0, PageBlobPageBytes, PageBlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) 
TestBlobClearPagesIfSequenceNumberLessThanTrue(c *chk.C) { containerURL, blobURL := setupClearPagesTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.ClearPages(ctx, 0, PageBlobPageBytes, PageBlobAccessConditions{SequenceNumberAccessConditions: SequenceNumberAccessConditions{IfSequenceNumberLessThan: 10}}) c.Assert(err, chk.IsNil) validateClearPagesTest(c, blobURL) } func (s *aztestsSuite) TestBlobClearPagesIfSequenceNumberLessThanFalse(c *chk.C) { containerURL, blobURL := setupClearPagesTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.UpdateSequenceNumber(ctx, SequenceNumberActionUpdate, 10, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.ClearPages(ctx, 0, PageBlobPageBytes, PageBlobAccessConditions{SequenceNumberAccessConditions: SequenceNumberAccessConditions{IfSequenceNumberLessThan: 1}}) validateStorageError(c, err, ServiceCodeSequenceNumberConditionNotMet) } func (s *aztestsSuite) TestBlobClearPagesIfSequenceNumberLessThanNegOne(c *chk.C) { containerURL, blobURL := setupClearPagesTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.ClearPages(ctx, 0, PageBlobPageBytes, PageBlobAccessConditions{SequenceNumberAccessConditions: SequenceNumberAccessConditions{IfSequenceNumberLessThan: -1}}) // This will cause the library to set the value of the header to 0 validateStorageError(c, err, ServiceCodeSequenceNumberConditionNotMet) } func (s *aztestsSuite) TestBlobClearPagesIfSequenceNumberLTETrue(c *chk.C) { containerURL, blobURL := setupClearPagesTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.ClearPages(ctx, 0, PageBlobPageBytes, PageBlobAccessConditions{SequenceNumberAccessConditions: SequenceNumberAccessConditions{IfSequenceNumberLessThanOrEqual: 10}}) c.Assert(err, chk.IsNil) validateClearPagesTest(c, blobURL) } func (s *aztestsSuite) TestBlobClearPagesIfSequenceNumberLTEFalse(c *chk.C) { containerURL, blobURL := setupClearPagesTest(c) defer deleteContainer(c, containerURL) _, err := 
blobURL.UpdateSequenceNumber(ctx, SequenceNumberActionUpdate, 10, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.ClearPages(ctx, 0, PageBlobPageBytes, PageBlobAccessConditions{SequenceNumberAccessConditions: SequenceNumberAccessConditions{IfSequenceNumberLessThanOrEqual: 1}}) validateStorageError(c, err, ServiceCodeSequenceNumberConditionNotMet) } func (s *aztestsSuite) TestBlobClearPagesIfSequenceNumberLTENegOne(c *chk.C) { containerURL, blobURL := setupClearPagesTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.ClearPages(ctx, 0, PageBlobPageBytes, PageBlobAccessConditions{SequenceNumberAccessConditions: SequenceNumberAccessConditions{IfSequenceNumberLessThanOrEqual: -1}}) // This will cause the library to set the value of the header to 0 c.Assert(err, chk.IsNil) validateClearPagesTest(c, blobURL) } func (s *aztestsSuite) TestBlobClearPagesIfSequenceNumberEqualTrue(c *chk.C) { containerURL, blobURL := setupClearPagesTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.UpdateSequenceNumber(ctx, SequenceNumberActionUpdate, 10, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.ClearPages(ctx, 0, PageBlobPageBytes, PageBlobAccessConditions{SequenceNumberAccessConditions: SequenceNumberAccessConditions{IfSequenceNumberEqual: 10}}) c.Assert(err, chk.IsNil) validateClearPagesTest(c, blobURL) } func (s *aztestsSuite) TestBlobClearPagesIfSequenceNumberEqualFalse(c *chk.C) { containerURL, blobURL := setupClearPagesTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.UpdateSequenceNumber(ctx, SequenceNumberActionUpdate, 10, BlobAccessConditions{}) c.Assert(err, chk.IsNil) _, err = blobURL.ClearPages(ctx, 0, PageBlobPageBytes, PageBlobAccessConditions{SequenceNumberAccessConditions: SequenceNumberAccessConditions{IfSequenceNumberEqual: 1}}) validateStorageError(c, err, ServiceCodeSequenceNumberConditionNotMet) } func (s *aztestsSuite) TestBlobClearPagesIfSequenceNumberEqualNegOne(c *chk.C) { 
containerURL, blobURL := setupClearPagesTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.ClearPages(ctx, 0, PageBlobPageBytes, PageBlobAccessConditions{SequenceNumberAccessConditions: SequenceNumberAccessConditions{IfSequenceNumberEqual: -1}}) // This will cause the library to set the value of the header to 0 c.Assert(err, chk.IsNil) validateClearPagesTest(c, blobURL) } func setupGetPageRangesTest(c *chk.C) (containerURL ContainerURL, blobURL PageBlobURL) { bsu := getBSU() containerURL, _ = createNewContainer(c, bsu) blobURL, _ = createNewPageBlob(c, containerURL) _, err := blobURL.UploadPages(ctx, 0, getReaderToRandomBytes(PageBlobPageBytes), PageBlobAccessConditions{}, nil) c.Assert(err, chk.IsNil) return } func validateBasicGetPageRanges(c *chk.C, resp *PageList, err error) { c.Assert(err, chk.IsNil) c.Assert(resp.PageRange, chk.HasLen, 1) c.Assert(resp.PageRange[0], chk.Equals, PageRange{Start: 0, End: PageBlobPageBytes - 1}) } func (s *aztestsSuite) TestBlobGetPageRangesEmptyBlob(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) resp, err := blobURL.GetPageRanges(ctx, 0, 0, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.PageRange, chk.HasLen, 0) } func (s *aztestsSuite) TestBlobGetPageRangesEmptyRange(c *chk.C) { containerURL, blobURL := setupGetPageRangesTest(c) defer deleteContainer(c, containerURL) resp, err := blobURL.GetPageRanges(ctx, 0, 0, BlobAccessConditions{}) validateBasicGetPageRanges(c, resp, err) } func (s *aztestsSuite) TestBlobGetPageRangesInvalidRange(c *chk.C) { containerURL, blobURL := setupGetPageRangesTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.GetPageRanges(ctx, -2, 500, BlobAccessConditions{}) c.Assert(err, chk.IsNil) } func (s *aztestsSuite) TestBlobGetPageRangesNonContiguousRanges(c *chk.C) { containerURL, blobURL := setupGetPageRangesTest(c) defer deleteContainer(c, 
containerURL) _, err := blobURL.UploadPages(ctx, PageBlobPageBytes*2, getReaderToRandomBytes(PageBlobPageBytes), PageBlobAccessConditions{}, nil) c.Assert(err, chk.IsNil) resp, err := blobURL.GetPageRanges(ctx, 0, 0, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.PageRange, chk.HasLen, 2) c.Assert(resp.PageRange[0], chk.Equals, PageRange{Start: 0, End: PageBlobPageBytes - 1}) c.Assert(resp.PageRange[1], chk.Equals, PageRange{Start: PageBlobPageBytes * 2, End: (PageBlobPageBytes * 3) - 1}) } func (s *aztestsSuite) TestblobGetPageRangesNotPageAligned(c *chk.C) { containerURL, blobURL := setupGetPageRangesTest(c) defer deleteContainer(c, containerURL) resp, err := blobURL.GetPageRanges(ctx, 0, 2000, BlobAccessConditions{}) c.Assert(err, chk.IsNil) validateBasicGetPageRanges(c, resp, err) } func (s *aztestsSuite) TestBlobGetPageRangesSnapshot(c *chk.C) { containerURL, blobURL := setupGetPageRangesTest(c) defer deleteContainer(c, containerURL) resp, _ := blobURL.CreateSnapshot(ctx, nil, BlobAccessConditions{}) snapshotURL := blobURL.WithSnapshot(resp.Snapshot()) resp2, err := snapshotURL.GetPageRanges(ctx, 0, 0, BlobAccessConditions{}) c.Assert(err, chk.IsNil) validateBasicGetPageRanges(c, resp2, err) } func (s *aztestsSuite) TestBlobGetPageRangesIfModifiedSinceTrue(c *chk.C) { containerURL, blobURL := setupGetPageRangesTest(c) defer deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(-10) resp, err := blobURL.GetPageRanges(ctx, 0, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) validateBasicGetPageRanges(c, resp, err) } func (s *aztestsSuite) TestBlobGetPageRangesIfModifiedSinceFalse(c *chk.C) { containerURL, blobURL := setupGetPageRangesTest(c) defer deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.GetPageRanges(ctx, 0, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) serr := 
err.(StorageError) c.Assert(serr.Response().StatusCode, chk.Equals, 304) // Service Code not returned in the body for a HEAD } func (s *aztestsSuite) TestBlobGetPageRangesIfUnmodifiedSinceTrue(c *chk.C) { containerURL, blobURL := setupGetPageRangesTest(c) defer deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(10) resp, err := blobURL.GetPageRanges(ctx, 0, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) validateBasicGetPageRanges(c, resp, err) } func (s *aztestsSuite) TestBlobGetPageRangesIfUnmodifiedSinceFalse(c *chk.C) { containerURL, blobURL := setupGetPageRangesTest(c) defer deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(-10) _, err := blobURL.GetPageRanges(ctx, 0, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobGetPageRangesIfMatchTrue(c *chk.C) { containerURL, blobURL := setupGetPageRangesTest(c) defer deleteContainer(c, containerURL) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) resp2, err := blobURL.GetPageRanges(ctx, 0, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}}) validateBasicGetPageRanges(c, resp2, err) } func (s *aztestsSuite) TestBlobGetPageRangesIfMatchFalse(c *chk.C) { containerURL, blobURL := setupGetPageRangesTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.GetPageRanges(ctx, 0, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobGetPageRangesIfNoneMatchTrue(c *chk.C) { containerURL, blobURL := setupGetPageRangesTest(c) defer deleteContainer(c, containerURL) resp, err := blobURL.GetPageRanges(ctx, 0, 0, BlobAccessConditions{ModifiedAccessConditions: 
ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}}) validateBasicGetPageRanges(c, resp, err) } func (s *aztestsSuite) TestBlobGetPageRangesIfNoneMatchFalse(c *chk.C) { containerURL, blobURL := setupGetPageRangesTest(c) defer deleteContainer(c, containerURL) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) _, err := blobURL.GetPageRanges(ctx, 0, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}}) serr := err.(StorageError) c.Assert(serr.Response().StatusCode, chk.Equals, 304) // Service Code not returned in the body for a HEAD } func setupDiffPageRangesTest(c *chk.C) (containerURL ContainerURL, blobURL PageBlobURL, snapshot string) { bsu := getBSU() containerURL, _ = createNewContainer(c, bsu) blobURL, _ = createNewPageBlob(c, containerURL) _, err := blobURL.UploadPages(ctx, 0, getReaderToRandomBytes(PageBlobPageBytes), PageBlobAccessConditions{}, nil) c.Assert(err, chk.IsNil) resp, err := blobURL.CreateSnapshot(ctx, nil, BlobAccessConditions{}) c.Assert(err, chk.IsNil) snapshot = resp.Snapshot() _, err = blobURL.UploadPages(ctx, 0, getReaderToRandomBytes(PageBlobPageBytes), PageBlobAccessConditions{}, nil) c.Assert(err, chk.IsNil) // This ensures there is a diff on the first page return } func validateDiffPageRanges(c *chk.C, resp *PageList, err error) { c.Assert(err, chk.IsNil) c.Assert(resp.PageRange, chk.HasLen, 1) c.Assert(resp.PageRange[0].Start, chk.Equals, int64(0)) c.Assert(resp.PageRange[0].End, chk.Equals, int64(PageBlobPageBytes-1)) } func (s *aztestsSuite) TestBlobDiffPageRangesNonExistantSnapshot(c *chk.C) { containerURL, blobURL, snapshot := setupDiffPageRangesTest(c) defer deleteContainer(c, containerURL) snapshotTime, _ := time.Parse(SnapshotTimeFormat, snapshot) snapshotTime = snapshotTime.Add(time.Minute) _, err := blobURL.GetPageRangesDiff(ctx, 0, 0, snapshotTime.Format(SnapshotTimeFormat), BlobAccessConditions{}) validateStorageError(c, err, ServiceCodePreviousSnapshotNotFound) 
} func (s *aztestsSuite) TestBlobDiffPageRangeInvalidRange(c *chk.C) { containerURL, blobURL, snapshot := setupDiffPageRangesTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.GetPageRangesDiff(ctx, -22, 14, snapshot, BlobAccessConditions{}) c.Assert(err, chk.IsNil) } func (s *aztestsSuite) TestBlobDiffPageRangeIfModifiedSinceTrue(c *chk.C) { containerURL, blobURL, snapshot := setupDiffPageRangesTest(c) defer deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(-10) resp, err := blobURL.GetPageRangesDiff(ctx, 0, 0, snapshot, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) validateDiffPageRanges(c, resp, err) } func (s *aztestsSuite) TestBlobDiffPageRangeIfModifiedSinceFalse(c *chk.C) { containerURL, blobURL, snapshot := setupDiffPageRangesTest(c) defer deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.GetPageRangesDiff(ctx, 0, 0, snapshot, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) serr := err.(StorageError) c.Assert(serr.Response().StatusCode, chk.Equals, 304) // Service Code not returned in the body for a HEAD } func (s *aztestsSuite) TestBlobDiffPageRangeIfUnmodifiedSinceTrue(c *chk.C) { containerURL, blobURL, snapshot := setupDiffPageRangesTest(c) defer deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(10) resp, err := blobURL.GetPageRangesDiff(ctx, 0, 0, snapshot, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) validateDiffPageRanges(c, resp, err) } func (s *aztestsSuite) TestBlobDiffPageRangeIfUnmodifiedSinceFalse(c *chk.C) { containerURL, blobURL, snapshot := setupDiffPageRangesTest(c) defer deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(-10) _, err := blobURL.GetPageRangesDiff(ctx, 0, 0, snapshot, BlobAccessConditions{ModifiedAccessConditions: 
ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobDiffPageRangeIfMatchTrue(c *chk.C) { containerURL, blobURL, snapshot := setupDiffPageRangesTest(c) defer deleteContainer(c, containerURL) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) resp2, err := blobURL.GetPageRangesDiff(ctx, 0, 0, snapshot, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}}) validateDiffPageRanges(c, resp2, err) } func (s *aztestsSuite) TestBlobDiffPageRangeIfMatchFalse(c *chk.C) { containerURL, blobURL, snapshot := setupDiffPageRangesTest(c) defer deleteContainer(c, containerURL) _, err := blobURL.GetPageRangesDiff(ctx, 0, 0, snapshot, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobDiffPageRangeIfNoneMatchTrue(c *chk.C) { containerURL, blobURL, snapshot := setupDiffPageRangesTest(c) defer deleteContainer(c, containerURL) resp, err := blobURL.GetPageRangesDiff(ctx, 0, 0, snapshot, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}}) validateDiffPageRanges(c, resp, err) } func (s *aztestsSuite) TestBlobDiffPageRangeIfNoneMatchFalse(c *chk.C) { containerURL, blobURL, snapshot := setupDiffPageRangesTest(c) defer deleteContainer(c, containerURL) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) _, err := blobURL.GetPageRangesDiff(ctx, 0, 0, snapshot, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}}) serr := err.(StorageError) c.Assert(serr.Response().StatusCode, chk.Equals, 304) // Service Code not returned in the body for a HEAD } func (s *aztestsSuite) TestBlobResizeZero(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, 
_ := createNewPageBlob(c, containerURL) // The default blob is created with size > 0, so this should actually update _, err := blobURL.Resize(ctx, 0, BlobAccessConditions{}) c.Assert(err, chk.IsNil) resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.ContentLength(), chk.Equals, int64(0)) } func (s *aztestsSuite) TestBlobResizeInvalidSizeNegative(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) _, err := blobURL.Resize(ctx, -4, BlobAccessConditions{}) c.Assert(err, chk.Not(chk.IsNil)) } func (s *aztestsSuite) TestBlobResizeInvalidSizeMisaligned(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) _, err := blobURL.Resize(ctx, 12, BlobAccessConditions{}) c.Assert(err, chk.Not(chk.IsNil)) } func validateResize(c *chk.C, blobURL PageBlobURL) { resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(resp.ContentLength(), chk.Equals, int64(PageBlobPageBytes)) } func (s *aztestsSuite) TestBlobResizeIfModifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) currentTime := getRelativeTimeGMT(-10) _, err := blobURL.Resize(ctx, PageBlobPageBytes, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validateResize(c, blobURL) } func (s *aztestsSuite) TestBlobResizeIfModifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.Resize(ctx, PageBlobPageBytes, BlobAccessConditions{ModifiedAccessConditions: 
ModifiedAccessConditions{IfModifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobResizeIfUnmodifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.Resize(ctx, PageBlobPageBytes, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validateResize(c, blobURL) } func (s *aztestsSuite) TestBlobResizeIfUnmodifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) currentTime := getRelativeTimeGMT(-10) _, err := blobURL.Resize(ctx, PageBlobPageBytes, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobResizeIfMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) _, err := blobURL.Resize(ctx, PageBlobPageBytes, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}}) c.Assert(err, chk.IsNil) validateResize(c, blobURL) } func (s *aztestsSuite) TestBlobResizeIfMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) _, err := blobURL.Resize(ctx, PageBlobPageBytes, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) 
TestBlobResizeIfNoneMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) _, err := blobURL.Resize(ctx, PageBlobPageBytes, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}}) c.Assert(err, chk.IsNil) validateResize(c, blobURL) } func (s *aztestsSuite) TestBlobResizeIfNoneMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) _, err := blobURL.Resize(ctx, PageBlobPageBytes, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobSetSequenceNumberActionTypeInvalid(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) _, err := blobURL.UpdateSequenceNumber(ctx, SequenceNumberActionType("garbage"), 1, BlobAccessConditions{}) validateStorageError(c, err, ServiceCodeInvalidHeaderValue) } func (s *aztestsSuite) TestBlobSetSequenceNumberSequenceNumberInvalid(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) defer func() { // Invalid sequence number should panic recover() }() blobURL.UpdateSequenceNumber(ctx, SequenceNumberActionUpdate, -1, BlobAccessConditions{}) } func validateSequenceNumberSet(c *chk.C, blobURL PageBlobURL) { resp, err := blobURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) c.Assert(resp.BlobSequenceNumber(), chk.Equals, int64(1)) } func (s *aztestsSuite) TestBlobSetSequenceNumberIfModifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, 
_ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) currentTime := getRelativeTimeGMT(-10) _, err := blobURL.UpdateSequenceNumber(ctx, SequenceNumberActionIncrement, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validateSequenceNumberSet(c, blobURL) } func (s *aztestsSuite) TestBlobSetSequenceNumberIfModifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.UpdateSequenceNumber(ctx, SequenceNumberActionIncrement, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobSetSequenceNumberIfUnmodifiedSinceTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) currentTime := getRelativeTimeGMT(10) _, err := blobURL.UpdateSequenceNumber(ctx, SequenceNumberActionIncrement, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validateSequenceNumberSet(c, blobURL) } func (s *aztestsSuite) TestBlobSetSequenceNumberIfUnmodifiedSinceFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) currentTime := getRelativeTimeGMT(-10) _, err := blobURL.UpdateSequenceNumber(ctx, SequenceNumberActionIncrement, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) 
TestBlobSetSequenceNumberIfMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) _, err := blobURL.UpdateSequenceNumber(ctx, SequenceNumberActionIncrement, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}}) c.Assert(err, chk.IsNil) validateSequenceNumberSet(c, blobURL) } func (s *aztestsSuite) TestBlobSetSequenceNumberIfMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) _, err := blobURL.UpdateSequenceNumber(ctx, SequenceNumberActionIncrement, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobSetSequenceNumberIfNoneMatchTrue(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) _, err := blobURL.UpdateSequenceNumber(ctx, SequenceNumberActionIncrement, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}}) c.Assert(err, chk.IsNil) validateSequenceNumberSet(c, blobURL) } func (s *aztestsSuite) TestBlobSetSequenceNumberIfNoneMatchFalse(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) resp, _ := blobURL.GetProperties(ctx, BlobAccessConditions{}) _, err := blobURL.UpdateSequenceNumber(ctx, SequenceNumberActionIncrement, 0, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func waitForIncrementalCopy(c *chk.C, 
copyBlobURL PageBlobURL, blobCopyResponse *PageBlobCopyIncrementalResponse) string { status := blobCopyResponse.CopyStatus() var getPropertiesAndMetadataResult *BlobGetPropertiesResponse // Wait for the copy to finish start := time.Now() for status != CopyStatusSuccess { getPropertiesAndMetadataResult, _ = copyBlobURL.GetProperties(ctx, BlobAccessConditions{}) status = getPropertiesAndMetadataResult.CopyStatus() currentTime := time.Now() if currentTime.Sub(start) >= time.Minute { c.Fail() } } return getPropertiesAndMetadataResult.DestinationSnapshot() } func setupStartIncrementalCopyTest(c *chk.C) (containerURL ContainerURL, blobURL PageBlobURL, copyBlobURL PageBlobURL, snapshot string) { bsu := getBSU() containerURL, _ = createNewContainer(c, bsu) containerURL.SetAccessPolicy(ctx, PublicAccessBlob, nil, ContainerAccessConditions{}) blobURL, _ = createNewPageBlob(c, containerURL) resp, _ := blobURL.CreateSnapshot(ctx, nil, BlobAccessConditions{}) copyBlobURL, _ = getPageBlobURL(c, containerURL) // Must create the incremental copy blob so that the access conditions work on it resp2, err := copyBlobURL.StartCopyIncremental(ctx, blobURL.URL(), resp.Snapshot(), BlobAccessConditions{}) c.Assert(err, chk.IsNil) waitForIncrementalCopy(c, copyBlobURL, resp2) resp, _ = blobURL.CreateSnapshot(ctx, nil, BlobAccessConditions{}) // Take a new snapshot so the next copy will succeed snapshot = resp.Snapshot() return } func validateIncrementalCopy(c *chk.C, copyBlobURL PageBlobURL, resp *PageBlobCopyIncrementalResponse) { t := waitForIncrementalCopy(c, copyBlobURL, resp) // If we can access the snapshot without error, we are satisfied that it was created as a result of the copy copySnapshotURL := copyBlobURL.WithSnapshot(t) _, err := copySnapshotURL.GetProperties(ctx, BlobAccessConditions{}) c.Assert(err, chk.IsNil) } func (s *aztestsSuite) TestBlobStartIncrementalCopySnapshotNotExist(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer 
deleteContainer(c, containerURL) blobURL, _ := createNewPageBlob(c, containerURL) copyBlobURL, _ := getPageBlobURL(c, containerURL) snapshot := time.Now().UTC().Format(SnapshotTimeFormat) _, err := copyBlobURL.StartCopyIncremental(ctx, blobURL.URL(), snapshot, BlobAccessConditions{}) validateStorageError(c, err, ServiceCodeCannotVerifyCopySource) } func (s *aztestsSuite) TestBlobStartIncrementalCopyIfModifiedSinceTrue(c *chk.C) { containerURL, blobURL, copyBlobURL, snapshot := setupStartIncrementalCopyTest(c) defer deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(-20) resp, err := copyBlobURL.StartCopyIncremental(ctx, blobURL.URL(), snapshot, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validateIncrementalCopy(c, copyBlobURL, resp) } func (s *aztestsSuite) TestBlobStartIncrementalCopyIfModifiedSinceFalse(c *chk.C) { containerURL, blobURL, copyBlobURL, snapshot := setupStartIncrementalCopyTest(c) defer deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(20) _, err := copyBlobURL.StartCopyIncremental(ctx, blobURL.URL(), snapshot, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfModifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobStartIncrementalCopyIfUnmodifiedSinceTrue(c *chk.C) { containerURL, blobURL, copyBlobURL, snapshot := setupStartIncrementalCopyTest(c) defer deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(20) resp, err := copyBlobURL.StartCopyIncremental(ctx, blobURL.URL(), snapshot, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) c.Assert(err, chk.IsNil) validateIncrementalCopy(c, copyBlobURL, resp) } func (s *aztestsSuite) TestBlobStartIncrementalCopyIfUnmodifiedSinceFalse(c *chk.C) { containerURL, blobURL, copyBlobURL, snapshot := setupStartIncrementalCopyTest(c) defer 
deleteContainer(c, containerURL) currentTime := getRelativeTimeGMT(-20) _, err := copyBlobURL.StartCopyIncremental(ctx, blobURL.URL(), snapshot, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfUnmodifiedSince: currentTime}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } func (s *aztestsSuite) TestBlobStartIncrementalCopyIfMatchTrue(c *chk.C) { containerURL, blobURL, copyBlobURL, snapshot := setupStartIncrementalCopyTest(c) defer deleteContainer(c, containerURL) resp, _ := copyBlobURL.GetProperties(ctx, BlobAccessConditions{}) resp2, err := copyBlobURL.StartCopyIncremental(ctx, blobURL.URL(), snapshot, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: resp.ETag()}}) c.Assert(err, chk.IsNil) validateIncrementalCopy(c, copyBlobURL, resp2) } func (s *aztestsSuite) TestBlobStartIncrementalCopyIfMatchFalse(c *chk.C) { containerURL, blobURL, copyBlobURL, snapshot := setupStartIncrementalCopyTest(c) defer deleteContainer(c, containerURL) _, err := copyBlobURL.StartCopyIncremental(ctx, blobURL.URL(), snapshot, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: ETag("garbage")}}) validateStorageError(c, err, ServiceCodeTargetConditionNotMet) } func (s *aztestsSuite) TestBlobStartIncrementalCopyIfNoneMatchTrue(c *chk.C) { containerURL, blobURL, copyBlobURL, snapshot := setupStartIncrementalCopyTest(c) defer deleteContainer(c, containerURL) resp, err := copyBlobURL.StartCopyIncremental(ctx, blobURL.URL(), snapshot, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: ETag("garbage")}}) c.Assert(err, chk.IsNil) validateIncrementalCopy(c, copyBlobURL, resp) } func (s *aztestsSuite) TestBlobStartIncrementalCopyIfNoneMatchFalse(c *chk.C) { containerURL, blobURL, copyBlobURL, snapshot := setupStartIncrementalCopyTest(c) defer deleteContainer(c, containerURL) resp, _ := copyBlobURL.GetProperties(ctx, BlobAccessConditions{}) _, err := 
copyBlobURL.StartCopyIncremental(ctx, blobURL.URL(), snapshot, BlobAccessConditions{ModifiedAccessConditions: ModifiedAccessConditions{IfNoneMatch: resp.ETag()}}) validateStorageError(c, err, ServiceCodeConditionNotMet) } azure-storage-blob-go-0.10.0/azblob/zt_url_service_test.go000066400000000000000000000301341367515646300236160ustar00rootroot00000000000000package azblob import ( "context" "strings" "time" chk "gopkg.in/check.v1" // go get gopkg.in/check.v1 ) func (s *aztestsSuite) TestGetAccountInfo(c *chk.C) { sa := getBSU() // Ensure the call succeeded. Don't test for specific account properties because we can't/don't want to set account properties. sAccInfo, err := sa.GetAccountInfo(context.Background()) c.Assert(err, chk.IsNil) c.Assert(*sAccInfo, chk.Not(chk.DeepEquals), ServiceGetAccountInfoResponse{}) // Test on a container cURL := sa.NewContainerURL(generateContainerName()) _, err = cURL.Create(ctx, Metadata{}, PublicAccessNone) c.Assert(err, chk.IsNil) cAccInfo, err := cURL.GetAccountInfo(ctx) c.Assert(err, chk.IsNil) c.Assert(*cAccInfo, chk.Not(chk.DeepEquals), ContainerGetAccountInfoResponse{}) // test on a block blob URL. They all call the same thing on the base URL, so only one test is needed for that. 
bbURL := cURL.NewBlockBlobURL(generateBlobName()) _, err = bbURL.Upload(ctx, strings.NewReader("blah"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}) c.Assert(err, chk.IsNil) bAccInfo, err := bbURL.GetAccountInfo(ctx) c.Assert(err, chk.IsNil) c.Assert(*bAccInfo, chk.Not(chk.DeepEquals), BlobGetAccountInfoResponse{}) } func (s *aztestsSuite) TestListContainers(c *chk.C) { sa := getBSU() resp, err := sa.ListContainersSegment(context.Background(), Marker{}, ListContainersSegmentOptions{Prefix: containerPrefix}) c.Assert(err, chk.IsNil) c.Assert(resp.Response().StatusCode, chk.Equals, 200) c.Assert(resp.RequestID(), chk.Not(chk.Equals), "") c.Assert(resp.Version(), chk.Not(chk.Equals), "") c.Assert(len(resp.ContainerItems) >= 0, chk.Equals, true) c.Assert(resp.ServiceEndpoint, chk.NotNil) container, name := createNewContainer(c, sa) defer delContainer(c, container) md := Metadata{ "foo": "foovalue", "bar": "barvalue", } _, err = container.SetMetadata(context.Background(), md, ContainerAccessConditions{}) c.Assert(err, chk.IsNil) resp, err = sa.ListContainersSegment(context.Background(), Marker{}, ListContainersSegmentOptions{Detail: ListContainersDetail{Metadata: true}, Prefix: name}) c.Assert(err, chk.IsNil) c.Assert(resp.ContainerItems, chk.HasLen, 1) c.Assert(resp.ContainerItems[0].Name, chk.NotNil) c.Assert(resp.ContainerItems[0].Properties, chk.NotNil) c.Assert(resp.ContainerItems[0].Properties.LastModified, chk.NotNil) c.Assert(resp.ContainerItems[0].Properties.Etag, chk.NotNil) c.Assert(resp.ContainerItems[0].Properties.LeaseStatus, chk.Equals, LeaseStatusUnlocked) c.Assert(resp.ContainerItems[0].Properties.LeaseState, chk.Equals, LeaseStateAvailable) c.Assert(string(resp.ContainerItems[0].Properties.LeaseDuration), chk.Equals, "") c.Assert(string(resp.ContainerItems[0].Properties.PublicAccess), chk.Equals, string(PublicAccessNone)) c.Assert(resp.ContainerItems[0].Metadata, chk.DeepEquals, md) } func (s *aztestsSuite) TestListContainersPaged(c *chk.C) { 
sa := getBSU() const numContainers = 4 const maxResultsPerPage = 2 const pagedContainersPrefix = "azblobspagedtest" containers := make([]ContainerURL, numContainers) for i := 0; i < numContainers; i++ { containers[i], _ = createNewContainerWithSuffix(c, sa, pagedContainersPrefix) } defer func() { for i := range containers { delContainer(c, containers[i]) } }() marker := Marker{} iterations := numContainers / maxResultsPerPage for i := 0; i < iterations; i++ { resp, err := sa.ListContainersSegment(context.Background(), marker, ListContainersSegmentOptions{MaxResults: maxResultsPerPage, Prefix: containerPrefix + pagedContainersPrefix}) c.Assert(err, chk.IsNil) c.Assert(resp.ContainerItems, chk.HasLen, maxResultsPerPage) hasMore := i < iterations-1 c.Assert(resp.NextMarker.NotDone(), chk.Equals, hasMore) marker = resp.NextMarker } } func (s *aztestsSuite) TestAccountListContainersEmptyPrefix(c *chk.C) { bsu := getBSU() containerURL1, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL1) containerURL2, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL2) response, err := bsu.ListContainersSegment(ctx, Marker{}, ListContainersSegmentOptions{}) c.Assert(err, chk.IsNil) c.Assert(len(response.ContainerItems) >= 2, chk.Equals, true) // The response should contain at least the two created containers. 
Probably many more } func (s *aztestsSuite) TestAccountListContainersIncludeTypeMetadata(c *chk.C) { bsu := getBSU() containerURLNoMetadata, nameNoMetadata := createNewContainerWithSuffix(c, bsu, "a") defer deleteContainer(c, containerURLNoMetadata) containerURLMetadata, nameMetadata := createNewContainerWithSuffix(c, bsu, "b") defer deleteContainer(c, containerURLMetadata) // Test on containers with and without metadata _, err := containerURLMetadata.SetMetadata(ctx, basicMetadata, ContainerAccessConditions{}) c.Assert(err, chk.IsNil) // Also validates not specifying MaxResults response, err := bsu.ListContainersSegment(ctx, Marker{}, ListContainersSegmentOptions{Prefix: containerPrefix, Detail: ListContainersDetail{Metadata: true}}) c.Assert(err, chk.IsNil) c.Assert(response.ContainerItems[0].Name, chk.Equals, nameNoMetadata) c.Assert(response.ContainerItems[0].Metadata, chk.HasLen, 0) c.Assert(response.ContainerItems[1].Name, chk.Equals, nameMetadata) c.Assert(response.ContainerItems[1].Metadata, chk.DeepEquals, basicMetadata) } func (s *aztestsSuite) TestAccountListContainersMaxResultsNegative(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) _, err := bsu.ListContainersSegment(ctx, Marker{}, *(&ListContainersSegmentOptions{Prefix: containerPrefix, MaxResults: -2})) c.Assert(err, chk.Not(chk.IsNil)) } func (s *aztestsSuite) TestAccountListContainersMaxResultsZero(c *chk.C) { bsu := getBSU() containerURL, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL) // Max Results = 0 means the value will be ignored, the header not set, and the server default used resp, err := bsu.ListContainersSegment(ctx, Marker{}, *(&ListContainersSegmentOptions{Prefix: containerPrefix, MaxResults: 0})) c.Assert(err, chk.IsNil) // There could be existing container c.Assert(len(resp.ContainerItems) >= 1, chk.Equals, true) } func (s *aztestsSuite) TestAccountListContainersMaxResultsExact(c *chk.C) { // If 
this test fails, ensure there are no extra containers prefixed with go in the account. These may be left over if a test is interrupted. bsu := getBSU() containerURL1, containerName1 := createNewContainerWithSuffix(c, bsu, "a") defer deleteContainer(c, containerURL1) containerURL2, containerName2 := createNewContainerWithSuffix(c, bsu, "b") defer deleteContainer(c, containerURL2) response, err := bsu.ListContainersSegment(ctx, Marker{}, *(&ListContainersSegmentOptions{Prefix: containerPrefix, MaxResults: 2})) c.Assert(err, chk.IsNil) c.Assert(response.ContainerItems, chk.HasLen, 2) c.Assert(response.ContainerItems[0].Name, chk.Equals, containerName1) c.Assert(response.ContainerItems[1].Name, chk.Equals, containerName2) } func (s *aztestsSuite) TestAccountListContainersMaxResultsInsufficient(c *chk.C) { bsu := getBSU() containerURL1, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL1) containerURL2, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL2) response, err := bsu.ListContainersSegment(ctx, Marker{}, *(&ListContainersSegmentOptions{Prefix: containerPrefix, MaxResults: 1})) c.Assert(err, chk.IsNil) c.Assert(len(response.ContainerItems), chk.Equals, 1) } func (s *aztestsSuite) TestAccountListContainersMaxResultsSufficient(c *chk.C) { bsu := getBSU() containerURL1, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL1) containerURL2, _ := createNewContainer(c, bsu) defer deleteContainer(c, containerURL2) response, err := bsu.ListContainersSegment(ctx, Marker{}, *(&ListContainersSegmentOptions{Prefix: containerPrefix, MaxResults: 3})) c.Assert(err, chk.IsNil) // This case could be instable, there could be existing containers, so the count should be >= 2 c.Assert(len(response.ContainerItems) >= 2, chk.Equals, true) } func (s *aztestsSuite) TestAccountDeleteRetentionPolicy(c *chk.C) { bsu := getBSU() days := int32(5) _, err := bsu.SetProperties(ctx, StorageServiceProperties{DeleteRetentionPolicy: 
&RetentionPolicy{Enabled: true, Days: &days}}) c.Assert(err, chk.IsNil) // From FE, 30 seconds is guaranteed t be enough. time.Sleep(time.Second * 30) resp, err := bsu.GetProperties(ctx) c.Assert(err, chk.IsNil) c.Assert(resp.DeleteRetentionPolicy.Enabled, chk.Equals, true) c.Assert(*resp.DeleteRetentionPolicy.Days, chk.Equals, int32(5)) _, err = bsu.SetProperties(ctx, StorageServiceProperties{DeleteRetentionPolicy: &RetentionPolicy{Enabled: false}}) c.Assert(err, chk.IsNil) // From FE, 30 seconds is guaranteed t be enough. time.Sleep(time.Second * 30) resp, err = bsu.GetProperties(ctx) c.Assert(err, chk.IsNil) c.Assert(resp.DeleteRetentionPolicy.Enabled, chk.Equals, false) c.Assert(resp.DeleteRetentionPolicy.Days, chk.IsNil) } func (s *aztestsSuite) TestAccountDeleteRetentionPolicyEmpty(c *chk.C) { bsu := getBSU() days := int32(5) _, err := bsu.SetProperties(ctx, StorageServiceProperties{DeleteRetentionPolicy: &RetentionPolicy{Enabled: true, Days: &days}}) c.Assert(err, chk.IsNil) // From FE, 30 seconds is guaranteed t be enough. time.Sleep(time.Second * 30) resp, err := bsu.GetProperties(ctx) c.Assert(err, chk.IsNil) c.Assert(resp.DeleteRetentionPolicy.Enabled, chk.Equals, true) c.Assert(*resp.DeleteRetentionPolicy.Days, chk.Equals, int32(5)) // Enabled should default to false and therefore disable the policy _, err = bsu.SetProperties(ctx, StorageServiceProperties{DeleteRetentionPolicy: &RetentionPolicy{}}) c.Assert(err, chk.IsNil) // From FE, 30 seconds is guaranteed t be enough. 
time.Sleep(time.Second * 30) resp, err = bsu.GetProperties(ctx) c.Assert(err, chk.IsNil) c.Assert(resp.DeleteRetentionPolicy.Enabled, chk.Equals, false) c.Assert(resp.DeleteRetentionPolicy.Days, chk.IsNil) } func (s *aztestsSuite) TestAccountDeleteRetentionPolicyNil(c *chk.C) { bsu := getBSU() days := int32(5) _, err := bsu.SetProperties(ctx, StorageServiceProperties{DeleteRetentionPolicy: &RetentionPolicy{Enabled: true, Days: &days}}) c.Assert(err, chk.IsNil) // From FE, 30 seconds is guaranteed t be enough. time.Sleep(time.Second * 30) resp, err := bsu.GetProperties(ctx) c.Assert(err, chk.IsNil) c.Assert(resp.DeleteRetentionPolicy.Enabled, chk.Equals, true) c.Assert(*resp.DeleteRetentionPolicy.Days, chk.Equals, int32(5)) _, err = bsu.SetProperties(ctx, StorageServiceProperties{}) c.Assert(err, chk.IsNil) // From FE, 30 seconds is guaranteed t be enough. time.Sleep(time.Second * 30) // If an element of service properties is not passed, the service keeps the current settings. resp, err = bsu.GetProperties(ctx) c.Assert(err, chk.IsNil) c.Assert(resp.DeleteRetentionPolicy.Enabled, chk.Equals, true) c.Assert(*resp.DeleteRetentionPolicy.Days, chk.Equals, int32(5)) // Disable for other tests bsu.SetProperties(ctx, StorageServiceProperties{DeleteRetentionPolicy: &RetentionPolicy{Enabled: false}}) } func (s *aztestsSuite) TestAccountDeleteRetentionPolicyDaysTooSmall(c *chk.C) { bsu := getBSU() days := int32(0) // Minimum days is 1. Validated on the client. _, err := bsu.SetProperties(ctx, StorageServiceProperties{DeleteRetentionPolicy: &RetentionPolicy{Enabled: true, Days: &days}}) c.Assert(strings.Contains(err.Error(), validationErrorSubstring), chk.Equals, true) } func (s *aztestsSuite) TestAccountDeleteRetentionPolicyDaysTooLarge(c *chk.C) { bsu := getBSU() days := int32(366) // Max days is 365. Left to the service for validation. 
_, err := bsu.SetProperties(ctx, StorageServiceProperties{DeleteRetentionPolicy: &RetentionPolicy{Enabled: true, Days: &days}}) validateStorageError(c, err, ServiceCodeInvalidXMLDocument) } func (s *aztestsSuite) TestAccountDeleteRetentionPolicyDaysOmitted(c *chk.C) { bsu := getBSU() // Days is required if enabled is true. _, err := bsu.SetProperties(ctx, StorageServiceProperties{DeleteRetentionPolicy: &RetentionPolicy{Enabled: true}}) validateStorageError(c, err, ServiceCodeInvalidXMLDocument) } azure-storage-blob-go-0.10.0/azblob/zt_user_delegation_sas_test.go000066400000000000000000000106061367515646300253150ustar00rootroot00000000000000package azblob import ( "bytes" "strings" "time" chk "gopkg.in/check.v1" ) //Creates a container and tests permissions by listing blobs func (s *aztestsSuite) TestUserDelegationSASContainer(c *chk.C) { bsu := getBSU() containerURL, containerName := getContainerURL(c, bsu) currentTime := time.Now().UTC() ocred, err := getOAuthCredential("") if err != nil { c.Fatal(err) } // Create pipeline w/ OAuth to handle user delegation key obtaining p := NewPipeline(*ocred, PipelineOptions{}) bsu = bsu.WithPipeline(p) keyInfo := NewKeyInfo(currentTime, currentTime.Add(48*time.Hour)) cudk, err := bsu.GetUserDelegationCredential(ctx, keyInfo, nil, nil) if err != nil { c.Fatal(err) } cSAS, err := BlobSASSignatureValues{ Protocol: SASProtocolHTTPS, StartTime: currentTime, ExpiryTime: currentTime.Add(24 * time.Hour), Permissions: "racwdl", ContainerName: containerName, }.NewSASQueryParameters(cudk) // Create anonymous pipeline p = NewPipeline(NewAnonymousCredential(), PipelineOptions{}) // Create the container _, err = containerURL.Create(ctx, Metadata{}, PublicAccessNone) defer containerURL.Delete(ctx, ContainerAccessConditions{}) if err != nil { c.Fatal(err) } // Craft a container URL w/ container UDK SAS cURL := containerURL.URL() cURL.RawQuery += cSAS.Encode() cSASURL := NewContainerURL(cURL, p) bblob := cSASURL.NewBlockBlobURL("test") _, err 
= bblob.Upload(ctx, strings.NewReader("hello world!"), BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{}) if err != nil { c.Fatal(err) } resp, err := bblob.Download(ctx, 0, 0, BlobAccessConditions{}, false) data := &bytes.Buffer{} body := resp.Body(RetryReaderOptions{}) if body == nil { c.Fatal("download body was nil") } _, err = data.ReadFrom(body) if err != nil { c.Fatal(err) } err = body.Close() if err != nil { c.Fatal(err) } c.Assert(data.String(), chk.Equals, "hello world!") _, err = bblob.Delete(ctx, DeleteSnapshotsOptionNone, BlobAccessConditions{}) if err != nil { c.Fatal(err) } } // Creates a blob, takes a snapshot, downloads from snapshot, and deletes from the snapshot w/ the token func (s *aztestsSuite) TestUserDelegationSASBlob(c *chk.C) { // Accumulate prerequisite details to create storage etc. bsu := getBSU() containerURL, containerName := getContainerURL(c, bsu) blobURL, blobName := getBlockBlobURL(c, containerURL) currentTime := time.Now().UTC() ocred, err := getOAuthCredential("") if err != nil { c.Fatal(err) } // Create pipeline to handle requests p := NewPipeline(*ocred, PipelineOptions{}) // Prepare user delegation key bsu = bsu.WithPipeline(p) keyInfo := NewKeyInfo(currentTime, currentTime.Add(48*time.Hour)) budk, err := bsu.GetUserDelegationCredential(ctx, keyInfo, nil, nil) //MUST have TokenCredential if err != nil { c.Fatal(err) } // Prepare User Delegation SAS query bSAS, err := BlobSASSignatureValues{ Protocol: SASProtocolHTTPS, StartTime: currentTime, ExpiryTime: currentTime.Add(24 * time.Hour), Permissions: "rd", ContainerName: containerName, BlobName: blobName, }.NewSASQueryParameters(budk) if err != nil { c.Fatal(err) } // Create pipeline p = NewPipeline(NewAnonymousCredential(), PipelineOptions{}) // Append User Delegation SAS token to URL bSASParts := NewBlobURLParts(blobURL.URL()) bSASParts.SAS = bSAS bSASURL := NewBlockBlobURL(bSASParts.URL(), p) // Create container & upload sample data _, err = containerURL.Create(ctx, 
Metadata{}, PublicAccessNone) defer containerURL.Delete(ctx, ContainerAccessConditions{}) if err != nil { c.Fatal(err) } data := "Hello World!" _, err = blobURL.Upload(ctx, strings.NewReader(data), BlobHTTPHeaders{ContentType: "text/plain"}, Metadata{}, BlobAccessConditions{}) if err != nil { c.Fatal(err) } // Download data via User Delegation SAS URL; must succeed downloadResponse, err := bSASURL.Download(ctx, 0, 0, BlobAccessConditions{}, false) if err != nil { c.Fatal(err) } downloadedData := &bytes.Buffer{} reader := downloadResponse.Body(RetryReaderOptions{}) _, err = downloadedData.ReadFrom(reader) if err != nil { c.Fatal(err) } err = reader.Close() if err != nil { c.Fatal(err) } c.Assert(data, chk.Equals, downloadedData.String()) // Delete the item using the User Delegation SAS URL; must succeed _, err = bSASURL.Delete(ctx, DeleteSnapshotsOptionInclude, BlobAccessConditions{}) if err != nil { c.Fatal(err) } } azure-storage-blob-go-0.10.0/azblob/zz_generated_append_blob.go000066400000000000000000000574701367515646300245420ustar00rootroot00000000000000package azblob // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "context" "encoding/base64" "github.com/Azure/azure-pipeline-go/pipeline" "io" "io/ioutil" "net/http" "net/url" "strconv" "time" ) // appendBlobClient is the client for the AppendBlob methods of the Azblob service. type appendBlobClient struct { managementClient } // newAppendBlobClient creates an instance of the appendBlobClient client. func newAppendBlobClient(url url.URL, p pipeline.Pipeline) appendBlobClient { return appendBlobClient{newManagementClient(url, p)} } // AppendBlock the Append Block operation commits a new block of data to the end of an existing append blob. The Append // Block operation is permitted only if the blob was created with x-ms-blob-type set to AppendBlob. 
Append Block is // supported only on version 2015-02-21 version or later. // // body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an // error.contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more // information, see Setting // Timeouts for Blob Service Operations. transactionalContentMD5 is specify the transactional md5 for the body, to // be validated by the service. transactionalContentCrc64 is specify the transactional crc64 for the body, to be // validated by the service. leaseID is if specified, the operation only succeeds if the resource's lease is active and // matches this ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob. If // the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than the // value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - // Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation. A // number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to this // number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - // Precondition Failed). encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in // the request. If not specified, encryption is performed with the root account encryption key. For more information, // see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided // encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm // used to produce the encryption key hash. Currently, the only accepted value is "AES256". 
Must be provided if the
// x-ms-encryption-key header is provided. ifModifiedSince / ifUnmodifiedSince / ifMatch / ifNoneMatch are standard
// HTTP conditional headers: the operation proceeds only when the blob's current state satisfies them. requestID is a
// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, leaseID *string, maxSize *int64, appendPosition *int64, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockResponse, error) {
	// body must be non-nil; timeout, when supplied, must be >= 0.
	if err := validate([]validation{
		{targetValue: body,
			constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	request, err := client.appendBlockPreparer(body, contentLength, timeout, transactionalContentMD5, transactionalContentCrc64, leaseID, maxSize, appendPosition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
	if err != nil {
		return nil, err
	}
	response, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.appendBlockResponder}, request)
	if err != nil {
		return nil, err
	}
	return response.(*AppendBlobAppendBlockResponse), err
}

// appendBlockPreparer prepares the AppendBlock request: a PUT against the blob URL with comp=appendblock,
// the block data as the request body, and the optional conditional/encryption headers attached.
func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, leaseID *string, maxSize *int64, appendPosition *int64, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("PUT", client.url, body)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	qp := req.URL.Query()
	if timeout != nil {
		qp.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	qp.Set("comp", "appendblock")
	req.URL.RawQuery = qp.Encode()
	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
	if transactionalContentMD5 != nil {
		req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
	}
	if transactionalContentCrc64 != nil {
		req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64))
	}
	if leaseID != nil {
		req.Header.Set("x-ms-lease-id", *leaseID)
	}
	if maxSize != nil {
		req.Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*maxSize, 10))
	}
	if appendPosition != nil {
		req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10))
	}
	if encryptionKey != nil {
		req.Header.Set("x-ms-encryption-key", *encryptionKey)
	}
	if encryptionKeySha256 != nil {
		req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
	}
	if encryptionAlgorithm != EncryptionAlgorithmNone {
		req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
	}
	if ifModifiedSince != nil {
		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifUnmodifiedSince != nil {
		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifMatch != nil {
		req.Header.Set("If-Match", string(*ifMatch))
	}
	if ifNoneMatch != nil {
		req.Header.Set("If-None-Match", string(*ifNoneMatch))
	}
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	return req, nil
}

// appendBlockResponder handles the response to the AppendBlock request. It accepts 200/201, drains and
// closes the body so the connection can be reused, and wraps the raw response.
func (client appendBlobClient) appendBlockResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK, http.StatusCreated)
	if resp == nil {
		return nil, err
	}
	io.Copy(ioutil.Discard, resp.Response().Body)
	resp.Response().Body.Close()
	return &AppendBlobAppendBlockResponse{rawResponse: resp.Response()}, err
}

// AppendBlockFromURL commits a new block of data, read from sourceURL, to the end of an existing append blob.
// The operation is permitted only if the blob was created with x-ms-blob-type set to AppendBlob and is supported
// on service version 2015-02-21 or later.
//
// sourceURL is the copy source URL. contentLength is the length of the request. sourceRange selects the bytes of
// source data to read. sourceContentMD5 / sourceContentcrc64 are the md5/crc64 calculated for the range of bytes
// read from the copy source. timeout is expressed in seconds. transactionalContentMD5 is the transactional md5 for
// the body. encryptionKey / encryptionKeySha256 / encryptionAlgorithm configure customer-provided-key encryption;
// the SHA-256 and algorithm must be provided whenever x-ms-encryption-key is. leaseID makes the operation succeed
// only if the resource's lease is active and matches this ID. maxSize and appendPosition are the optional
// conditional headers for the append blob's maximum length and the expected byte offset of this append; either
// failing produces HTTP 412. The if*/sourceIf* parameters are HTTP conditional headers applied to the destination
// and source blobs respectively. requestID is a client-generated, opaque value with a 1 KB character limit that is
// recorded in the analytics logs when storage analytics logging is enabled.
func (client appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, transactionalContentMD5 []byte, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockFromURLResponse, error) {
	// timeout, when supplied, must be >= 0.
	if err := validate([]validation{
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	request, err := client.appendBlockFromURLPreparer(sourceURL, contentLength, sourceRange, sourceContentMD5, sourceContentcrc64, timeout, transactionalContentMD5, encryptionKey, encryptionKeySha256, encryptionAlgorithm, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID)
	if err != nil {
		return nil, err
	}
	response, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.appendBlockFromURLResponder}, request)
	if err != nil {
		return nil, err
	}
	return response.(*AppendBlobAppendBlockFromURLResponse), err
}

// appendBlockFromURLPreparer prepares the AppendBlockFromURL request.
func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, transactionalContentMD5 []byte, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "appendblock") req.URL.RawQuery = params.Encode() req.Header.Set("x-ms-copy-source", sourceURL) if sourceRange != nil { req.Header.Set("x-ms-source-range", *sourceRange) } if sourceContentMD5 != nil { req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) } if sourceContentcrc64 != nil { req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentcrc64)) } req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) if transactionalContentMD5 != nil { req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) } if encryptionKey != nil { req.Header.Set("x-ms-encryption-key", *encryptionKey) } if encryptionKeySha256 != nil { req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) } if encryptionAlgorithm != EncryptionAlgorithmNone { req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) } if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } if maxSize != nil { req.Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*maxSize, 10)) } if 
appendPosition != nil { req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10)) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } if sourceIfModifiedSince != nil { req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) } if sourceIfUnmodifiedSince != nil { req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if sourceIfMatch != nil { req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) } if sourceIfNoneMatch != nil { req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // appendBlockFromURLResponder handles the response to the AppendBlockFromURL request. func (client appendBlobClient) appendBlockFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusCreated) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &AppendBlobAppendBlockFromURLResponse{rawResponse: resp.Response()}, err } // Create the Create Append Blob operation creates a new append blob. // // contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more // information, see Setting // Timeouts for Blob Service Operations. blobContentType is optional. Sets the blob's content type. If specified, // this property is stored with the blob and returned with a read request. 
blobContentEncoding is optional. Sets the // blob's content encoding. If specified, this property is stored with the blob and returned with a read request. // blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the // blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this // hash is not validated, as the hashes for the individual blocks were validated when each was uploaded. // blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and // returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the // blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the // destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified // metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, // metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and // Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is // active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. // encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in the request. If not // specified, encryption is performed with the root account encryption key. For more information, see Encryption at // Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be // provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the // encryption key hash. Currently, the only accepted value is "AES256". 
Must be provided if the x-ms-encryption-key // header is provided. ifModifiedSince is specify this header value to operate only on a blob if it has been modified // since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has // not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a // matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is // provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when // storage analytics logging is enabled. func (client appendBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobCreateResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req) if err != nil { return nil, err } return 
resp.(*AppendBlobCreateResponse), err } // createPreparer prepares the Create request. func (client appendBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } req.URL.RawQuery = params.Encode() req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) if blobContentType != nil { req.Header.Set("x-ms-blob-content-type", *blobContentType) } if blobContentEncoding != nil { req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) } if blobContentLanguage != nil { req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) } if blobContentMD5 != nil { req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) } if blobCacheControl != nil { req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) } if metadata != nil { for k, v := range metadata { req.Header.Set("x-ms-meta-"+k, v) } } if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } if blobContentDisposition != nil { req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) } if encryptionKey != nil { req.Header.Set("x-ms-encryption-key", *encryptionKey) } if encryptionKeySha256 != nil { req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) } if encryptionAlgorithm != EncryptionAlgorithmNone { 
req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } req.Header.Set("x-ms-blob-type", "AppendBlob") return req, nil } // createResponder handles the response to the Create request. func (client appendBlobClient) createResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusCreated) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &AppendBlobCreateResponse{rawResponse: resp.Response()}, err } azure-storage-blob-go-0.10.0/azblob/zz_generated_blob.go000066400000000000000000002466241367515646300232140ustar00rootroot00000000000000package azblob // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "context" "encoding/base64" "io" "io/ioutil" "net/http" "net/url" "strconv" "time" "github.com/Azure/azure-pipeline-go/pipeline" ) // blobClient is the client for the Blob methods of the Azblob service. type blobClient struct { managementClient } // newBlobClient creates an instance of the blobClient client. func newBlobClient(url url.URL, p pipeline.Pipeline) blobClient { return blobClient{newManagementClient(url, p)} } // AbortCopyFromURL the Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a // destination blob with zero length and full metadata. 
// // copyID is the copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. timeout is // the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's // lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character // limit that is recorded in the analytics logs when storage analytics logging is enabled. func (client blobClient) AbortCopyFromURL(ctx context.Context, copyID string, timeout *int32, leaseID *string, requestID *string) (*BlobAbortCopyFromURLResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.abortCopyFromURLPreparer(copyID, timeout, leaseID, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.abortCopyFromURLResponder}, req) if err != nil { return nil, err } return resp.(*BlobAbortCopyFromURLResponse), err } // abortCopyFromURLPreparer prepares the AbortCopyFromURL request. 
func (client blobClient) abortCopyFromURLPreparer(copyID string, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() params.Set("copyid", copyID) if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "copy") req.URL.RawQuery = params.Encode() if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } req.Header.Set("x-ms-copy-action", "abort") return req, nil } // abortCopyFromURLResponder handles the response to the AbortCopyFromURL request. func (client blobClient) abortCopyFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusNoContent) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &BlobAbortCopyFromURLResponse{rawResponse: resp.Response()}, err } // AcquireLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete // operations // // timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. duration is specifies the duration of the lease, in seconds, or negative // one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration // cannot be changed using renew or change. proposedLeaseID is proposed lease ID, in a GUID string format. The Blob // service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor // (String) for a list of valid GUID string formats. 
ifModifiedSince is specify this header value to operate only on a // blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to // operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value // to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs // without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is // recorded in the analytics logs when storage analytics logging is enabled. func (client blobClient) AcquireLease(ctx context.Context, timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobAcquireLeaseResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.acquireLeasePreparer(timeout, duration, proposedLeaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.acquireLeaseResponder}, req) if err != nil { return nil, err } return resp.(*BlobAcquireLeaseResponse), err } // acquireLeasePreparer prepares the AcquireLease request. 
func (client blobClient) acquireLeasePreparer(timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "lease") req.URL.RawQuery = params.Encode() if duration != nil { req.Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*duration), 10)) } if proposedLeaseID != nil { req.Header.Set("x-ms-proposed-lease-id", *proposedLeaseID) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } req.Header.Set("x-ms-lease-action", "acquire") return req, nil } // acquireLeaseResponder handles the response to the AcquireLease request. func (client blobClient) acquireLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusCreated) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &BlobAcquireLeaseResponse{rawResponse: resp.Response()}, err } // BreakLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete // operations // // timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. 
breakPeriod is for a break operation, proposed duration the lease should // continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter than the // time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available // before the break period has expired, but the lease may be held for longer than the break period. If this header does // not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an // infinite lease breaks immediately. ifModifiedSince is specify this header value to operate only on a blob if it has // been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a // blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on // blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. // requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics // logs when storage analytics logging is enabled. 
func (client blobClient) BreakLease(ctx context.Context, timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobBreakLeaseResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.breakLeasePreparer(timeout, breakPeriod, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.breakLeaseResponder}, req) if err != nil { return nil, err } return resp.(*BlobBreakLeaseResponse), err } // breakLeasePreparer prepares the BreakLease request. func (client blobClient) breakLeasePreparer(timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "lease") req.URL.RawQuery = params.Encode() if breakPeriod != nil { req.Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*breakPeriod), 10)) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { 
req.Header.Set("x-ms-client-request-id", *requestID) } req.Header.Set("x-ms-lease-action", "break") return req, nil } // breakLeaseResponder handles the response to the BreakLease request. func (client blobClient) breakLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusAccepted) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &BlobBreakLeaseResponse{rawResponse: resp.Response()}, err } // ChangeLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete // operations // // leaseID is specifies the current lease ID on the resource. proposedLeaseID is proposed lease ID, in a GUID string // format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See // Guid Constructor (String) for a list of valid GUID string formats. timeout is the timeout parameter is expressed in // seconds. For more information, see Setting // Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if // it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only // on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate // only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a // matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded // in the analytics logs when storage analytics logging is enabled. 
func (client blobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobChangeLeaseResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.changeLeasePreparer(leaseID, proposedLeaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.changeLeaseResponder}, req) if err != nil { return nil, err } return resp.(*BlobChangeLeaseResponse), err } // changeLeasePreparer prepares the ChangeLease request. func (client blobClient) changeLeasePreparer(leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "lease") req.URL.RawQuery = params.Encode() req.Header.Set("x-ms-lease-id", leaseID) req.Header.Set("x-ms-proposed-lease-id", proposedLeaseID) if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } req.Header.Set("x-ms-version", 
ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } req.Header.Set("x-ms-lease-action", "change") return req, nil } // changeLeaseResponder handles the response to the ChangeLease request. func (client blobClient) changeLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &BlobChangeLeaseResponse{rawResponse: resp.Response()}, err } // CopyFromURL the Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a // response until the copy is complete. // // copySource is specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that // specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob // must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is // expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated // with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or // file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with // the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version // 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing // Containers, Blobs, and Metadata for more information. tier is optional. Indicates the tier to be set on the blob. // sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified since the // specified date/time. 
sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not // been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a // matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. // ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified // date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified // since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. // ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. leaseID is if specified, the // operation only succeeds if the resource's lease is active and matches this ID. requestID is provides a // client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. sourceContentMD5 is specify the md5 calculated for the range of bytes that must be // read from the copy source. 
func (client blobClient) CopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string, sourceContentMD5 []byte) (*BlobCopyFromURLResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.copyFromURLPreparer(copySource, timeout, metadata, tier, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseID, requestID, sourceContentMD5) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.copyFromURLResponder}, req) if err != nil { return nil, err } return resp.(*BlobCopyFromURLResponse), err } // copyFromURLPreparer prepares the CopyFromURL request. 
func (client blobClient) copyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string, sourceContentMD5 []byte) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } req.URL.RawQuery = params.Encode() if metadata != nil { for k, v := range metadata { req.Header.Set("x-ms-meta-"+k, v) } } if tier != AccessTierNone { req.Header.Set("x-ms-access-tier", string(tier)) } if sourceIfModifiedSince != nil { req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) } if sourceIfUnmodifiedSince != nil { req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if sourceIfMatch != nil { req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) } if sourceIfNoneMatch != nil { req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } req.Header.Set("x-ms-copy-source", copySource) if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } if sourceContentMD5 != nil 
{ req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) } req.Header.Set("x-ms-requires-sync", "true") return req, nil } // copyFromURLResponder handles the response to the CopyFromURL request. func (client blobClient) copyFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusAccepted) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &BlobCopyFromURLResponse{rawResponse: resp.Response()}, err } // CreateSnapshot the Create Snapshot operation creates a read-only snapshot of a blob // // timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated // with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or // file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with // the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version // 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing // Containers, Blobs, and Metadata for more information. encryptionKey is optional. Specifies the encryption key to use // to encrypt the data provided in the request. If not specified, encryption is performed with the root account // encryption key. For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the // SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. // encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the only accepted value is // "AES256". Must be provided if the x-ms-encryption-key header is provided. 
ifModifiedSince is specify this header // value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify // this header value to operate only on a blob if it has not been modified since the specified date/time. ifMatch is // specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to // operate only on blobs without a matching value. leaseID is if specified, the operation only succeeds if the // resource's lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB // character limit that is recorded in the analytics logs when storage analytics logging is enabled. func (client blobClient) CreateSnapshot(ctx context.Context, timeout *int32, metadata map[string]string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobCreateSnapshotResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.createSnapshotPreparer(timeout, metadata, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseID, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createSnapshotResponder}, req) if err != nil { return nil, err } return resp.(*BlobCreateSnapshotResponse), err } // createSnapshotPreparer prepares the CreateSnapshot request. 
func (client blobClient) createSnapshotPreparer(timeout *int32, metadata map[string]string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "snapshot") req.URL.RawQuery = params.Encode() if metadata != nil { for k, v := range metadata { req.Header.Set("x-ms-meta-"+k, v) } } if encryptionKey != nil { req.Header.Set("x-ms-encryption-key", *encryptionKey) } if encryptionKeySha256 != nil { req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) } if encryptionAlgorithm != EncryptionAlgorithmNone { req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // createSnapshotResponder handles the response to the CreateSnapshot request. 
func (client blobClient) createSnapshotResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusCreated) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &BlobCreateSnapshotResponse{rawResponse: resp.Response()}, err } // Delete if the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently // removed from the storage account. If the storage account's soft delete feature is enabled, then, when a blob is // deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service retains the blob // or snapshot for the number of days specified by the DeleteRetentionPolicy section of [Storage service properties] // (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's data is permanently // removed from the storage account. Note that you continue to be charged for the soft-deleted blob's storage until it // is permanently removed. Use the List Blobs API and specify the "include=deleted" query parameter to discover which // blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. // All other operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code of 404 // (ResourceNotFound). // // snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to // retrieve. For more information on working with blob snapshots, see Creating // a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's // lease is active and matches this ID. deleteSnapshots is required if the blob has associated snapshots. 
Specify one // of the following two options: include: Delete the base blob and all of its snapshots. only: Delete only the blob's // snapshots and not the blob itself ifModifiedSince is specify this header value to operate only on a blob if it has // been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a // blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on // blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. // requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics // logs when storage analytics logging is enabled. func (client blobClient) Delete(ctx context.Context, snapshot *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobDeleteResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.deletePreparer(snapshot, timeout, leaseID, deleteSnapshots, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.deleteResponder}, req) if err != nil { return nil, err } return resp.(*BlobDeleteResponse), err } // deletePreparer prepares the Delete request. 
func (client blobClient) deletePreparer(snapshot *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("DELETE", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if snapshot != nil && len(*snapshot) > 0 { params.Set("snapshot", *snapshot) } if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } req.URL.RawQuery = params.Encode() if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } if deleteSnapshots != DeleteSnapshotsOptionNone { req.Header.Set("x-ms-delete-snapshots", string(deleteSnapshots)) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // deleteResponder handles the response to the Delete request. func (client blobClient) deleteResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusAccepted) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &BlobDeleteResponse{rawResponse: resp.Response()}, err } // Download the Download operation reads or downloads a blob from the system, including its metadata and properties. // You can also call Download to read a snapshot. 
// // snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to // retrieve. For more information on working with blob snapshots, see Creating // a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified // range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. // rangeGetContentMD5 is when set to true and specified together with the Range, the service returns the MD5 hash for // the range, as long as the range is less than or equal to 4 MB in size. rangeGetContentCRC64 is when set to true and // specified together with the Range, the service returns the CRC64 hash for the range, as long as the range is less // than or equal to 4 MB in size. encryptionKey is optional. Specifies the encryption key to use to encrypt the data // provided in the request. If not specified, encryption is performed with the root account encryption key. For more // information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the // provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the // algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided // if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header value to operate only on a // blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to // operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value // to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs // without a matching value. 
requestID is provides a client-generated, opaque value with a 1 KB character limit that is // recorded in the analytics logs when storage analytics logging is enabled. func (client blobClient) Download(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, rangeGetContentCRC64 *bool, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*downloadResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.downloadPreparer(snapshot, timeout, rangeParameter, leaseID, rangeGetContentMD5, rangeGetContentCRC64, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.downloadResponder}, req) if err != nil { return nil, err } return resp.(*downloadResponse), err } // downloadPreparer prepares the Download request. 
func (client blobClient) downloadPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, rangeGetContentCRC64 *bool, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("GET", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if snapshot != nil && len(*snapshot) > 0 { params.Set("snapshot", *snapshot) } if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } req.URL.RawQuery = params.Encode() if rangeParameter != nil { req.Header.Set("x-ms-range", *rangeParameter) } if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } if rangeGetContentMD5 != nil { req.Header.Set("x-ms-range-get-content-md5", strconv.FormatBool(*rangeGetContentMD5)) } if rangeGetContentCRC64 != nil { req.Header.Set("x-ms-range-get-content-crc64", strconv.FormatBool(*rangeGetContentCRC64)) } if encryptionKey != nil { req.Header.Set("x-ms-encryption-key", *encryptionKey) } if encryptionKeySha256 != nil { req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) } if encryptionAlgorithm != EncryptionAlgorithmNone { req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // 
downloadResponder handles the response to the Download request. func (client blobClient) downloadResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusPartialContent) if resp == nil { return nil, err } return &downloadResponse{rawResponse: resp.Response()}, err } // GetAccessControl get the owner, group, permissions, or access control list for a blob. // // timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. upn is optional. Valid only when Hierarchical Namespace is enabled for the // account. If "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response headers will // be transformed from Azure Active Directory Object IDs to User Principal Names. If "false", the values will be // returned as Azure Active Directory Object IDs. The default value is false. leaseID is if specified, the operation // only succeeds if the resource's lease is active and matches this ID. ifMatch is specify an ETag value to operate // only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a // matching value. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since // the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been // modified since the specified date/time. requestID is provides a client-generated, opaque value with a 1 KB character // limit that is recorded in the analytics logs when storage analytics logging is enabled. 
func (client blobClient) GetAccessControl(ctx context.Context, timeout *int32, upn *bool, leaseID *string, ifMatch *ETag, ifNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*BlobGetAccessControlResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.getAccessControlPreparer(timeout, upn, leaseID, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccessControlResponder}, req) if err != nil { return nil, err } return resp.(*BlobGetAccessControlResponse), err } // getAccessControlPreparer prepares the GetAccessControl request. func (client blobClient) getAccessControlPreparer(timeout *int32, upn *bool, leaseID *string, ifMatch *ETag, ifNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("HEAD", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } if upn != nil { params.Set("upn", strconv.FormatBool(*upn)) } params.Set("action", "getAccessControl") req.URL.RawQuery = params.Encode() if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", 
(*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } req.Header.Set("x-ms-version", ServiceVersion) return req, nil } // getAccessControlResponder handles the response to the GetAccessControl request. func (client blobClient) getAccessControlResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &BlobGetAccessControlResponse{rawResponse: resp.Response()}, err } // GetAccountInfo returns the sku name and account kind func (client blobClient) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) { req, err := client.getAccountInfoPreparer() if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req) if err != nil { return nil, err } return resp.(*BlobGetAccountInfoResponse), err } // getAccountInfoPreparer prepares the GetAccountInfo request. func (client blobClient) getAccountInfoPreparer() (pipeline.Request, error) { req, err := pipeline.NewRequest("GET", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() params.Set("restype", "account") params.Set("comp", "properties") req.URL.RawQuery = params.Encode() req.Header.Set("x-ms-version", ServiceVersion) return req, nil } // getAccountInfoResponder handles the response to the GetAccountInfo request. 
func (client blobClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &BlobGetAccountInfoResponse{rawResponse: resp.Response()}, err } // GetProperties the Get Properties operation returns all user-defined metadata, standard HTTP properties, and system // properties for the blob. It does not return the content of the blob. // // snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to // retrieve. For more information on working with blob snapshots, see Creating // a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's // lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the // data provided in the request. If not specified, encryption is performed with the root account encryption key. For // more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the // provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the // algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided // if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header value to operate only on a // blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to // operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value // to operate only on blobs with a matching value. 
ifNoneMatch is specify an ETag value to operate only on blobs // without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is // recorded in the analytics logs when storage analytics logging is enabled. func (client blobClient) GetProperties(ctx context.Context, snapshot *string, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobGetPropertiesResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.getPropertiesPreparer(snapshot, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req) if err != nil { return nil, err } return resp.(*BlobGetPropertiesResponse), err } // getPropertiesPreparer prepares the GetProperties request. 
func (client blobClient) getPropertiesPreparer(snapshot *string, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("HEAD", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if snapshot != nil && len(*snapshot) > 0 { params.Set("snapshot", *snapshot) } if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } req.URL.RawQuery = params.Encode() if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } if encryptionKey != nil { req.Header.Set("x-ms-encryption-key", *encryptionKey) } if encryptionKeySha256 != nil { req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) } if encryptionAlgorithm != EncryptionAlgorithmNone { req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // getPropertiesResponder handles the response to the GetProperties request. 
func (client blobClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &BlobGetPropertiesResponse{rawResponse: resp.Response()}, err } // ReleaseLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete // operations // // leaseID is specifies the current lease ID on the resource. timeout is the timeout parameter is expressed in seconds. // For more information, see Setting // Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if // it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only // on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate // only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a // matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded // in the analytics logs when storage analytics logging is enabled. 
func (client blobClient) ReleaseLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobReleaseLeaseResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.releaseLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.releaseLeaseResponder}, req) if err != nil { return nil, err } return resp.(*BlobReleaseLeaseResponse), err } // releaseLeasePreparer prepares the ReleaseLease request. func (client blobClient) releaseLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "lease") req.URL.RawQuery = params.Encode() req.Header.Set("x-ms-lease-id", leaseID) if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } 
req.Header.Set("x-ms-lease-action", "release") return req, nil } // releaseLeaseResponder handles the response to the ReleaseLease request. func (client blobClient) releaseLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &BlobReleaseLeaseResponse{rawResponse: resp.Response()}, err } // RenewLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete // operations // // leaseID is specifies the current lease ID on the resource. timeout is the timeout parameter is expressed in seconds. // For more information, see Setting // Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if // it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only // on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate // only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a // matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded // in the analytics logs when storage analytics logging is enabled. 
func (client blobClient) RenewLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobRenewLeaseResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.renewLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.renewLeaseResponder}, req) if err != nil { return nil, err } return resp.(*BlobRenewLeaseResponse), err } // renewLeasePreparer prepares the RenewLease request. func (client blobClient) renewLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "lease") req.URL.RawQuery = params.Encode() req.Header.Set("x-ms-lease-id", leaseID) if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } req.Header.Set("x-ms-lease-action", 
"renew") return req, nil } // renewLeaseResponder handles the response to the RenewLease request. func (client blobClient) renewLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &BlobRenewLeaseResponse{rawResponse: resp.Response()}, err } // SetAccessControl set the owner, group, permissions, or access control list for a blob. // // timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's // lease is active and matches this ID. owner is optional. The owner of the blob or directory. group is optional. The // owning group of the blob or directory. posixPermissions is optional and only valid if Hierarchical Namespace is // enabled for the account. Sets POSIX access permissions for the file owner, the file owning group, and others. Each // class may be granted read, write, or execute permission. The sticky bit is also supported. Both symbolic // (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. posixACL is sets POSIX access control rights on // files and directories. The value is a comma-separated list of access control entries. Each access control entry // (ACE) consists of a scope, a type, a user or group identifier, and permissions in the format // "[scope:][type]:[id]:[permissions]". ifMatch is specify an ETag value to operate only on blobs with a matching // value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifModifiedSince is // specify this header value to operate only on a blob if it has been modified since the specified date/time. // ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the // specified date/time. 
requestID is provides a client-generated, opaque value with a 1 KB character limit that is // recorded in the analytics logs when storage analytics logging is enabled. func (client blobClient) SetAccessControl(ctx context.Context, timeout *int32, leaseID *string, owner *string, group *string, posixPermissions *string, posixACL *string, ifMatch *ETag, ifNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*BlobSetAccessControlResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.setAccessControlPreparer(timeout, leaseID, owner, group, posixPermissions, posixACL, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setAccessControlResponder}, req) if err != nil { return nil, err } return resp.(*BlobSetAccessControlResponse), err } // setAccessControlPreparer prepares the SetAccessControl request. 
func (client blobClient) setAccessControlPreparer(timeout *int32, leaseID *string, owner *string, group *string, posixPermissions *string, posixACL *string, ifMatch *ETag, ifNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PATCH", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("action", "setAccessControl") req.URL.RawQuery = params.Encode() if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } if owner != nil { req.Header.Set("x-ms-owner", *owner) } if group != nil { req.Header.Set("x-ms-group", *group) } if posixPermissions != nil { req.Header.Set("x-ms-permissions", *posixPermissions) } if posixACL != nil { req.Header.Set("x-ms-acl", *posixACL) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } req.Header.Set("x-ms-version", ServiceVersion) return req, nil } // setAccessControlResponder handles the response to the SetAccessControl request. 
func (client blobClient) setAccessControlResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &BlobSetAccessControlResponse{rawResponse: resp.Response()}, err } // SetHTTPHeaders the Set HTTP Headers operation sets system properties on the blob // // timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. blobCacheControl is optional. Sets the blob's cache control. If specified, // this property is stored with the blob and returned with a read request. blobContentType is optional. Sets the blob's // content type. If specified, this property is stored with the blob and returned with a read request. blobContentMD5 // is optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual // blocks were validated when each was uploaded. blobContentEncoding is optional. Sets the blob's content encoding. If // specified, this property is stored with the blob and returned with a read request. blobContentLanguage is optional. // Set the blob's content language. If specified, this property is stored with the blob and returned with a read // request. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. // ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified // date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified // since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. // ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. blobContentDisposition is // optional. Sets the blob's Content-Disposition header. 
requestID is provides a client-generated, opaque value with a // 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. func (client blobClient) SetHTTPHeaders(ctx context.Context, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentMD5 []byte, blobContentEncoding *string, blobContentLanguage *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobContentDisposition *string, requestID *string) (*BlobSetHTTPHeadersResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.setHTTPHeadersPreparer(timeout, blobCacheControl, blobContentType, blobContentMD5, blobContentEncoding, blobContentLanguage, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, blobContentDisposition, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setHTTPHeadersResponder}, req) if err != nil { return nil, err } return resp.(*BlobSetHTTPHeadersResponse), err } // setHTTPHeadersPreparer prepares the SetHTTPHeaders request. 
func (client blobClient) setHTTPHeadersPreparer(timeout *int32, blobCacheControl *string, blobContentType *string, blobContentMD5 []byte, blobContentEncoding *string, blobContentLanguage *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobContentDisposition *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "properties") req.URL.RawQuery = params.Encode() if blobCacheControl != nil { req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) } if blobContentType != nil { req.Header.Set("x-ms-blob-content-type", *blobContentType) } if blobContentMD5 != nil { req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) } if blobContentEncoding != nil { req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) } if blobContentLanguage != nil { req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) } if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } if blobContentDisposition != nil { req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // setHTTPHeadersResponder handles the response to the SetHTTPHeaders request. 
func (client blobClient) setHTTPHeadersResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &BlobSetHTTPHeadersResponse{rawResponse: resp.Response()}, err } // SetMetadata the Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more // name-value pairs // // timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated // with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or // file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with // the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version // 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing // Containers, Blobs, and Metadata for more information. leaseID is if specified, the operation only succeeds if the // resource's lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to // encrypt the data provided in the request. If not specified, encryption is performed with the root account encryption // key. For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 // hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. // encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the only accepted value is // "AES256". Must be provided if the x-ms-encryption-key header is provided. 
ifModifiedSince is specify this header // value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify // this header value to operate only on a blob if it has not been modified since the specified date/time. ifMatch is // specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to // operate only on blobs without a matching value. requestID is provides a client-generated, opaque value with a 1 KB // character limit that is recorded in the analytics logs when storage analytics logging is enabled. func (client blobClient) SetMetadata(ctx context.Context, timeout *int32, metadata map[string]string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobSetMetadataResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.setMetadataPreparer(timeout, metadata, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setMetadataResponder}, req) if err != nil { return nil, err } return resp.(*BlobSetMetadataResponse), err } // setMetadataPreparer prepares the SetMetadata request. 
func (client blobClient) setMetadataPreparer(timeout *int32, metadata map[string]string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "metadata") req.URL.RawQuery = params.Encode() if metadata != nil { for k, v := range metadata { req.Header.Set("x-ms-meta-"+k, v) } } if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } if encryptionKey != nil { req.Header.Set("x-ms-encryption-key", *encryptionKey) } if encryptionKeySha256 != nil { req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) } if encryptionAlgorithm != EncryptionAlgorithmNone { req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // setMetadataResponder handles the response to the SetMetadata request. 
func (client blobClient) setMetadataResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &BlobSetMetadataResponse{rawResponse: resp.Response()}, err } // SetTier the Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage // account and on a block blob in a blob storage account (locally redundant storage only). A premium page blob's tier // determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive // storage type. This operation does not update the blob's ETag. // // tier is indicates the tier to be set on the blob. timeout is the timeout parameter is expressed in seconds. For more // information, see Setting // Timeouts for Blob Service Operations. rehydratePriority is optional: Indicates the priority with which to // rehydrate an archived blob. requestID is provides a client-generated, opaque value with a 1 KB character limit that // is recorded in the analytics logs when storage analytics logging is enabled. leaseID is if specified, the operation // only succeeds if the resource's lease is active and matches this ID. 
func (client blobClient) SetTier(ctx context.Context, tier AccessTierType, timeout *int32, rehydratePriority RehydratePriorityType, requestID *string, leaseID *string) (*BlobSetTierResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.setTierPreparer(tier, timeout, rehydratePriority, requestID, leaseID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setTierResponder}, req) if err != nil { return nil, err } return resp.(*BlobSetTierResponse), err } // setTierPreparer prepares the SetTier request. func (client blobClient) setTierPreparer(tier AccessTierType, timeout *int32, rehydratePriority RehydratePriorityType, requestID *string, leaseID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "tier") req.URL.RawQuery = params.Encode() req.Header.Set("x-ms-access-tier", string(tier)) if rehydratePriority != RehydratePriorityNone { req.Header.Set("x-ms-rehydrate-priority", string(rehydratePriority)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } return req, nil } // setTierResponder handles the response to the SetTier request. 
func (client blobClient) setTierResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusAccepted) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &BlobSetTierResponse{rawResponse: resp.Response()}, err } // StartCopyFromURL the Start Copy From URL operation copies a blob or an internet resource to a new blob. // // copySource is specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that // specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob // must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is // expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated // with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or // file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with // the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version // 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing // Containers, Blobs, and Metadata for more information. tier is optional. Indicates the tier to be set on the blob. // rehydratePriority is optional: Indicates the priority with which to rehydrate an archived blob. // sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified since the // specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not // been modified since the specified date/time. 
sourceIfMatch is specify an ETag value to operate only on blobs with a // matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. // ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified // date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified // since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. // ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. leaseID is if specified, the // operation only succeeds if the resource's lease is active and matches this ID. requestID is provides a // client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, rehydratePriority RehydratePriorityType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobStartCopyFromURLResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.startCopyFromURLPreparer(copySource, timeout, metadata, tier, rehydratePriority, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseID, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: 
client.startCopyFromURLResponder}, req) if err != nil { return nil, err } return resp.(*BlobStartCopyFromURLResponse), err } // startCopyFromURLPreparer prepares the StartCopyFromURL request. func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, rehydratePriority RehydratePriorityType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } req.URL.RawQuery = params.Encode() if metadata != nil { for k, v := range metadata { req.Header.Set("x-ms-meta-"+k, v) } } if tier != AccessTierNone { req.Header.Set("x-ms-access-tier", string(tier)) } if rehydratePriority != RehydratePriorityNone { req.Header.Set("x-ms-rehydrate-priority", string(rehydratePriority)) } if sourceIfModifiedSince != nil { req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) } if sourceIfUnmodifiedSince != nil { req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if sourceIfMatch != nil { req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) } if sourceIfNoneMatch != nil { req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if 
ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } req.Header.Set("x-ms-copy-source", copySource) if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // startCopyFromURLResponder handles the response to the StartCopyFromURL request. func (client blobClient) startCopyFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusAccepted) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &BlobStartCopyFromURLResponse{rawResponse: resp.Response()}, err } // Undelete undelete a blob that was previously soft deleted // // timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB // character limit that is recorded in the analytics logs when storage analytics logging is enabled. func (client blobClient) Undelete(ctx context.Context, timeout *int32, requestID *string) (*BlobUndeleteResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.undeletePreparer(timeout, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.undeleteResponder}, req) if err != nil { return nil, err } return resp.(*BlobUndeleteResponse), err } // undeletePreparer prepares the Undelete request. 
func (client blobClient) undeletePreparer(timeout *int32, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "undelete") req.URL.RawQuery = params.Encode() req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // undeleteResponder handles the response to the Undelete request. func (client blobClient) undeleteResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &BlobUndeleteResponse{rawResponse: resp.Response()}, err } azure-storage-blob-go-0.10.0/azblob/zz_generated_block_blob.go000066400000000000000000001022731367515646300243550ustar00rootroot00000000000000package azblob // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "bytes" "context" "encoding/base64" "encoding/xml" "github.com/Azure/azure-pipeline-go/pipeline" "io" "io/ioutil" "net/http" "net/url" "strconv" "time" ) // blockBlobClient is the client for the BlockBlob methods of the Azblob service. type blockBlobClient struct { managementClient } // newBlockBlobClient creates an instance of the blockBlobClient client. func newBlockBlobClient(url url.URL, p pipeline.Pipeline) blockBlobClient { return blockBlobClient{newManagementClient(url, p)} } // CommitBlockList the Commit Block List operation writes a blob by specifying the list of block IDs that make up the // blob. 
// In order to be written as part of a blob, a block must have been successfully written to the server in a prior
// Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that have changed,
// then committing the new and existing blocks together. You can do this by specifying whether to commit a block from
// the committed block list or from the uncommitted block list, or to commit the most recently uploaded version of the
// block, whichever list it may belong to.
//
// timeout is the timeout parameter is expressed in seconds. For more information, see Setting
// Timeouts for Blob Service Operations. blobCacheControl is optional. Sets the blob's cache control. If specified,
// this property is stored with the blob and returned with a read request. blobContentType is optional. Sets the blob's
// content type. If specified, this property is stored with the blob and returned with a read request.
// blobContentEncoding is optional. Sets the blob's content encoding. If specified, this property is stored with the
// blob and returned with a read request. blobContentLanguage is optional. Set the blob's content language. If
// specified, this property is stored with the blob and returned with a read request. blobContentMD5 is optional. An
// MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks were
// validated when each was uploaded. transactionalContentMD5 is specify the transactional md5 for the body, to be
// validated by the service. transactionalContentCrc64 is specify the transactional crc64 for the body, to be validated
// by the service. metadata is optional. Specifies a user-defined name-value pair associated with the blob. If no
// name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination
// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata,
// and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names
// must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for
// more information. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches
// this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. encryptionKey is optional.
// Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption is
// performed with the root account encryption key. For more information, see Encryption at Rest for Azure Storage
// Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be provided if the
// x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the encryption key
// hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is
// provided. tier is optional. Indicates the tier to be set on the blob. ifModifiedSince is specify this header value
// to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this
// header value to operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify
// an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only
// on blobs without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character
// limit that is recorded in the analytics logs when storage analytics logging is enabled.
//
// The optional timeout is validated (>= 0), the request is prepared, sent through
// the pipeline, and the typed response is returned.
func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobCommitBlockListResponse, error) {
	if err := validate([]validation{
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, transactionalContentMD5, transactionalContentCrc64, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.commitBlockListResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*BlockBlobCommitBlockListResponse), err
}

// commitBlockListPreparer prepares the CommitBlockList request.
// commitBlockListPreparer builds the PUT ?comp=blocklist request: optional
// arguments become query parameters or x-ms-*/conditional headers, and the
// BlockLookupList is XML-marshaled into the request body.
func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("PUT", client.url, nil)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	params.Set("comp", "blocklist")
	req.URL.RawQuery = params.Encode()
	if blobCacheControl != nil {
		req.Header.Set("x-ms-blob-cache-control", *blobCacheControl)
	}
	if blobContentType != nil {
		req.Header.Set("x-ms-blob-content-type", *blobContentType)
	}
	if blobContentEncoding != nil {
		req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding)
	}
	if blobContentLanguage != nil {
		req.Header.Set("x-ms-blob-content-language", *blobContentLanguage)
	}
	// MD5/CRC64 digests are transmitted base64-encoded.
	if blobContentMD5 != nil {
		req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5))
	}
	if transactionalContentMD5 != nil {
		req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
	}
	if transactionalContentCrc64 != nil {
		req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64))
	}
	if metadata != nil {
		for k, v := range metadata {
			req.Header.Set("x-ms-meta-"+k, v)
		}
	}
	if leaseID != nil {
		req.Header.Set("x-ms-lease-id", *leaseID)
	}
	if blobContentDisposition != nil {
		req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
	}
	if encryptionKey != nil {
		req.Header.Set("x-ms-encryption-key", *encryptionKey)
	}
	if encryptionKeySha256 != nil {
		req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
	}
	if encryptionAlgorithm != EncryptionAlgorithmNone {
		req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
	}
	if tier != AccessTierNone {
		req.Header.Set("x-ms-access-tier", string(tier))
	}
	if ifModifiedSince != nil {
		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifUnmodifiedSince != nil {
		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifMatch != nil {
		req.Header.Set("If-Match", string(*ifMatch))
	}
	if ifNoneMatch != nil {
		req.Header.Set("If-None-Match", string(*ifNoneMatch))
	}
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	// The block list itself travels as an XML document in the request body.
	b, err := xml.Marshal(blocks)
	if err != nil {
		return req, pipeline.NewError(err, "failed to marshal request body")
	}
	req.Header.Set("Content-Type", "application/xml")
	err = req.SetBody(bytes.NewReader(b))
	if err != nil {
		return req, pipeline.NewError(err, "failed to set request body")
	}
	return req, nil
}

// commitBlockListResponder handles the response to the CommitBlockList request.
// 200 and 201 are accepted; the body is drained and closed before wrapping.
func (client blockBlobClient) commitBlockListResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK, http.StatusCreated)
	if resp == nil {
		return nil, err
	}
	io.Copy(ioutil.Discard, resp.Response().Body)
	resp.Response().Body.Close()
	return &BlockBlobCommitBlockListResponse{rawResponse: resp.Response()}, err
}

// GetBlockList the Get Block List operation retrieves the list of blocks that have been uploaded as part of a block
// blob
//
// listType is specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists
snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob // snapshot to retrieve. For more information on working with blob snapshots, see Creating // a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's // lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character // limit that is recorded in the analytics logs when storage analytics logging is enabled. func (client blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (*BlockList, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.getBlockListPreparer(listType, snapshot, timeout, leaseID, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getBlockListResponder}, req) if err != nil { return nil, err } return resp.(*BlockList), err } // getBlockListPreparer prepares the GetBlockList request. 
func (client blockBlobClient) getBlockListPreparer(listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("GET", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if snapshot != nil && len(*snapshot) > 0 { params.Set("snapshot", *snapshot) } params.Set("blocklisttype", string(listType)) if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "blocklist") req.URL.RawQuery = params.Encode() if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // getBlockListResponder handles the response to the GetBlockList request. func (client blockBlobClient) getBlockListResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } result := &BlockList{rawResponse: resp.Response()} if err != nil { return result, err } defer resp.Response().Body.Close() b, err := ioutil.ReadAll(resp.Response().Body) if err != nil { return result, err } if len(b) > 0 { b = removeBOM(b) err = xml.Unmarshal(b, result) if err != nil { return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") } } return result, nil } // StageBlock the Stage Block operation creates a new block to be committed as part of a blob // // blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or // equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the // same size for each block. contentLength is the length of the request. body is initial data body will be closed upon // successful return. 
Callers should ensure closure when receiving an error.transactionalContentMD5 is specify the // transactional md5 for the body, to be validated by the service. transactionalContentCrc64 is specify the // transactional crc64 for the body, to be validated by the service. timeout is the timeout parameter is expressed in // seconds. For more information, see Setting // Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's // lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the // data provided in the request. If not specified, encryption is performed with the root account encryption key. For // more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the // provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the // algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided // if the x-ms-encryption-key header is provided. requestID is provides a client-generated, opaque value with a 1 KB // character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, requestID *string) (*BlockBlobStageBlockResponse, error) { if err := validate([]validation{ {targetValue: body, constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.stageBlockPreparer(blockID, contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.stageBlockResponder}, req) if err != nil { return nil, err } return resp.(*BlockBlobStageBlockResponse), err } // stageBlockPreparer prepares the StageBlock request. 
func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, body) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() params.Set("blockid", blockID) if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "block") req.URL.RawQuery = params.Encode() req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) if transactionalContentMD5 != nil { req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) } if transactionalContentCrc64 != nil { req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) } if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } if encryptionKey != nil { req.Header.Set("x-ms-encryption-key", *encryptionKey) } if encryptionKeySha256 != nil { req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) } if encryptionAlgorithm != EncryptionAlgorithmNone { req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // stageBlockResponder handles the response to the StageBlock request. 
func (client blockBlobClient) stageBlockResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusCreated) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &BlockBlobStageBlockResponse{rawResponse: resp.Response()}, err } // StageBlockFromURL the Stage Block operation creates a new block to be committed as part of a blob where the contents // are read from a URL. // // blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or // equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the // same size for each block. contentLength is the length of the request. sourceURL is specify a URL to the copy source. // sourceRange is bytes of source data in the specified range. sourceContentMD5 is specify the md5 calculated for the // range of bytes that must be read from the copy source. sourceContentcrc64 is specify the crc64 calculated for the // range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in seconds. For // more information, see Setting // Timeouts for Blob Service Operations. encryptionKey is optional. Specifies the encryption key to use to encrypt // the data provided in the request. If not specified, encryption is performed with the root account encryption key. // For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of // the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is // the algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be // provided if the x-ms-encryption-key header is provided. leaseID is if specified, the operation only succeeds if the // resource's lease is active and matches this ID. 
sourceIfModifiedSince is specify this header value to operate only // on a blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header // value to operate only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify // an ETag value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate // only on blobs without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character // limit that is recorded in the analytics logs when storage analytics logging is enabled. func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*BlockBlobStageBlockFromURLResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.stageBlockFromURLPreparer(blockID, contentLength, sourceURL, sourceRange, sourceContentMD5, sourceContentcrc64, timeout, encryptionKey, encryptionKeySha256, encryptionAlgorithm, leaseID, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.stageBlockFromURLResponder}, req) if err != nil { return nil, err } return resp.(*BlockBlobStageBlockFromURLResponse), err } // stageBlockFromURLPreparer prepares the StageBlockFromURL request. 
// stageBlockFromURLPreparer builds the PUT ?comp=block&blockid=... request with
// no local body: the block content is identified by the x-ms-copy-source URL
// (plus optional x-ms-source-range) and fetched server-side.
func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("PUT", client.url, nil)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	params.Set("blockid", blockID)
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	params.Set("comp", "block")
	req.URL.RawQuery = params.Encode()
	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
	req.Header.Set("x-ms-copy-source", sourceURL)
	if sourceRange != nil {
		req.Header.Set("x-ms-source-range", *sourceRange)
	}
	// Source digests are transmitted base64-encoded.
	if sourceContentMD5 != nil {
		req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5))
	}
	if sourceContentcrc64 != nil {
		req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentcrc64))
	}
	if encryptionKey != nil {
		req.Header.Set("x-ms-encryption-key", *encryptionKey)
	}
	if encryptionKeySha256 != nil {
		req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
	}
	if encryptionAlgorithm != EncryptionAlgorithmNone {
		req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
	}
	if leaseID != nil {
		req.Header.Set("x-ms-lease-id", *leaseID)
	}
	if sourceIfModifiedSince != nil {
		req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123))
	}
	if sourceIfUnmodifiedSince != nil {
		req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123))
	}
	if sourceIfMatch != nil {
		req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch))
	}
	if sourceIfNoneMatch != nil {
		req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch))
	}
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	return req, nil
}

// stageBlockFromURLResponder handles the response to the StageBlockFromURL request.
// 200 and 201 are accepted; the body is drained and closed before wrapping.
func (client blockBlobClient) stageBlockFromURLResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK, http.StatusCreated)
	if resp == nil {
		return nil, err
	}
	io.Copy(ioutil.Discard, resp.Response().Body)
	resp.Response().Body.Close()
	return &BlockBlobStageBlockFromURLResponse{rawResponse: resp.Response()}, err
}

// Upload the Upload Block Blob operation updates the content of an existing block blob. Updating an existing block
// blob overwrites any existing metadata on the blob. Partial updates are not supported with Put Blob; the content of
// the existing blob is overwritten with the content of the new blob. To perform a partial update of the content of a
// block blob, use the Put Block List operation.
//
// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an
// error.contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
// information, see Setting
// Timeouts for Blob Service Operations. transactionalContentMD5 is specify the transactional md5 for the body, to
// be validated by the service. blobContentType is optional. Sets the blob's content type. If specified, this property
// is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the blob's content
// encoding. If specified, this property is stored with the blob and returned with a read request. blobContentLanguage
// is optional. Set the blob's content language. If specified, this property is stored with the blob and returned with
// a read request.
// blobContentMD5 is optional. An MD5 hash of the blob content. Note that this hash is not validated,
// as the hashes for the individual blocks were validated when each was uploaded. blobCacheControl is optional. Sets
// the blob's cache control. If specified, this property is stored with the blob and returned with a read request.
// metadata is optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are
// specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more
// name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not
// copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the
// naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information.
// leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
// blobContentDisposition is optional. Sets the blob's Content-Disposition header. encryptionKey is optional. Specifies
// the encryption key to use to encrypt the data provided in the request. If not specified, encryption is performed
// with the root account encryption key. For more information, see Encryption at Rest for Azure Storage Services.
// encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key
// header is provided. encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the
// only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. tier is optional.
// Indicates the tier to be set on the blob. ifModifiedSince is specify this header value to operate only on a blob if
// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate
// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
// in the analytics logs when storage analytics logging is enabled.
//
// body is required and timeout (when supplied) must be >= 0; the request is
// prepared, sent through the pipeline, and the typed response is returned.
func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobUploadResponse, error) {
	if err := validate([]validation{
		{targetValue: body,
			constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}},
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
		return nil, err
	}
	req, err := client.uploadPreparer(body, contentLength, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID)
	if err != nil {
		return nil, err
	}
	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadResponder}, req)
	if err != nil {
		return nil, err
	}
	return resp.(*BlockBlobUploadResponse), err
}

// uploadPreparer prepares the Upload request.
// It builds a PUT request whose body is the blob content, translating each
// optional argument into the matching header and finally marking the request
// with x-ms-blob-type: BlockBlob.
func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) {
	req, err := pipeline.NewRequest("PUT", client.url, body)
	if err != nil {
		return req, pipeline.NewError(err, "failed to create request")
	}
	params := req.URL.Query()
	if timeout != nil {
		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	req.URL.RawQuery = params.Encode()
	if transactionalContentMD5 != nil {
		req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5))
	}
	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
	if blobContentType != nil {
		req.Header.Set("x-ms-blob-content-type", *blobContentType)
	}
	if blobContentEncoding != nil {
		req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding)
	}
	if blobContentLanguage != nil {
		req.Header.Set("x-ms-blob-content-language", *blobContentLanguage)
	}
	if blobContentMD5 != nil {
		req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5))
	}
	if blobCacheControl != nil {
		req.Header.Set("x-ms-blob-cache-control", *blobCacheControl)
	}
	if metadata != nil {
		for k, v := range metadata {
			req.Header.Set("x-ms-meta-"+k, v)
		}
	}
	if leaseID != nil {
		req.Header.Set("x-ms-lease-id", *leaseID)
	}
	if blobContentDisposition != nil {
		req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition)
	}
	if encryptionKey != nil {
		req.Header.Set("x-ms-encryption-key", *encryptionKey)
	}
	if encryptionKeySha256 != nil {
		req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256)
	}
	if encryptionAlgorithm != EncryptionAlgorithmNone {
		req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm))
	}
	if tier != AccessTierNone {
		req.Header.Set("x-ms-access-tier", string(tier))
	}
	if ifModifiedSince != nil {
		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifUnmodifiedSince != nil {
		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
	}
	if ifMatch != nil {
		req.Header.Set("If-Match", string(*ifMatch))
	}
	if ifNoneMatch != nil {
		req.Header.Set("If-None-Match", string(*ifNoneMatch))
	}
	req.Header.Set("x-ms-version", ServiceVersion)
	if requestID != nil {
		req.Header.Set("x-ms-client-request-id", *requestID)
	}
	req.Header.Set("x-ms-blob-type", "BlockBlob")
	return req, nil
}

// uploadResponder handles the response to the Upload request.
// 200 and 201 are accepted; the body is drained and closed before wrapping.
func (client blockBlobClient) uploadResponder(resp pipeline.Response) (pipeline.Response, error) {
	err := validateResponse(resp, http.StatusOK, http.StatusCreated)
	if resp == nil {
		return nil, err
	}
	io.Copy(ioutil.Discard, resp.Response().Body)
	resp.Response().Body.Close()
	return &BlockBlobUploadResponse{rawResponse: resp.Response()}, err
}
azure-storage-blob-go-0.10.0/azblob/zz_generated_client.go000066400000000000000000000016251367515646300235420ustar00rootroot00000000000000
package azblob

// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

import (
	"github.com/Azure/azure-pipeline-go/pipeline"
	"net/url"
)

const (
	// ServiceVersion specifies the version of the operations used in this package.
	ServiceVersion = "2019-02-02"
)

// managementClient is the base client for Azblob.
type managementClient struct {
	url url.URL
	p   pipeline.Pipeline
}

// newManagementClient creates an instance of the managementClient client.
func newManagementClient(url url.URL, p pipeline.Pipeline) managementClient { return managementClient{ url: url, p: p, } } // URL returns a copy of the URL for this client. func (mc managementClient) URL() url.URL { return mc.url } // Pipeline returns the pipeline for this client. func (mc managementClient) Pipeline() pipeline.Pipeline { return mc.p } azure-storage-blob-go-0.10.0/azblob/zz_generated_container.go000066400000000000000000001403101367515646300242410ustar00rootroot00000000000000package azblob // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "bytes" "context" "encoding/xml" "github.com/Azure/azure-pipeline-go/pipeline" "io" "io/ioutil" "net/http" "net/url" "strconv" "time" ) // containerClient is the client for the Container methods of the Azblob service. type containerClient struct { managementClient } // newContainerClient creates an instance of the containerClient client. func newContainerClient(url url.URL, p pipeline.Pipeline) containerClient { return containerClient{newManagementClient(url, p)} } // AcquireLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be // 15 to 60 seconds, or can be infinite // // timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. duration is specifies the duration of the lease, in seconds, or negative // one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration // cannot be changed using renew or change. proposedLeaseID is proposed lease ID, in a GUID string format. The Blob // service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor // (String) for a list of valid GUID string formats. 
ifModifiedSince is specify this header value to operate only on a // blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to // operate only on a blob if it has not been modified since the specified date/time. requestID is provides a // client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. func (client containerClient) AcquireLease(ctx context.Context, timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerAcquireLeaseResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.acquireLeasePreparer(timeout, duration, proposedLeaseID, ifModifiedSince, ifUnmodifiedSince, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.acquireLeaseResponder}, req) if err != nil { return nil, err } return resp.(*ContainerAcquireLeaseResponse), err } // acquireLeasePreparer prepares the AcquireLease request. 
func (client containerClient) acquireLeasePreparer(timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "lease") params.Set("restype", "container") req.URL.RawQuery = params.Encode() if duration != nil { req.Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*duration), 10)) } if proposedLeaseID != nil { req.Header.Set("x-ms-proposed-lease-id", *proposedLeaseID) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } req.Header.Set("x-ms-lease-action", "acquire") return req, nil } // acquireLeaseResponder handles the response to the AcquireLease request. func (client containerClient) acquireLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusCreated) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &ContainerAcquireLeaseResponse{rawResponse: resp.Response()}, err } // BreakLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // // timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. 
breakPeriod is for a break operation, proposed duration the lease should // continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter than the // time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available // before the break period has expired, but the lease may be held for longer than the break period. If this header does // not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an // infinite lease breaks immediately. ifModifiedSince is specify this header value to operate only on a blob if it has // been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a // blob if it has not been modified since the specified date/time. requestID is provides a client-generated, opaque // value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. func (client containerClient) BreakLease(ctx context.Context, timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerBreakLeaseResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.breakLeasePreparer(timeout, breakPeriod, ifModifiedSince, ifUnmodifiedSince, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.breakLeaseResponder}, req) if err != nil { return nil, err } return resp.(*ContainerBreakLeaseResponse), err } // breakLeasePreparer prepares the BreakLease request. 
func (client containerClient) breakLeasePreparer(timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "lease") params.Set("restype", "container") req.URL.RawQuery = params.Encode() if breakPeriod != nil { req.Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*breakPeriod), 10)) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } req.Header.Set("x-ms-lease-action", "break") return req, nil } // breakLeaseResponder handles the response to the BreakLease request. func (client containerClient) breakLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusAccepted) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &ContainerBreakLeaseResponse{rawResponse: resp.Response()}, err } // ChangeLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be // 15 to 60 seconds, or can be infinite // // leaseID is specifies the current lease ID on the resource. proposedLeaseID is proposed lease ID, in a GUID string // format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See // Guid Constructor (String) for a list of valid GUID string formats. 
timeout is the timeout parameter is expressed in // seconds. For more information, see Setting // Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if // it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only // on a blob if it has not been modified since the specified date/time. requestID is provides a client-generated, // opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is // enabled. func (client containerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerChangeLeaseResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.changeLeasePreparer(leaseID, proposedLeaseID, timeout, ifModifiedSince, ifUnmodifiedSince, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.changeLeaseResponder}, req) if err != nil { return nil, err } return resp.(*ContainerChangeLeaseResponse), err } // changeLeasePreparer prepares the ChangeLease request. 
func (client containerClient) changeLeasePreparer(leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "lease") params.Set("restype", "container") req.URL.RawQuery = params.Encode() req.Header.Set("x-ms-lease-id", leaseID) req.Header.Set("x-ms-proposed-lease-id", proposedLeaseID) if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } req.Header.Set("x-ms-lease-action", "change") return req, nil } // changeLeaseResponder handles the response to the ChangeLease request. func (client containerClient) changeLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &ContainerChangeLeaseResponse{rawResponse: resp.Response()}, err } // Create creates a new container under the specified account. If the container with the same name already exists, the // operation fails // // timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated // with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or // file to the destination blob. 
If one or more name-value pairs are specified, the destination blob is created with // the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version // 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing // Containers, Blobs, and Metadata for more information. access is specifies whether data in the container may be // accessed publicly and the level of access requestID is provides a client-generated, opaque value with a 1 KB // character limit that is recorded in the analytics logs when storage analytics logging is enabled. func (client containerClient) Create(ctx context.Context, timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string) (*ContainerCreateResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.createPreparer(timeout, metadata, access, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req) if err != nil { return nil, err } return resp.(*ContainerCreateResponse), err } // createPreparer prepares the Create request. 
func (client containerClient) createPreparer(timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("restype", "container") req.URL.RawQuery = params.Encode() if metadata != nil { for k, v := range metadata { req.Header.Set("x-ms-meta-"+k, v) } } if access != PublicAccessNone { req.Header.Set("x-ms-blob-public-access", string(access)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // createResponder handles the response to the Create request. func (client containerClient) createResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusCreated) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &ContainerCreateResponse{rawResponse: resp.Response()}, err } // Delete operation marks the specified container for deletion. The container and any blobs contained within it are // later deleted during garbage collection // // timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's // lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it // has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a // blob if it has not been modified since the specified date/time. 
requestID is provides a client-generated, opaque // value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. func (client containerClient) Delete(ctx context.Context, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerDeleteResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.deletePreparer(timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.deleteResponder}, req) if err != nil { return nil, err } return resp.(*ContainerDeleteResponse), err } // deletePreparer prepares the Delete request. func (client containerClient) deletePreparer(timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("DELETE", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("restype", "container") req.URL.RawQuery = params.Encode() if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // deleteResponder handles the response to the Delete request. 
func (client containerClient) deleteResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusAccepted) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &ContainerDeleteResponse{rawResponse: resp.Response()}, err } // GetAccessPolicy gets the permissions for the specified container. The permissions indicate whether container data // may be accessed publicly. // // timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's // lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character // limit that is recorded in the analytics logs when storage analytics logging is enabled. func (client containerClient) GetAccessPolicy(ctx context.Context, timeout *int32, leaseID *string, requestID *string) (*SignedIdentifiers, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.getAccessPolicyPreparer(timeout, leaseID, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccessPolicyResponder}, req) if err != nil { return nil, err } return resp.(*SignedIdentifiers), err } // getAccessPolicyPreparer prepares the GetAccessPolicy request. 
func (client containerClient) getAccessPolicyPreparer(timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("GET", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("restype", "container") params.Set("comp", "acl") req.URL.RawQuery = params.Encode() if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // getAccessPolicyResponder handles the response to the GetAccessPolicy request. func (client containerClient) getAccessPolicyResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } result := &SignedIdentifiers{rawResponse: resp.Response()} if err != nil { return result, err } defer resp.Response().Body.Close() b, err := ioutil.ReadAll(resp.Response().Body) if err != nil { return result, err } if len(b) > 0 { b = removeBOM(b) err = xml.Unmarshal(b, result) if err != nil { return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") } } return result, nil } // GetAccountInfo returns the sku name and account kind func (client containerClient) GetAccountInfo(ctx context.Context) (*ContainerGetAccountInfoResponse, error) { req, err := client.getAccountInfoPreparer() if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req) if err != nil { return nil, err } return resp.(*ContainerGetAccountInfoResponse), err } // getAccountInfoPreparer prepares the GetAccountInfo request. 
func (client containerClient) getAccountInfoPreparer() (pipeline.Request, error) { req, err := pipeline.NewRequest("GET", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() params.Set("restype", "account") params.Set("comp", "properties") req.URL.RawQuery = params.Encode() req.Header.Set("x-ms-version", ServiceVersion) return req, nil } // getAccountInfoResponder handles the response to the GetAccountInfo request. func (client containerClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &ContainerGetAccountInfoResponse{rawResponse: resp.Response()}, err } // GetProperties returns all user-defined metadata and system properties for the specified container. The data returned // does not include the container's list of blobs // // timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's // lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character // limit that is recorded in the analytics logs when storage analytics logging is enabled. 
func (client containerClient) GetProperties(ctx context.Context, timeout *int32, leaseID *string, requestID *string) (*ContainerGetPropertiesResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.getPropertiesPreparer(timeout, leaseID, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req) if err != nil { return nil, err } return resp.(*ContainerGetPropertiesResponse), err } // getPropertiesPreparer prepares the GetProperties request. func (client containerClient) getPropertiesPreparer(timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("GET", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("restype", "container") req.URL.RawQuery = params.Encode() if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // getPropertiesResponder handles the response to the GetProperties request. 
func (client containerClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &ContainerGetPropertiesResponse{rawResponse: resp.Response()}, err } // ListBlobFlatSegment [Update] The List Blobs operation returns a list of the blobs under the specified container // // prefix is filters the results to return only containers whose name begins with the specified prefix. marker is a // string value that identifies the portion of the list of containers to be returned with the next listing operation. // The operation returns the NextMarker value within the response body if the listing operation did not return all // containers remaining to be listed with the current page. The NextMarker value can be used as the value for the // marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the // client. maxresults is specifies the maximum number of containers to return. If the request does not specify // maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the // remainder of the results. For this reason, it is possible that the service will return fewer results than specified // by maxresults, or than the default of 5000. include is include this parameter to specify one or more datasets to // include in the response. timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB // character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
func (client containerClient) ListBlobFlatSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (*ListBlobsFlatSegmentResponse, error) { if err := validate([]validation{ {targetValue: maxresults, constraints: []constraint{{target: "maxresults", name: null, rule: false, chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}, {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.listBlobFlatSegmentPreparer(prefix, marker, maxresults, include, timeout, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listBlobFlatSegmentResponder}, req) if err != nil { return nil, err } return resp.(*ListBlobsFlatSegmentResponse), err } // listBlobFlatSegmentPreparer prepares the ListBlobFlatSegment request. 
func (client containerClient) listBlobFlatSegmentPreparer(prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("GET", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if prefix != nil && len(*prefix) > 0 { params.Set("prefix", *prefix) } if marker != nil && len(*marker) > 0 { params.Set("marker", *marker) } if maxresults != nil { params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) } if include != nil && len(include) > 0 { params.Set("include", joinConst(include, ",")) } if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("restype", "container") params.Set("comp", "list") req.URL.RawQuery = params.Encode() req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // listBlobFlatSegmentResponder handles the response to the ListBlobFlatSegment request. 
func (client containerClient) listBlobFlatSegmentResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } result := &ListBlobsFlatSegmentResponse{rawResponse: resp.Response()} if err != nil { return result, err } defer resp.Response().Body.Close() b, err := ioutil.ReadAll(resp.Response().Body) if err != nil { return result, err } if len(b) > 0 { b = removeBOM(b) err = xml.Unmarshal(b, result) if err != nil { return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") } } return result, nil } // ListBlobHierarchySegment [Update] The List Blobs operation returns a list of the blobs under the specified container // // delimiter is when the request includes this parameter, the operation returns a BlobPrefix element in the response // body that acts as a placeholder for all blobs whose names begin with the same substring up to the appearance of the // delimiter character. The delimiter may be a single character or a string. prefix is filters the results to return // only containers whose name begins with the specified prefix. marker is a string value that identifies the portion of // the list of containers to be returned with the next listing operation. The operation returns the NextMarker value // within the response body if the listing operation did not return all containers remaining to be listed with the // current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request // the next page of list items. The marker value is opaque to the client. maxresults is specifies the maximum number of // containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server // will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will // return a continuation token for retrieving the remainder of the results. 
For this reason, it is possible that the // service will return fewer results than specified by maxresults, or than the default of 5000. include is include this // parameter to specify one or more datasets to include in the response. timeout is the timeout parameter is expressed // in seconds. For more information, see Setting // Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB // character limit that is recorded in the analytics logs when storage analytics logging is enabled. func (client containerClient) ListBlobHierarchySegment(ctx context.Context, delimiter string, prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (*ListBlobsHierarchySegmentResponse, error) { if err := validate([]validation{ {targetValue: maxresults, constraints: []constraint{{target: "maxresults", name: null, rule: false, chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}, {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.listBlobHierarchySegmentPreparer(delimiter, prefix, marker, maxresults, include, timeout, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listBlobHierarchySegmentResponder}, req) if err != nil { return nil, err } return resp.(*ListBlobsHierarchySegmentResponse), err } // listBlobHierarchySegmentPreparer prepares the ListBlobHierarchySegment request. 
func (client containerClient) listBlobHierarchySegmentPreparer(delimiter string, prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("GET", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if prefix != nil && len(*prefix) > 0 { params.Set("prefix", *prefix) } params.Set("delimiter", delimiter) if marker != nil && len(*marker) > 0 { params.Set("marker", *marker) } if maxresults != nil { params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) } if include != nil && len(include) > 0 { params.Set("include", joinConst(include, ",")) } if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("restype", "container") params.Set("comp", "list") req.URL.RawQuery = params.Encode() req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // listBlobHierarchySegmentResponder handles the response to the ListBlobHierarchySegment request. func (client containerClient) listBlobHierarchySegmentResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } result := &ListBlobsHierarchySegmentResponse{rawResponse: resp.Response()} if err != nil { return result, err } defer resp.Response().Body.Close() b, err := ioutil.ReadAll(resp.Response().Body) if err != nil { return result, err } if len(b) > 0 { b = removeBOM(b) err = xml.Unmarshal(b, result) if err != nil { return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") } } return result, nil } // ReleaseLease [Update] establishes and manages a lock on a container for delete operations. 
The lock duration can be // 15 to 60 seconds, or can be infinite // // leaseID is specifies the current lease ID on the resource. timeout is the timeout parameter is expressed in seconds. // For more information, see Setting // Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if // it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only // on a blob if it has not been modified since the specified date/time. requestID is provides a client-generated, // opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is // enabled. func (client containerClient) ReleaseLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerReleaseLeaseResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.releaseLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.releaseLeaseResponder}, req) if err != nil { return nil, err } return resp.(*ContainerReleaseLeaseResponse), err } // releaseLeasePreparer prepares the ReleaseLease request. 
func (client containerClient) releaseLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "lease") params.Set("restype", "container") req.URL.RawQuery = params.Encode() req.Header.Set("x-ms-lease-id", leaseID) if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } req.Header.Set("x-ms-lease-action", "release") return req, nil } // releaseLeaseResponder handles the response to the ReleaseLease request. func (client containerClient) releaseLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &ContainerReleaseLeaseResponse{rawResponse: resp.Response()}, err } // RenewLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // // leaseID is specifies the current lease ID on the resource. timeout is the timeout parameter is expressed in seconds. // For more information, see Setting // Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if // it has been modified since the specified date/time. 
ifUnmodifiedSince is specify this header value to operate only // on a blob if it has not been modified since the specified date/time. requestID is provides a client-generated, // opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is // enabled. func (client containerClient) RenewLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerRenewLeaseResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.renewLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.renewLeaseResponder}, req) if err != nil { return nil, err } return resp.(*ContainerRenewLeaseResponse), err } // renewLeasePreparer prepares the RenewLease request. 
func (client containerClient) renewLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "lease") params.Set("restype", "container") req.URL.RawQuery = params.Encode() req.Header.Set("x-ms-lease-id", leaseID) if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } req.Header.Set("x-ms-lease-action", "renew") return req, nil } // renewLeaseResponder handles the response to the RenewLease request. func (client containerClient) renewLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &ContainerRenewLeaseResponse{rawResponse: resp.Response()}, err } // SetAccessPolicy sets the permissions for the specified container. The permissions indicate whether blobs in a // container may be accessed publicly. // // containerACL is the acls for the container timeout is the timeout parameter is expressed in seconds. For more // information, see Setting // Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's // lease is active and matches this ID. 
access is specifies whether data in the container may be accessed publicly and // the level of access ifModifiedSince is specify this header value to operate only on a blob if it has been modified // since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has // not been modified since the specified date/time. requestID is provides a client-generated, opaque value with a 1 KB // character limit that is recorded in the analytics logs when storage analytics logging is enabled. func (client containerClient) SetAccessPolicy(ctx context.Context, containerACL []SignedIdentifier, timeout *int32, leaseID *string, access PublicAccessType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerSetAccessPolicyResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.setAccessPolicyPreparer(containerACL, timeout, leaseID, access, ifModifiedSince, ifUnmodifiedSince, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setAccessPolicyResponder}, req) if err != nil { return nil, err } return resp.(*ContainerSetAccessPolicyResponse), err } // setAccessPolicyPreparer prepares the SetAccessPolicy request. 
func (client containerClient) setAccessPolicyPreparer(containerACL []SignedIdentifier, timeout *int32, leaseID *string, access PublicAccessType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("restype", "container") params.Set("comp", "acl") req.URL.RawQuery = params.Encode() if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } if access != PublicAccessNone { req.Header.Set("x-ms-blob-public-access", string(access)) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } b, err := xml.Marshal(SignedIdentifiers{Items: containerACL}) if err != nil { return req, pipeline.NewError(err, "failed to marshal request body") } req.Header.Set("Content-Type", "application/xml") err = req.SetBody(bytes.NewReader(b)) if err != nil { return req, pipeline.NewError(err, "failed to set request body") } return req, nil } // setAccessPolicyResponder handles the response to the SetAccessPolicy request. func (client containerClient) setAccessPolicyResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &ContainerSetAccessPolicyResponse{rawResponse: resp.Response()}, err } // SetMetadata operation sets one or more user-defined name-value pairs for the specified container. 
// // timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's // lease is active and matches this ID. metadata is optional. Specifies a user-defined name-value pair associated with // the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to // the destination blob. If one or more name-value pairs are specified, the destination blob is created with the // specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version // 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing // Containers, Blobs, and Metadata for more information. ifModifiedSince is specify this header value to operate only // on a blob if it has been modified since the specified date/time. requestID is provides a client-generated, opaque // value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. func (client containerClient) SetMetadata(ctx context.Context, timeout *int32, leaseID *string, metadata map[string]string, ifModifiedSince *time.Time, requestID *string) (*ContainerSetMetadataResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.setMetadataPreparer(timeout, leaseID, metadata, ifModifiedSince, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setMetadataResponder}, req) if err != nil { return nil, err } return resp.(*ContainerSetMetadataResponse), err } // setMetadataPreparer prepares the SetMetadata request. 
func (client containerClient) setMetadataPreparer(timeout *int32, leaseID *string, metadata map[string]string, ifModifiedSince *time.Time, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("restype", "container") params.Set("comp", "metadata") req.URL.RawQuery = params.Encode() if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } if metadata != nil { for k, v := range metadata { req.Header.Set("x-ms-meta-"+k, v) } } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // setMetadataResponder handles the response to the SetMetadata request. func (client containerClient) setMetadataResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &ContainerSetMetadataResponse{rawResponse: resp.Response()}, err } azure-storage-blob-go-0.10.0/azblob/zz_generated_models.go000066400000000000000000006442201367515646300235530ustar00rootroot00000000000000package azblob // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "encoding/base64" "encoding/xml" "errors" "io" "net/http" "reflect" "strconv" "strings" "time" "unsafe" ) // ETag is an entity tag. type ETag string const ( // ETagNone represents an empty entity tag. ETagNone ETag = "" // ETagAny matches any entity tag. ETagAny ETag = "*" ) // Metadata contains metadata key/value pairs. 
type Metadata map[string]string const mdPrefix = "x-ms-meta-" const mdPrefixLen = len(mdPrefix) // UnmarshalXML implements the xml.Unmarshaler interface for Metadata. func (md *Metadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { tokName := "" for t, err := d.Token(); err == nil; t, err = d.Token() { switch tt := t.(type) { case xml.StartElement: tokName = strings.ToLower(tt.Name.Local) break case xml.CharData: if *md == nil { *md = Metadata{} } (*md)[tokName] = string(tt) break } } return nil } // Marker represents an opaque value used in paged responses. type Marker struct { Val *string } // NotDone returns true if the list enumeration should be started or is not yet complete. Specifically, NotDone returns true // for a just-initialized (zero value) Marker indicating that you should make an initial request to get a result portion from // the service. NotDone also returns true whenever the service returns an interim result portion. NotDone returns false only // after the service has returned the final result portion. func (m Marker) NotDone() bool { return m.Val == nil || *m.Val != "" } // UnmarshalXML implements the xml.Unmarshaler interface for Marker. func (m *Marker) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { var out string err := d.DecodeElement(&out, &start) m.Val = &out return err } // concatenates a slice of const values with the specified separator between each item func joinConst(s interface{}, sep string) string { v := reflect.ValueOf(s) if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { panic("s wasn't a slice or array") } ss := make([]string, 0, v.Len()) for i := 0; i < v.Len(); i++ { ss = append(ss, v.Index(i).String()) } return strings.Join(ss, sep) } func validateError(err error) { if err != nil { panic(err) } } // AccessTierType enumerates the values for access tier type. type AccessTierType string const ( // AccessTierArchive ... AccessTierArchive AccessTierType = "Archive" // AccessTierCool ... 
AccessTierCool AccessTierType = "Cool" // AccessTierHot ... AccessTierHot AccessTierType = "Hot" // AccessTierNone represents an empty AccessTierType. AccessTierNone AccessTierType = "" // AccessTierP10 ... AccessTierP10 AccessTierType = "P10" // AccessTierP15 ... AccessTierP15 AccessTierType = "P15" // AccessTierP20 ... AccessTierP20 AccessTierType = "P20" // AccessTierP30 ... AccessTierP30 AccessTierType = "P30" // AccessTierP4 ... AccessTierP4 AccessTierType = "P4" // AccessTierP40 ... AccessTierP40 AccessTierType = "P40" // AccessTierP50 ... AccessTierP50 AccessTierType = "P50" // AccessTierP6 ... AccessTierP6 AccessTierType = "P6" // AccessTierP60 ... AccessTierP60 AccessTierType = "P60" // AccessTierP70 ... AccessTierP70 AccessTierType = "P70" // AccessTierP80 ... AccessTierP80 AccessTierType = "P80" ) // PossibleAccessTierTypeValues returns an array of possible values for the AccessTierType const type. func PossibleAccessTierTypeValues() []AccessTierType { return []AccessTierType{AccessTierArchive, AccessTierCool, AccessTierHot, AccessTierNone, AccessTierP10, AccessTierP15, AccessTierP20, AccessTierP30, AccessTierP4, AccessTierP40, AccessTierP50, AccessTierP6, AccessTierP60, AccessTierP70, AccessTierP80} } // AccountKindType enumerates the values for account kind type. type AccountKindType string const ( // AccountKindBlobStorage ... AccountKindBlobStorage AccountKindType = "BlobStorage" // AccountKindNone represents an empty AccountKindType. AccountKindNone AccountKindType = "" // AccountKindStorage ... AccountKindStorage AccountKindType = "Storage" // AccountKindStorageV2 ... AccountKindStorageV2 AccountKindType = "StorageV2" ) // PossibleAccountKindTypeValues returns an array of possible values for the AccountKindType const type. 
func PossibleAccountKindTypeValues() []AccountKindType { return []AccountKindType{AccountKindBlobStorage, AccountKindNone, AccountKindStorage, AccountKindStorageV2} } // ArchiveStatusType enumerates the values for archive status type. type ArchiveStatusType string const ( // ArchiveStatusNone represents an empty ArchiveStatusType. ArchiveStatusNone ArchiveStatusType = "" // ArchiveStatusRehydratePendingToCool ... ArchiveStatusRehydratePendingToCool ArchiveStatusType = "rehydrate-pending-to-cool" // ArchiveStatusRehydratePendingToHot ... ArchiveStatusRehydratePendingToHot ArchiveStatusType = "rehydrate-pending-to-hot" ) // PossibleArchiveStatusTypeValues returns an array of possible values for the ArchiveStatusType const type. func PossibleArchiveStatusTypeValues() []ArchiveStatusType { return []ArchiveStatusType{ArchiveStatusNone, ArchiveStatusRehydratePendingToCool, ArchiveStatusRehydratePendingToHot} } // BlobType enumerates the values for blob type. type BlobType string const ( // BlobAppendBlob ... BlobAppendBlob BlobType = "AppendBlob" // BlobBlockBlob ... BlobBlockBlob BlobType = "BlockBlob" // BlobNone represents an empty BlobType. BlobNone BlobType = "" // BlobPageBlob ... BlobPageBlob BlobType = "PageBlob" ) // PossibleBlobTypeValues returns an array of possible values for the BlobType const type. func PossibleBlobTypeValues() []BlobType { return []BlobType{BlobAppendBlob, BlobBlockBlob, BlobNone, BlobPageBlob} } // BlockListType enumerates the values for block list type. type BlockListType string const ( // BlockListAll ... BlockListAll BlockListType = "all" // BlockListCommitted ... BlockListCommitted BlockListType = "committed" // BlockListNone represents an empty BlockListType. BlockListNone BlockListType = "" // BlockListUncommitted ... BlockListUncommitted BlockListType = "uncommitted" ) // PossibleBlockListTypeValues returns an array of possible values for the BlockListType const type. 
func PossibleBlockListTypeValues() []BlockListType { return []BlockListType{BlockListAll, BlockListCommitted, BlockListNone, BlockListUncommitted} } // CopyStatusType enumerates the values for copy status type. type CopyStatusType string const ( // CopyStatusAborted ... CopyStatusAborted CopyStatusType = "aborted" // CopyStatusFailed ... CopyStatusFailed CopyStatusType = "failed" // CopyStatusNone represents an empty CopyStatusType. CopyStatusNone CopyStatusType = "" // CopyStatusPending ... CopyStatusPending CopyStatusType = "pending" // CopyStatusSuccess ... CopyStatusSuccess CopyStatusType = "success" ) // PossibleCopyStatusTypeValues returns an array of possible values for the CopyStatusType const type. func PossibleCopyStatusTypeValues() []CopyStatusType { return []CopyStatusType{CopyStatusAborted, CopyStatusFailed, CopyStatusNone, CopyStatusPending, CopyStatusSuccess} } // DeleteSnapshotsOptionType enumerates the values for delete snapshots option type. type DeleteSnapshotsOptionType string const ( // DeleteSnapshotsOptionInclude ... DeleteSnapshotsOptionInclude DeleteSnapshotsOptionType = "include" // DeleteSnapshotsOptionNone represents an empty DeleteSnapshotsOptionType. DeleteSnapshotsOptionNone DeleteSnapshotsOptionType = "" // DeleteSnapshotsOptionOnly ... DeleteSnapshotsOptionOnly DeleteSnapshotsOptionType = "only" ) // PossibleDeleteSnapshotsOptionTypeValues returns an array of possible values for the DeleteSnapshotsOptionType const type. func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { return []DeleteSnapshotsOptionType{DeleteSnapshotsOptionInclude, DeleteSnapshotsOptionNone, DeleteSnapshotsOptionOnly} } // EncryptionAlgorithmType enumerates the values for encryption algorithm type. type EncryptionAlgorithmType string const ( // EncryptionAlgorithmAES256 ... EncryptionAlgorithmAES256 EncryptionAlgorithmType = "AES256" // EncryptionAlgorithmNone represents an empty EncryptionAlgorithmType. 
EncryptionAlgorithmNone EncryptionAlgorithmType = "" ) // PossibleEncryptionAlgorithmTypeValues returns an array of possible values for the EncryptionAlgorithmType const type. func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { return []EncryptionAlgorithmType{EncryptionAlgorithmAES256, EncryptionAlgorithmNone} } // GeoReplicationStatusType enumerates the values for geo replication status type. type GeoReplicationStatusType string const ( // GeoReplicationStatusBootstrap ... GeoReplicationStatusBootstrap GeoReplicationStatusType = "bootstrap" // GeoReplicationStatusLive ... GeoReplicationStatusLive GeoReplicationStatusType = "live" // GeoReplicationStatusNone represents an empty GeoReplicationStatusType. GeoReplicationStatusNone GeoReplicationStatusType = "" // GeoReplicationStatusUnavailable ... GeoReplicationStatusUnavailable GeoReplicationStatusType = "unavailable" ) // PossibleGeoReplicationStatusTypeValues returns an array of possible values for the GeoReplicationStatusType const type. func PossibleGeoReplicationStatusTypeValues() []GeoReplicationStatusType { return []GeoReplicationStatusType{GeoReplicationStatusBootstrap, GeoReplicationStatusLive, GeoReplicationStatusNone, GeoReplicationStatusUnavailable} } // LeaseDurationType enumerates the values for lease duration type. type LeaseDurationType string const ( // LeaseDurationFixed ... LeaseDurationFixed LeaseDurationType = "fixed" // LeaseDurationInfinite ... LeaseDurationInfinite LeaseDurationType = "infinite" // LeaseDurationNone represents an empty LeaseDurationType. LeaseDurationNone LeaseDurationType = "" ) // PossibleLeaseDurationTypeValues returns an array of possible values for the LeaseDurationType const type. func PossibleLeaseDurationTypeValues() []LeaseDurationType { return []LeaseDurationType{LeaseDurationFixed, LeaseDurationInfinite, LeaseDurationNone} } // LeaseStateType enumerates the values for lease state type. 
type LeaseStateType string const ( // LeaseStateAvailable ... LeaseStateAvailable LeaseStateType = "available" // LeaseStateBreaking ... LeaseStateBreaking LeaseStateType = "breaking" // LeaseStateBroken ... LeaseStateBroken LeaseStateType = "broken" // LeaseStateExpired ... LeaseStateExpired LeaseStateType = "expired" // LeaseStateLeased ... LeaseStateLeased LeaseStateType = "leased" // LeaseStateNone represents an empty LeaseStateType. LeaseStateNone LeaseStateType = "" ) // PossibleLeaseStateTypeValues returns an array of possible values for the LeaseStateType const type. func PossibleLeaseStateTypeValues() []LeaseStateType { return []LeaseStateType{LeaseStateAvailable, LeaseStateBreaking, LeaseStateBroken, LeaseStateExpired, LeaseStateLeased, LeaseStateNone} } // LeaseStatusType enumerates the values for lease status type. type LeaseStatusType string const ( // LeaseStatusLocked ... LeaseStatusLocked LeaseStatusType = "locked" // LeaseStatusNone represents an empty LeaseStatusType. LeaseStatusNone LeaseStatusType = "" // LeaseStatusUnlocked ... LeaseStatusUnlocked LeaseStatusType = "unlocked" ) // PossibleLeaseStatusTypeValues returns an array of possible values for the LeaseStatusType const type. func PossibleLeaseStatusTypeValues() []LeaseStatusType { return []LeaseStatusType{LeaseStatusLocked, LeaseStatusNone, LeaseStatusUnlocked} } // ListBlobsIncludeItemType enumerates the values for list blobs include item type. type ListBlobsIncludeItemType string const ( // ListBlobsIncludeItemCopy ... ListBlobsIncludeItemCopy ListBlobsIncludeItemType = "copy" // ListBlobsIncludeItemDeleted ... ListBlobsIncludeItemDeleted ListBlobsIncludeItemType = "deleted" // ListBlobsIncludeItemMetadata ... ListBlobsIncludeItemMetadata ListBlobsIncludeItemType = "metadata" // ListBlobsIncludeItemNone represents an empty ListBlobsIncludeItemType. ListBlobsIncludeItemNone ListBlobsIncludeItemType = "" // ListBlobsIncludeItemSnapshots ... 
ListBlobsIncludeItemSnapshots ListBlobsIncludeItemType = "snapshots" // ListBlobsIncludeItemUncommittedblobs ... ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItemType = "uncommittedblobs" ) // PossibleListBlobsIncludeItemTypeValues returns an array of possible values for the ListBlobsIncludeItemType const type. func PossibleListBlobsIncludeItemTypeValues() []ListBlobsIncludeItemType { return []ListBlobsIncludeItemType{ListBlobsIncludeItemCopy, ListBlobsIncludeItemDeleted, ListBlobsIncludeItemMetadata, ListBlobsIncludeItemNone, ListBlobsIncludeItemSnapshots, ListBlobsIncludeItemUncommittedblobs} } // ListContainersIncludeType enumerates the values for list containers include type. type ListContainersIncludeType string const ( // ListContainersIncludeMetadata ... ListContainersIncludeMetadata ListContainersIncludeType = "metadata" // ListContainersIncludeNone represents an empty ListContainersIncludeType. ListContainersIncludeNone ListContainersIncludeType = "" ) // PossibleListContainersIncludeTypeValues returns an array of possible values for the ListContainersIncludeType const type. func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { return []ListContainersIncludeType{ListContainersIncludeMetadata, ListContainersIncludeNone} } // PathRenameModeType enumerates the values for path rename mode type. type PathRenameModeType string const ( // PathRenameModeLegacy ... PathRenameModeLegacy PathRenameModeType = "legacy" // PathRenameModeNone represents an empty PathRenameModeType. PathRenameModeNone PathRenameModeType = "" // PathRenameModePosix ... PathRenameModePosix PathRenameModeType = "posix" ) // PossiblePathRenameModeTypeValues returns an array of possible values for the PathRenameModeType const type. 
func PossiblePathRenameModeTypeValues() []PathRenameModeType { return []PathRenameModeType{PathRenameModeLegacy, PathRenameModeNone, PathRenameModePosix} } // PremiumPageBlobAccessTierType enumerates the values for premium page blob access tier type. type PremiumPageBlobAccessTierType string const ( // PremiumPageBlobAccessTierNone represents an empty PremiumPageBlobAccessTierType. PremiumPageBlobAccessTierNone PremiumPageBlobAccessTierType = "" // PremiumPageBlobAccessTierP10 ... PremiumPageBlobAccessTierP10 PremiumPageBlobAccessTierType = "P10" // PremiumPageBlobAccessTierP15 ... PremiumPageBlobAccessTierP15 PremiumPageBlobAccessTierType = "P15" // PremiumPageBlobAccessTierP20 ... PremiumPageBlobAccessTierP20 PremiumPageBlobAccessTierType = "P20" // PremiumPageBlobAccessTierP30 ... PremiumPageBlobAccessTierP30 PremiumPageBlobAccessTierType = "P30" // PremiumPageBlobAccessTierP4 ... PremiumPageBlobAccessTierP4 PremiumPageBlobAccessTierType = "P4" // PremiumPageBlobAccessTierP40 ... PremiumPageBlobAccessTierP40 PremiumPageBlobAccessTierType = "P40" // PremiumPageBlobAccessTierP50 ... PremiumPageBlobAccessTierP50 PremiumPageBlobAccessTierType = "P50" // PremiumPageBlobAccessTierP6 ... PremiumPageBlobAccessTierP6 PremiumPageBlobAccessTierType = "P6" // PremiumPageBlobAccessTierP60 ... PremiumPageBlobAccessTierP60 PremiumPageBlobAccessTierType = "P60" // PremiumPageBlobAccessTierP70 ... PremiumPageBlobAccessTierP70 PremiumPageBlobAccessTierType = "P70" // PremiumPageBlobAccessTierP80 ... PremiumPageBlobAccessTierP80 PremiumPageBlobAccessTierType = "P80" ) // PossiblePremiumPageBlobAccessTierTypeValues returns an array of possible values for the PremiumPageBlobAccessTierType const type. 
func PossiblePremiumPageBlobAccessTierTypeValues() []PremiumPageBlobAccessTierType { return []PremiumPageBlobAccessTierType{PremiumPageBlobAccessTierNone, PremiumPageBlobAccessTierP10, PremiumPageBlobAccessTierP15, PremiumPageBlobAccessTierP20, PremiumPageBlobAccessTierP30, PremiumPageBlobAccessTierP4, PremiumPageBlobAccessTierP40, PremiumPageBlobAccessTierP50, PremiumPageBlobAccessTierP6, PremiumPageBlobAccessTierP60, PremiumPageBlobAccessTierP70, PremiumPageBlobAccessTierP80} } // PublicAccessType enumerates the values for public access type. type PublicAccessType string const ( // PublicAccessBlob ... PublicAccessBlob PublicAccessType = "blob" // PublicAccessContainer ... PublicAccessContainer PublicAccessType = "container" // PublicAccessNone represents an empty PublicAccessType. PublicAccessNone PublicAccessType = "" ) // PossiblePublicAccessTypeValues returns an array of possible values for the PublicAccessType const type. func PossiblePublicAccessTypeValues() []PublicAccessType { return []PublicAccessType{PublicAccessBlob, PublicAccessContainer, PublicAccessNone} } // RehydratePriorityType enumerates the values for rehydrate priority type. type RehydratePriorityType string const ( // RehydratePriorityHigh ... RehydratePriorityHigh RehydratePriorityType = "High" // RehydratePriorityNone represents an empty RehydratePriorityType. RehydratePriorityNone RehydratePriorityType = "" // RehydratePriorityStandard ... RehydratePriorityStandard RehydratePriorityType = "Standard" ) // PossibleRehydratePriorityTypeValues returns an array of possible values for the RehydratePriorityType const type. func PossibleRehydratePriorityTypeValues() []RehydratePriorityType { return []RehydratePriorityType{RehydratePriorityHigh, RehydratePriorityNone, RehydratePriorityStandard} } // SequenceNumberActionType enumerates the values for sequence number action type. type SequenceNumberActionType string const ( // SequenceNumberActionIncrement ... 
SequenceNumberActionIncrement SequenceNumberActionType = "increment" // SequenceNumberActionMax ... SequenceNumberActionMax SequenceNumberActionType = "max" // SequenceNumberActionNone represents an empty SequenceNumberActionType. SequenceNumberActionNone SequenceNumberActionType = "" // SequenceNumberActionUpdate ... SequenceNumberActionUpdate SequenceNumberActionType = "update" ) // PossibleSequenceNumberActionTypeValues returns an array of possible values for the SequenceNumberActionType const type. func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { return []SequenceNumberActionType{SequenceNumberActionIncrement, SequenceNumberActionMax, SequenceNumberActionNone, SequenceNumberActionUpdate} } // SkuNameType enumerates the values for sku name type. type SkuNameType string const ( // SkuNameNone represents an empty SkuNameType. SkuNameNone SkuNameType = "" // SkuNamePremiumLRS ... SkuNamePremiumLRS SkuNameType = "Premium_LRS" // SkuNameStandardGRS ... SkuNameStandardGRS SkuNameType = "Standard_GRS" // SkuNameStandardLRS ... SkuNameStandardLRS SkuNameType = "Standard_LRS" // SkuNameStandardRAGRS ... SkuNameStandardRAGRS SkuNameType = "Standard_RAGRS" // SkuNameStandardZRS ... SkuNameStandardZRS SkuNameType = "Standard_ZRS" ) // PossibleSkuNameTypeValues returns an array of possible values for the SkuNameType const type. func PossibleSkuNameTypeValues() []SkuNameType { return []SkuNameType{SkuNameNone, SkuNamePremiumLRS, SkuNameStandardGRS, SkuNameStandardLRS, SkuNameStandardRAGRS, SkuNameStandardZRS} } // StorageErrorCodeType enumerates the values for storage error code type. type StorageErrorCodeType string const ( // StorageErrorCodeAccountAlreadyExists ... StorageErrorCodeAccountAlreadyExists StorageErrorCodeType = "AccountAlreadyExists" // StorageErrorCodeAccountBeingCreated ... StorageErrorCodeAccountBeingCreated StorageErrorCodeType = "AccountBeingCreated" // StorageErrorCodeAccountIsDisabled ... 
StorageErrorCodeAccountIsDisabled StorageErrorCodeType = "AccountIsDisabled" // StorageErrorCodeAppendPositionConditionNotMet ... StorageErrorCodeAppendPositionConditionNotMet StorageErrorCodeType = "AppendPositionConditionNotMet" // StorageErrorCodeAuthenticationFailed ... StorageErrorCodeAuthenticationFailed StorageErrorCodeType = "AuthenticationFailed" // StorageErrorCodeAuthorizationFailure ... StorageErrorCodeAuthorizationFailure StorageErrorCodeType = "AuthorizationFailure" // StorageErrorCodeAuthorizationPermissionMismatch ... StorageErrorCodeAuthorizationPermissionMismatch StorageErrorCodeType = "AuthorizationPermissionMismatch" // StorageErrorCodeAuthorizationProtocolMismatch ... StorageErrorCodeAuthorizationProtocolMismatch StorageErrorCodeType = "AuthorizationProtocolMismatch" // StorageErrorCodeAuthorizationResourceTypeMismatch ... StorageErrorCodeAuthorizationResourceTypeMismatch StorageErrorCodeType = "AuthorizationResourceTypeMismatch" // StorageErrorCodeAuthorizationServiceMismatch ... StorageErrorCodeAuthorizationServiceMismatch StorageErrorCodeType = "AuthorizationServiceMismatch" // StorageErrorCodeAuthorizationSourceIPMismatch ... StorageErrorCodeAuthorizationSourceIPMismatch StorageErrorCodeType = "AuthorizationSourceIPMismatch" // StorageErrorCodeBlobAlreadyExists ... StorageErrorCodeBlobAlreadyExists StorageErrorCodeType = "BlobAlreadyExists" // StorageErrorCodeBlobArchived ... StorageErrorCodeBlobArchived StorageErrorCodeType = "BlobArchived" // StorageErrorCodeBlobBeingRehydrated ... StorageErrorCodeBlobBeingRehydrated StorageErrorCodeType = "BlobBeingRehydrated" // StorageErrorCodeBlobNotArchived ... StorageErrorCodeBlobNotArchived StorageErrorCodeType = "BlobNotArchived" // StorageErrorCodeBlobNotFound ... StorageErrorCodeBlobNotFound StorageErrorCodeType = "BlobNotFound" // StorageErrorCodeBlobOverwritten ... StorageErrorCodeBlobOverwritten StorageErrorCodeType = "BlobOverwritten" // StorageErrorCodeBlobTierInadequateForContentLength ... 
StorageErrorCodeBlobTierInadequateForContentLength StorageErrorCodeType = "BlobTierInadequateForContentLength" // StorageErrorCodeBlockCountExceedsLimit ... StorageErrorCodeBlockCountExceedsLimit StorageErrorCodeType = "BlockCountExceedsLimit" // StorageErrorCodeBlockListTooLong ... StorageErrorCodeBlockListTooLong StorageErrorCodeType = "BlockListTooLong" // StorageErrorCodeCannotChangeToLowerTier ... StorageErrorCodeCannotChangeToLowerTier StorageErrorCodeType = "CannotChangeToLowerTier" // StorageErrorCodeCannotVerifyCopySource ... StorageErrorCodeCannotVerifyCopySource StorageErrorCodeType = "CannotVerifyCopySource" // StorageErrorCodeConditionHeadersNotSupported ... StorageErrorCodeConditionHeadersNotSupported StorageErrorCodeType = "ConditionHeadersNotSupported" // StorageErrorCodeConditionNotMet ... StorageErrorCodeConditionNotMet StorageErrorCodeType = "ConditionNotMet" // StorageErrorCodeContainerAlreadyExists ... StorageErrorCodeContainerAlreadyExists StorageErrorCodeType = "ContainerAlreadyExists" // StorageErrorCodeContainerBeingDeleted ... StorageErrorCodeContainerBeingDeleted StorageErrorCodeType = "ContainerBeingDeleted" // StorageErrorCodeContainerDisabled ... StorageErrorCodeContainerDisabled StorageErrorCodeType = "ContainerDisabled" // StorageErrorCodeContainerNotFound ... StorageErrorCodeContainerNotFound StorageErrorCodeType = "ContainerNotFound" // StorageErrorCodeContentLengthLargerThanTierLimit ... StorageErrorCodeContentLengthLargerThanTierLimit StorageErrorCodeType = "ContentLengthLargerThanTierLimit" // StorageErrorCodeCopyAcrossAccountsNotSupported ... StorageErrorCodeCopyAcrossAccountsNotSupported StorageErrorCodeType = "CopyAcrossAccountsNotSupported" // StorageErrorCodeCopyIDMismatch ... StorageErrorCodeCopyIDMismatch StorageErrorCodeType = "CopyIdMismatch" // StorageErrorCodeEmptyMetadataKey ... StorageErrorCodeEmptyMetadataKey StorageErrorCodeType = "EmptyMetadataKey" // StorageErrorCodeFeatureVersionMismatch ... 
StorageErrorCodeFeatureVersionMismatch StorageErrorCodeType = "FeatureVersionMismatch" // StorageErrorCodeIncrementalCopyBlobMismatch ... StorageErrorCodeIncrementalCopyBlobMismatch StorageErrorCodeType = "IncrementalCopyBlobMismatch" // StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed ... StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed StorageErrorCodeType = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" // StorageErrorCodeIncrementalCopySourceMustBeSnapshot ... StorageErrorCodeIncrementalCopySourceMustBeSnapshot StorageErrorCodeType = "IncrementalCopySourceMustBeSnapshot" // StorageErrorCodeInfiniteLeaseDurationRequired ... StorageErrorCodeInfiniteLeaseDurationRequired StorageErrorCodeType = "InfiniteLeaseDurationRequired" // StorageErrorCodeInsufficientAccountPermissions ... StorageErrorCodeInsufficientAccountPermissions StorageErrorCodeType = "InsufficientAccountPermissions" // StorageErrorCodeInternalError ... StorageErrorCodeInternalError StorageErrorCodeType = "InternalError" // StorageErrorCodeInvalidAuthenticationInfo ... StorageErrorCodeInvalidAuthenticationInfo StorageErrorCodeType = "InvalidAuthenticationInfo" // StorageErrorCodeInvalidBlobOrBlock ... StorageErrorCodeInvalidBlobOrBlock StorageErrorCodeType = "InvalidBlobOrBlock" // StorageErrorCodeInvalidBlobTier ... StorageErrorCodeInvalidBlobTier StorageErrorCodeType = "InvalidBlobTier" // StorageErrorCodeInvalidBlobType ... StorageErrorCodeInvalidBlobType StorageErrorCodeType = "InvalidBlobType" // StorageErrorCodeInvalidBlockID ... StorageErrorCodeInvalidBlockID StorageErrorCodeType = "InvalidBlockId" // StorageErrorCodeInvalidBlockList ... StorageErrorCodeInvalidBlockList StorageErrorCodeType = "InvalidBlockList" // StorageErrorCodeInvalidHeaderValue ... StorageErrorCodeInvalidHeaderValue StorageErrorCodeType = "InvalidHeaderValue" // StorageErrorCodeInvalidHTTPVerb ... 
StorageErrorCodeInvalidHTTPVerb StorageErrorCodeType = "InvalidHttpVerb" // StorageErrorCodeInvalidInput ... StorageErrorCodeInvalidInput StorageErrorCodeType = "InvalidInput" // StorageErrorCodeInvalidMd5 ... StorageErrorCodeInvalidMd5 StorageErrorCodeType = "InvalidMd5" // StorageErrorCodeInvalidMetadata ... StorageErrorCodeInvalidMetadata StorageErrorCodeType = "InvalidMetadata" // StorageErrorCodeInvalidOperation ... StorageErrorCodeInvalidOperation StorageErrorCodeType = "InvalidOperation" // StorageErrorCodeInvalidPageRange ... StorageErrorCodeInvalidPageRange StorageErrorCodeType = "InvalidPageRange" // StorageErrorCodeInvalidQueryParameterValue ... StorageErrorCodeInvalidQueryParameterValue StorageErrorCodeType = "InvalidQueryParameterValue" // StorageErrorCodeInvalidRange ... StorageErrorCodeInvalidRange StorageErrorCodeType = "InvalidRange" // StorageErrorCodeInvalidResourceName ... StorageErrorCodeInvalidResourceName StorageErrorCodeType = "InvalidResourceName" // StorageErrorCodeInvalidSourceBlobType ... StorageErrorCodeInvalidSourceBlobType StorageErrorCodeType = "InvalidSourceBlobType" // StorageErrorCodeInvalidSourceBlobURL ... StorageErrorCodeInvalidSourceBlobURL StorageErrorCodeType = "InvalidSourceBlobUrl" // StorageErrorCodeInvalidURI ... StorageErrorCodeInvalidURI StorageErrorCodeType = "InvalidUri" // StorageErrorCodeInvalidVersionForPageBlobOperation ... StorageErrorCodeInvalidVersionForPageBlobOperation StorageErrorCodeType = "InvalidVersionForPageBlobOperation" // StorageErrorCodeInvalidXMLDocument ... StorageErrorCodeInvalidXMLDocument StorageErrorCodeType = "InvalidXmlDocument" // StorageErrorCodeInvalidXMLNodeValue ... StorageErrorCodeInvalidXMLNodeValue StorageErrorCodeType = "InvalidXmlNodeValue" // StorageErrorCodeLeaseAlreadyBroken ... StorageErrorCodeLeaseAlreadyBroken StorageErrorCodeType = "LeaseAlreadyBroken" // StorageErrorCodeLeaseAlreadyPresent ... 
StorageErrorCodeLeaseAlreadyPresent StorageErrorCodeType = "LeaseAlreadyPresent" // StorageErrorCodeLeaseIDMismatchWithBlobOperation ... StorageErrorCodeLeaseIDMismatchWithBlobOperation StorageErrorCodeType = "LeaseIdMismatchWithBlobOperation" // StorageErrorCodeLeaseIDMismatchWithContainerOperation ... StorageErrorCodeLeaseIDMismatchWithContainerOperation StorageErrorCodeType = "LeaseIdMismatchWithContainerOperation" // StorageErrorCodeLeaseIDMismatchWithLeaseOperation ... StorageErrorCodeLeaseIDMismatchWithLeaseOperation StorageErrorCodeType = "LeaseIdMismatchWithLeaseOperation" // StorageErrorCodeLeaseIDMissing ... StorageErrorCodeLeaseIDMissing StorageErrorCodeType = "LeaseIdMissing" // StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired ... StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired StorageErrorCodeType = "LeaseIsBreakingAndCannotBeAcquired" // StorageErrorCodeLeaseIsBreakingAndCannotBeChanged ... StorageErrorCodeLeaseIsBreakingAndCannotBeChanged StorageErrorCodeType = "LeaseIsBreakingAndCannotBeChanged" // StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed ... StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed StorageErrorCodeType = "LeaseIsBrokenAndCannotBeRenewed" // StorageErrorCodeLeaseLost ... StorageErrorCodeLeaseLost StorageErrorCodeType = "LeaseLost" // StorageErrorCodeLeaseNotPresentWithBlobOperation ... StorageErrorCodeLeaseNotPresentWithBlobOperation StorageErrorCodeType = "LeaseNotPresentWithBlobOperation" // StorageErrorCodeLeaseNotPresentWithContainerOperation ... StorageErrorCodeLeaseNotPresentWithContainerOperation StorageErrorCodeType = "LeaseNotPresentWithContainerOperation" // StorageErrorCodeLeaseNotPresentWithLeaseOperation ... StorageErrorCodeLeaseNotPresentWithLeaseOperation StorageErrorCodeType = "LeaseNotPresentWithLeaseOperation" // StorageErrorCodeMaxBlobSizeConditionNotMet ... StorageErrorCodeMaxBlobSizeConditionNotMet StorageErrorCodeType = "MaxBlobSizeConditionNotMet" // StorageErrorCodeMd5Mismatch ... 
StorageErrorCodeMd5Mismatch StorageErrorCodeType = "Md5Mismatch" // StorageErrorCodeMetadataTooLarge ... StorageErrorCodeMetadataTooLarge StorageErrorCodeType = "MetadataTooLarge" // StorageErrorCodeMissingContentLengthHeader ... StorageErrorCodeMissingContentLengthHeader StorageErrorCodeType = "MissingContentLengthHeader" // StorageErrorCodeMissingRequiredHeader ... StorageErrorCodeMissingRequiredHeader StorageErrorCodeType = "MissingRequiredHeader" // StorageErrorCodeMissingRequiredQueryParameter ... StorageErrorCodeMissingRequiredQueryParameter StorageErrorCodeType = "MissingRequiredQueryParameter" // StorageErrorCodeMissingRequiredXMLNode ... StorageErrorCodeMissingRequiredXMLNode StorageErrorCodeType = "MissingRequiredXmlNode" // StorageErrorCodeMultipleConditionHeadersNotSupported ... StorageErrorCodeMultipleConditionHeadersNotSupported StorageErrorCodeType = "MultipleConditionHeadersNotSupported" // StorageErrorCodeNone represents an empty StorageErrorCodeType. StorageErrorCodeNone StorageErrorCodeType = "" // StorageErrorCodeNoPendingCopyOperation ... StorageErrorCodeNoPendingCopyOperation StorageErrorCodeType = "NoPendingCopyOperation" // StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob ... StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob StorageErrorCodeType = "OperationNotAllowedOnIncrementalCopyBlob" // StorageErrorCodeOperationTimedOut ... StorageErrorCodeOperationTimedOut StorageErrorCodeType = "OperationTimedOut" // StorageErrorCodeOutOfRangeInput ... StorageErrorCodeOutOfRangeInput StorageErrorCodeType = "OutOfRangeInput" // StorageErrorCodeOutOfRangeQueryParameterValue ... StorageErrorCodeOutOfRangeQueryParameterValue StorageErrorCodeType = "OutOfRangeQueryParameterValue" // StorageErrorCodePendingCopyOperation ... StorageErrorCodePendingCopyOperation StorageErrorCodeType = "PendingCopyOperation" // StorageErrorCodePreviousSnapshotCannotBeNewer ... 
StorageErrorCodePreviousSnapshotCannotBeNewer StorageErrorCodeType = "PreviousSnapshotCannotBeNewer" // StorageErrorCodePreviousSnapshotNotFound ... StorageErrorCodePreviousSnapshotNotFound StorageErrorCodeType = "PreviousSnapshotNotFound" // StorageErrorCodePreviousSnapshotOperationNotSupported ... StorageErrorCodePreviousSnapshotOperationNotSupported StorageErrorCodeType = "PreviousSnapshotOperationNotSupported" // StorageErrorCodeRequestBodyTooLarge ... StorageErrorCodeRequestBodyTooLarge StorageErrorCodeType = "RequestBodyTooLarge" // StorageErrorCodeRequestURLFailedToParse ... StorageErrorCodeRequestURLFailedToParse StorageErrorCodeType = "RequestUrlFailedToParse" // StorageErrorCodeResourceAlreadyExists ... StorageErrorCodeResourceAlreadyExists StorageErrorCodeType = "ResourceAlreadyExists" // StorageErrorCodeResourceNotFound ... StorageErrorCodeResourceNotFound StorageErrorCodeType = "ResourceNotFound" // StorageErrorCodeResourceTypeMismatch ... StorageErrorCodeResourceTypeMismatch StorageErrorCodeType = "ResourceTypeMismatch" // StorageErrorCodeSequenceNumberConditionNotMet ... StorageErrorCodeSequenceNumberConditionNotMet StorageErrorCodeType = "SequenceNumberConditionNotMet" // StorageErrorCodeSequenceNumberIncrementTooLarge ... StorageErrorCodeSequenceNumberIncrementTooLarge StorageErrorCodeType = "SequenceNumberIncrementTooLarge" // StorageErrorCodeServerBusy ... StorageErrorCodeServerBusy StorageErrorCodeType = "ServerBusy" // StorageErrorCodeSnaphotOperationRateExceeded ... StorageErrorCodeSnaphotOperationRateExceeded StorageErrorCodeType = "SnaphotOperationRateExceeded" // StorageErrorCodeSnapshotCountExceeded ... StorageErrorCodeSnapshotCountExceeded StorageErrorCodeType = "SnapshotCountExceeded" // StorageErrorCodeSnapshotsPresent ... StorageErrorCodeSnapshotsPresent StorageErrorCodeType = "SnapshotsPresent" // StorageErrorCodeSourceConditionNotMet ... 
StorageErrorCodeSourceConditionNotMet StorageErrorCodeType = "SourceConditionNotMet" // StorageErrorCodeSystemInUse ... StorageErrorCodeSystemInUse StorageErrorCodeType = "SystemInUse" // StorageErrorCodeTargetConditionNotMet ... StorageErrorCodeTargetConditionNotMet StorageErrorCodeType = "TargetConditionNotMet" // StorageErrorCodeUnauthorizedBlobOverwrite ... StorageErrorCodeUnauthorizedBlobOverwrite StorageErrorCodeType = "UnauthorizedBlobOverwrite" // StorageErrorCodeUnsupportedHeader ... StorageErrorCodeUnsupportedHeader StorageErrorCodeType = "UnsupportedHeader" // StorageErrorCodeUnsupportedHTTPVerb ... StorageErrorCodeUnsupportedHTTPVerb StorageErrorCodeType = "UnsupportedHttpVerb" // StorageErrorCodeUnsupportedQueryParameter ... StorageErrorCodeUnsupportedQueryParameter StorageErrorCodeType = "UnsupportedQueryParameter" // StorageErrorCodeUnsupportedXMLNode ... StorageErrorCodeUnsupportedXMLNode StorageErrorCodeType = "UnsupportedXmlNode" ) // PossibleStorageErrorCodeTypeValues returns an array of possible values for the StorageErrorCodeType const type. 
func PossibleStorageErrorCodeTypeValues() []StorageErrorCodeType { return []StorageErrorCodeType{StorageErrorCodeAccountAlreadyExists, StorageErrorCodeAccountBeingCreated, StorageErrorCodeAccountIsDisabled, StorageErrorCodeAppendPositionConditionNotMet, StorageErrorCodeAuthenticationFailed, StorageErrorCodeAuthorizationFailure, StorageErrorCodeAuthorizationPermissionMismatch, StorageErrorCodeAuthorizationProtocolMismatch, StorageErrorCodeAuthorizationResourceTypeMismatch, StorageErrorCodeAuthorizationServiceMismatch, StorageErrorCodeAuthorizationSourceIPMismatch, StorageErrorCodeBlobAlreadyExists, StorageErrorCodeBlobArchived, StorageErrorCodeBlobBeingRehydrated, StorageErrorCodeBlobNotArchived, StorageErrorCodeBlobNotFound, StorageErrorCodeBlobOverwritten, StorageErrorCodeBlobTierInadequateForContentLength, StorageErrorCodeBlockCountExceedsLimit, StorageErrorCodeBlockListTooLong, StorageErrorCodeCannotChangeToLowerTier, StorageErrorCodeCannotVerifyCopySource, StorageErrorCodeConditionHeadersNotSupported, StorageErrorCodeConditionNotMet, StorageErrorCodeContainerAlreadyExists, StorageErrorCodeContainerBeingDeleted, StorageErrorCodeContainerDisabled, StorageErrorCodeContainerNotFound, StorageErrorCodeContentLengthLargerThanTierLimit, StorageErrorCodeCopyAcrossAccountsNotSupported, StorageErrorCodeCopyIDMismatch, StorageErrorCodeEmptyMetadataKey, StorageErrorCodeFeatureVersionMismatch, StorageErrorCodeIncrementalCopyBlobMismatch, StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed, StorageErrorCodeIncrementalCopySourceMustBeSnapshot, StorageErrorCodeInfiniteLeaseDurationRequired, StorageErrorCodeInsufficientAccountPermissions, StorageErrorCodeInternalError, StorageErrorCodeInvalidAuthenticationInfo, StorageErrorCodeInvalidBlobOrBlock, StorageErrorCodeInvalidBlobTier, StorageErrorCodeInvalidBlobType, StorageErrorCodeInvalidBlockID, StorageErrorCodeInvalidBlockList, StorageErrorCodeInvalidHeaderValue, StorageErrorCodeInvalidHTTPVerb, 
StorageErrorCodeInvalidInput, StorageErrorCodeInvalidMd5, StorageErrorCodeInvalidMetadata, StorageErrorCodeInvalidOperation, StorageErrorCodeInvalidPageRange, StorageErrorCodeInvalidQueryParameterValue, StorageErrorCodeInvalidRange, StorageErrorCodeInvalidResourceName, StorageErrorCodeInvalidSourceBlobType, StorageErrorCodeInvalidSourceBlobURL, StorageErrorCodeInvalidURI, StorageErrorCodeInvalidVersionForPageBlobOperation, StorageErrorCodeInvalidXMLDocument, StorageErrorCodeInvalidXMLNodeValue, StorageErrorCodeLeaseAlreadyBroken, StorageErrorCodeLeaseAlreadyPresent, StorageErrorCodeLeaseIDMismatchWithBlobOperation, StorageErrorCodeLeaseIDMismatchWithContainerOperation, StorageErrorCodeLeaseIDMismatchWithLeaseOperation, StorageErrorCodeLeaseIDMissing, StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired, StorageErrorCodeLeaseIsBreakingAndCannotBeChanged, StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed, StorageErrorCodeLeaseLost, StorageErrorCodeLeaseNotPresentWithBlobOperation, StorageErrorCodeLeaseNotPresentWithContainerOperation, StorageErrorCodeLeaseNotPresentWithLeaseOperation, StorageErrorCodeMaxBlobSizeConditionNotMet, StorageErrorCodeMd5Mismatch, StorageErrorCodeMetadataTooLarge, StorageErrorCodeMissingContentLengthHeader, StorageErrorCodeMissingRequiredHeader, StorageErrorCodeMissingRequiredQueryParameter, StorageErrorCodeMissingRequiredXMLNode, StorageErrorCodeMultipleConditionHeadersNotSupported, StorageErrorCodeNone, StorageErrorCodeNoPendingCopyOperation, StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob, StorageErrorCodeOperationTimedOut, StorageErrorCodeOutOfRangeInput, StorageErrorCodeOutOfRangeQueryParameterValue, StorageErrorCodePendingCopyOperation, StorageErrorCodePreviousSnapshotCannotBeNewer, StorageErrorCodePreviousSnapshotNotFound, StorageErrorCodePreviousSnapshotOperationNotSupported, StorageErrorCodeRequestBodyTooLarge, StorageErrorCodeRequestURLFailedToParse, StorageErrorCodeResourceAlreadyExists, StorageErrorCodeResourceNotFound, 
StorageErrorCodeResourceTypeMismatch, StorageErrorCodeSequenceNumberConditionNotMet, StorageErrorCodeSequenceNumberIncrementTooLarge, StorageErrorCodeServerBusy, StorageErrorCodeSnaphotOperationRateExceeded, StorageErrorCodeSnapshotCountExceeded, StorageErrorCodeSnapshotsPresent, StorageErrorCodeSourceConditionNotMet, StorageErrorCodeSystemInUse, StorageErrorCodeTargetConditionNotMet, StorageErrorCodeUnauthorizedBlobOverwrite, StorageErrorCodeUnsupportedHeader, StorageErrorCodeUnsupportedHTTPVerb, StorageErrorCodeUnsupportedQueryParameter, StorageErrorCodeUnsupportedXMLNode} } // SyncCopyStatusType enumerates the values for sync copy status type. type SyncCopyStatusType string const ( // SyncCopyStatusNone represents an empty SyncCopyStatusType. SyncCopyStatusNone SyncCopyStatusType = "" // SyncCopyStatusSuccess ... SyncCopyStatusSuccess SyncCopyStatusType = "success" ) // PossibleSyncCopyStatusTypeValues returns an array of possible values for the SyncCopyStatusType const type. func PossibleSyncCopyStatusTypeValues() []SyncCopyStatusType { return []SyncCopyStatusType{SyncCopyStatusNone, SyncCopyStatusSuccess} } // AccessPolicy - An Access policy type AccessPolicy struct { // Start - the date-time the policy is active Start time.Time `xml:"Start"` // Expiry - the date-time the policy expires Expiry time.Time `xml:"Expiry"` // Permission - the permissions for the acl policy Permission string `xml:"Permission"` } // MarshalXML implements the xml.Marshaler interface for AccessPolicy. func (ap AccessPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error { ap2 := (*accessPolicy)(unsafe.Pointer(&ap)) return e.EncodeElement(*ap2, start) } // UnmarshalXML implements the xml.Unmarshaler interface for AccessPolicy. func (ap *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { ap2 := (*accessPolicy)(unsafe.Pointer(ap)) return d.DecodeElement(ap2, &start) } // AppendBlobAppendBlockFromURLResponse ... 
type AppendBlobAppendBlockFromURLResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (ababfur AppendBlobAppendBlockFromURLResponse) Response() *http.Response { return ababfur.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (ababfur AppendBlobAppendBlockFromURLResponse) StatusCode() int { return ababfur.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (ababfur AppendBlobAppendBlockFromURLResponse) Status() string { return ababfur.rawResponse.Status } // BlobAppendOffset returns the value for header x-ms-blob-append-offset. func (ababfur AppendBlobAppendBlockFromURLResponse) BlobAppendOffset() string { return ababfur.rawResponse.Header.Get("x-ms-blob-append-offset") } // BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. func (ababfur AppendBlobAppendBlockFromURLResponse) BlobCommittedBlockCount() int32 { s := ababfur.rawResponse.Header.Get("x-ms-blob-committed-block-count") if s == "" { return -1 } i, err := strconv.ParseInt(s, 10, 32) if err != nil { i = 0 } return int32(i) } // ContentMD5 returns the value for header Content-MD5. func (ababfur AppendBlobAppendBlockFromURLResponse) ContentMD5() []byte { s := ababfur.rawResponse.Header.Get("Content-MD5") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // Date returns the value for header Date. func (ababfur AppendBlobAppendBlockFromURLResponse) Date() time.Time { s := ababfur.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. 
func (ababfur AppendBlobAppendBlockFromURLResponse) EncryptionKeySha256() string { return ababfur.rawResponse.Header.Get("x-ms-encryption-key-sha256") } // ErrorCode returns the value for header x-ms-error-code. func (ababfur AppendBlobAppendBlockFromURLResponse) ErrorCode() string { return ababfur.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (ababfur AppendBlobAppendBlockFromURLResponse) ETag() ETag { return ETag(ababfur.rawResponse.Header.Get("ETag")) } // IsServerEncrypted returns the value for header x-ms-request-server-encrypted. func (ababfur AppendBlobAppendBlockFromURLResponse) IsServerEncrypted() string { return ababfur.rawResponse.Header.Get("x-ms-request-server-encrypted") } // LastModified returns the value for header Last-Modified. func (ababfur AppendBlobAppendBlockFromURLResponse) LastModified() time.Time { s := ababfur.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (ababfur AppendBlobAppendBlockFromURLResponse) RequestID() string { return ababfur.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (ababfur AppendBlobAppendBlockFromURLResponse) Version() string { return ababfur.rawResponse.Header.Get("x-ms-version") } // XMsContentCrc64 returns the value for header x-ms-content-crc64. func (ababfur AppendBlobAppendBlockFromURLResponse) XMsContentCrc64() []byte { s := ababfur.rawResponse.Header.Get("x-ms-content-crc64") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // AppendBlobAppendBlockResponse ... type AppendBlobAppendBlockResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. 
func (ababr AppendBlobAppendBlockResponse) Response() *http.Response { return ababr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (ababr AppendBlobAppendBlockResponse) StatusCode() int { return ababr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (ababr AppendBlobAppendBlockResponse) Status() string { return ababr.rawResponse.Status } // BlobAppendOffset returns the value for header x-ms-blob-append-offset. func (ababr AppendBlobAppendBlockResponse) BlobAppendOffset() string { return ababr.rawResponse.Header.Get("x-ms-blob-append-offset") } // BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. func (ababr AppendBlobAppendBlockResponse) BlobCommittedBlockCount() int32 { s := ababr.rawResponse.Header.Get("x-ms-blob-committed-block-count") if s == "" { return -1 } i, err := strconv.ParseInt(s, 10, 32) if err != nil { i = 0 } return int32(i) } // ClientRequestID returns the value for header x-ms-client-request-id. func (ababr AppendBlobAppendBlockResponse) ClientRequestID() string { return ababr.rawResponse.Header.Get("x-ms-client-request-id") } // ContentMD5 returns the value for header Content-MD5. func (ababr AppendBlobAppendBlockResponse) ContentMD5() []byte { s := ababr.rawResponse.Header.Get("Content-MD5") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // Date returns the value for header Date. func (ababr AppendBlobAppendBlockResponse) Date() time.Time { s := ababr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. 
func (ababr AppendBlobAppendBlockResponse) EncryptionKeySha256() string { return ababr.rawResponse.Header.Get("x-ms-encryption-key-sha256") } // ErrorCode returns the value for header x-ms-error-code. func (ababr AppendBlobAppendBlockResponse) ErrorCode() string { return ababr.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (ababr AppendBlobAppendBlockResponse) ETag() ETag { return ETag(ababr.rawResponse.Header.Get("ETag")) } // IsServerEncrypted returns the value for header x-ms-request-server-encrypted. func (ababr AppendBlobAppendBlockResponse) IsServerEncrypted() string { return ababr.rawResponse.Header.Get("x-ms-request-server-encrypted") } // LastModified returns the value for header Last-Modified. func (ababr AppendBlobAppendBlockResponse) LastModified() time.Time { s := ababr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (ababr AppendBlobAppendBlockResponse) RequestID() string { return ababr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (ababr AppendBlobAppendBlockResponse) Version() string { return ababr.rawResponse.Header.Get("x-ms-version") } // XMsContentCrc64 returns the value for header x-ms-content-crc64. func (ababr AppendBlobAppendBlockResponse) XMsContentCrc64() []byte { s := ababr.rawResponse.Header.Get("x-ms-content-crc64") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // AppendBlobCreateResponse ... type AppendBlobCreateResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (abcr AppendBlobCreateResponse) Response() *http.Response { return abcr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. 
func (r AppendBlobCreateResponse) StatusCode() int {
    return r.rawResponse.StatusCode
}

// Status returns the HTTP status message of the response, e.g. "200 OK".
func (r AppendBlobCreateResponse) Status() string {
    return r.rawResponse.Status
}

// ClientRequestID returns the value for header x-ms-client-request-id.
func (r AppendBlobCreateResponse) ClientRequestID() string {
    return r.rawResponse.Header.Get("x-ms-client-request-id")
}

// ContentMD5 returns the Content-MD5 header decoded from base64; nil when
// the header is absent or not valid base64.
func (r AppendBlobCreateResponse) ContentMD5() []byte {
    raw := r.rawResponse.Header.Get("Content-MD5")
    if raw == "" {
        return nil
    }
    decoded, err := base64.StdEncoding.DecodeString(raw)
    if err != nil {
        return nil
    }
    return decoded
}

// Date returns the Date header parsed as RFC1123; the zero time is returned
// when the header is absent or malformed.
func (r AppendBlobCreateResponse) Date() time.Time {
    raw := r.rawResponse.Header.Get("Date")
    if raw == "" {
        return time.Time{}
    }
    parsed, err := time.Parse(time.RFC1123, raw)
    if err != nil {
        return time.Time{}
    }
    return parsed
}

// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
func (r AppendBlobCreateResponse) EncryptionKeySha256() string {
    return r.rawResponse.Header.Get("x-ms-encryption-key-sha256")
}

// ErrorCode returns the value for header x-ms-error-code.
func (r AppendBlobCreateResponse) ErrorCode() string {
    return r.rawResponse.Header.Get("x-ms-error-code")
}

// ETag returns the value for header ETag.
func (r AppendBlobCreateResponse) ETag() ETag {
    return ETag(r.rawResponse.Header.Get("ETag"))
}

// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
func (r AppendBlobCreateResponse) IsServerEncrypted() string {
    return r.rawResponse.Header.Get("x-ms-request-server-encrypted")
}

// LastModified returns the value for header Last-Modified.
func (abcr AppendBlobCreateResponse) LastModified() time.Time { s := abcr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (abcr AppendBlobCreateResponse) RequestID() string { return abcr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (abcr AppendBlobCreateResponse) Version() string { return abcr.rawResponse.Header.Get("x-ms-version") } // BlobAbortCopyFromURLResponse ... type BlobAbortCopyFromURLResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (bacfur BlobAbortCopyFromURLResponse) Response() *http.Response { return bacfur.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (bacfur BlobAbortCopyFromURLResponse) StatusCode() int { return bacfur.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (bacfur BlobAbortCopyFromURLResponse) Status() string { return bacfur.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (bacfur BlobAbortCopyFromURLResponse) ClientRequestID() string { return bacfur.rawResponse.Header.Get("x-ms-client-request-id") } // Date returns the value for header Date. func (bacfur BlobAbortCopyFromURLResponse) Date() time.Time { s := bacfur.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (bacfur BlobAbortCopyFromURLResponse) ErrorCode() string { return bacfur.rawResponse.Header.Get("x-ms-error-code") } // RequestID returns the value for header x-ms-request-id. 
func (r BlobAbortCopyFromURLResponse) RequestID() string {
    return r.rawResponse.Header.Get("x-ms-request-id")
}

// Version returns the value for header x-ms-version.
func (r BlobAbortCopyFromURLResponse) Version() string {
    return r.rawResponse.Header.Get("x-ms-version")
}

// BlobAcquireLeaseResponse wraps the raw HTTP response of a blob Acquire
// Lease operation.
type BlobAcquireLeaseResponse struct {
    rawResponse *http.Response
}

// Response returns the raw HTTP response object.
func (r BlobAcquireLeaseResponse) Response() *http.Response {
    return r.rawResponse
}

// StatusCode returns the HTTP status code of the response, e.g. 200.
func (r BlobAcquireLeaseResponse) StatusCode() int {
    return r.rawResponse.StatusCode
}

// Status returns the HTTP status message of the response, e.g. "200 OK".
func (r BlobAcquireLeaseResponse) Status() string {
    return r.rawResponse.Status
}

// ClientRequestID returns the value for header x-ms-client-request-id.
func (r BlobAcquireLeaseResponse) ClientRequestID() string {
    return r.rawResponse.Header.Get("x-ms-client-request-id")
}

// Date returns the Date header parsed as RFC1123; the zero time is returned
// when the header is absent or malformed.
func (r BlobAcquireLeaseResponse) Date() time.Time {
    raw := r.rawResponse.Header.Get("Date")
    if raw == "" {
        return time.Time{}
    }
    parsed, err := time.Parse(time.RFC1123, raw)
    if err != nil {
        return time.Time{}
    }
    return parsed
}

// ErrorCode returns the value for header x-ms-error-code.
func (r BlobAcquireLeaseResponse) ErrorCode() string {
    return r.rawResponse.Header.Get("x-ms-error-code")
}

// ETag returns the value for header ETag.
func (r BlobAcquireLeaseResponse) ETag() ETag {
    return ETag(r.rawResponse.Header.Get("ETag"))
}

// LastModified returns the Last-Modified header parsed as RFC1123; the zero
// time is returned when the header is absent or malformed.
func (r BlobAcquireLeaseResponse) LastModified() time.Time {
    raw := r.rawResponse.Header.Get("Last-Modified")
    if raw == "" {
        return time.Time{}
    }
    parsed, err := time.Parse(time.RFC1123, raw)
    if err != nil {
        return time.Time{}
    }
    return parsed
}

// LeaseID returns the value for header x-ms-lease-id.
func (r BlobAcquireLeaseResponse) LeaseID() string {
    return r.rawResponse.Header.Get("x-ms-lease-id")
}

// RequestID returns the value for header x-ms-request-id.
func (r BlobAcquireLeaseResponse) RequestID() string {
    return r.rawResponse.Header.Get("x-ms-request-id")
}

// Version returns the value for header x-ms-version.
func (r BlobAcquireLeaseResponse) Version() string {
    return r.rawResponse.Header.Get("x-ms-version")
}

// BlobBreakLeaseResponse wraps the raw HTTP response of a blob Break Lease
// operation.
type BlobBreakLeaseResponse struct {
    rawResponse *http.Response
}

// Response returns the raw HTTP response object.
func (r BlobBreakLeaseResponse) Response() *http.Response {
    return r.rawResponse
}

// StatusCode returns the HTTP status code of the response, e.g. 200.
func (r BlobBreakLeaseResponse) StatusCode() int {
    return r.rawResponse.StatusCode
}

// Status returns the HTTP status message of the response, e.g. "200 OK".
func (r BlobBreakLeaseResponse) Status() string {
    return r.rawResponse.Status
}

// ClientRequestID returns the value for header x-ms-client-request-id.
func (r BlobBreakLeaseResponse) ClientRequestID() string {
    return r.rawResponse.Header.Get("x-ms-client-request-id")
}

// Date returns the Date header parsed as RFC1123; the zero time is returned
// when the header is absent or malformed.
func (r BlobBreakLeaseResponse) Date() time.Time {
    raw := r.rawResponse.Header.Get("Date")
    if raw == "" {
        return time.Time{}
    }
    parsed, err := time.Parse(time.RFC1123, raw)
    if err != nil {
        return time.Time{}
    }
    return parsed
}

// ErrorCode returns the value for header x-ms-error-code.
func (r BlobBreakLeaseResponse) ErrorCode() string {
    return r.rawResponse.Header.Get("x-ms-error-code")
}

// ETag returns the value for header ETag.
func (r BlobBreakLeaseResponse) ETag() ETag {
    return ETag(r.rawResponse.Header.Get("ETag"))
}

// LastModified returns the value for header Last-Modified.
func (bblr BlobBreakLeaseResponse) LastModified() time.Time { s := bblr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // LeaseTime returns the value for header x-ms-lease-time. func (bblr BlobBreakLeaseResponse) LeaseTime() int32 { s := bblr.rawResponse.Header.Get("x-ms-lease-time") if s == "" { return -1 } i, err := strconv.ParseInt(s, 10, 32) if err != nil { i = 0 } return int32(i) } // RequestID returns the value for header x-ms-request-id. func (bblr BlobBreakLeaseResponse) RequestID() string { return bblr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (bblr BlobBreakLeaseResponse) Version() string { return bblr.rawResponse.Header.Get("x-ms-version") } // BlobChangeLeaseResponse ... type BlobChangeLeaseResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (bclr BlobChangeLeaseResponse) Response() *http.Response { return bclr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (bclr BlobChangeLeaseResponse) StatusCode() int { return bclr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (bclr BlobChangeLeaseResponse) Status() string { return bclr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (bclr BlobChangeLeaseResponse) ClientRequestID() string { return bclr.rawResponse.Header.Get("x-ms-client-request-id") } // Date returns the value for header Date. func (bclr BlobChangeLeaseResponse) Date() time.Time { s := bclr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. 
func (r BlobChangeLeaseResponse) ErrorCode() string {
    return r.rawResponse.Header.Get("x-ms-error-code")
}

// ETag returns the value for header ETag.
func (r BlobChangeLeaseResponse) ETag() ETag {
    return ETag(r.rawResponse.Header.Get("ETag"))
}

// LastModified returns the Last-Modified header parsed as RFC1123; the zero
// time is returned when the header is absent or malformed.
func (r BlobChangeLeaseResponse) LastModified() time.Time {
    raw := r.rawResponse.Header.Get("Last-Modified")
    if raw == "" {
        return time.Time{}
    }
    parsed, err := time.Parse(time.RFC1123, raw)
    if err != nil {
        return time.Time{}
    }
    return parsed
}

// LeaseID returns the value for header x-ms-lease-id.
func (r BlobChangeLeaseResponse) LeaseID() string {
    return r.rawResponse.Header.Get("x-ms-lease-id")
}

// RequestID returns the value for header x-ms-request-id.
func (r BlobChangeLeaseResponse) RequestID() string {
    return r.rawResponse.Header.Get("x-ms-request-id")
}

// Version returns the value for header x-ms-version.
func (r BlobChangeLeaseResponse) Version() string {
    return r.rawResponse.Header.Get("x-ms-version")
}

// BlobCopyFromURLResponse wraps the raw HTTP response of a synchronous blob
// Copy From URL operation.
type BlobCopyFromURLResponse struct {
    rawResponse *http.Response
}

// Response returns the raw HTTP response object.
func (r BlobCopyFromURLResponse) Response() *http.Response {
    return r.rawResponse
}

// StatusCode returns the HTTP status code of the response, e.g. 200.
func (r BlobCopyFromURLResponse) StatusCode() int {
    return r.rawResponse.StatusCode
}

// Status returns the HTTP status message of the response, e.g. "200 OK".
func (r BlobCopyFromURLResponse) Status() string {
    return r.rawResponse.Status
}

// ClientRequestID returns the value for header x-ms-client-request-id.
func (r BlobCopyFromURLResponse) ClientRequestID() string {
    return r.rawResponse.Header.Get("x-ms-client-request-id")
}

// ContentMD5 returns the value for header Content-MD5.
func (bcfur BlobCopyFromURLResponse) ContentMD5() []byte { s := bcfur.rawResponse.Header.Get("Content-MD5") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // CopyID returns the value for header x-ms-copy-id. func (bcfur BlobCopyFromURLResponse) CopyID() string { return bcfur.rawResponse.Header.Get("x-ms-copy-id") } // CopyStatus returns the value for header x-ms-copy-status. func (bcfur BlobCopyFromURLResponse) CopyStatus() SyncCopyStatusType { return SyncCopyStatusType(bcfur.rawResponse.Header.Get("x-ms-copy-status")) } // Date returns the value for header Date. func (bcfur BlobCopyFromURLResponse) Date() time.Time { s := bcfur.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (bcfur BlobCopyFromURLResponse) ErrorCode() string { return bcfur.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (bcfur BlobCopyFromURLResponse) ETag() ETag { return ETag(bcfur.rawResponse.Header.Get("ETag")) } // LastModified returns the value for header Last-Modified. func (bcfur BlobCopyFromURLResponse) LastModified() time.Time { s := bcfur.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (bcfur BlobCopyFromURLResponse) RequestID() string { return bcfur.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (bcfur BlobCopyFromURLResponse) Version() string { return bcfur.rawResponse.Header.Get("x-ms-version") } // XMsContentCrc64 returns the value for header x-ms-content-crc64. 
func (bcfur BlobCopyFromURLResponse) XMsContentCrc64() []byte { s := bcfur.rawResponse.Header.Get("x-ms-content-crc64") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // BlobCreateSnapshotResponse ... type BlobCreateSnapshotResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (bcsr BlobCreateSnapshotResponse) Response() *http.Response { return bcsr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (bcsr BlobCreateSnapshotResponse) StatusCode() int { return bcsr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (bcsr BlobCreateSnapshotResponse) Status() string { return bcsr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (bcsr BlobCreateSnapshotResponse) ClientRequestID() string { return bcsr.rawResponse.Header.Get("x-ms-client-request-id") } // Date returns the value for header Date. func (bcsr BlobCreateSnapshotResponse) Date() time.Time { s := bcsr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (bcsr BlobCreateSnapshotResponse) ErrorCode() string { return bcsr.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (bcsr BlobCreateSnapshotResponse) ETag() ETag { return ETag(bcsr.rawResponse.Header.Get("ETag")) } // IsServerEncrypted returns the value for header x-ms-request-server-encrypted. func (bcsr BlobCreateSnapshotResponse) IsServerEncrypted() string { return bcsr.rawResponse.Header.Get("x-ms-request-server-encrypted") } // LastModified returns the value for header Last-Modified. 
func (bcsr BlobCreateSnapshotResponse) LastModified() time.Time { s := bcsr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (bcsr BlobCreateSnapshotResponse) RequestID() string { return bcsr.rawResponse.Header.Get("x-ms-request-id") } // Snapshot returns the value for header x-ms-snapshot. func (bcsr BlobCreateSnapshotResponse) Snapshot() string { return bcsr.rawResponse.Header.Get("x-ms-snapshot") } // Version returns the value for header x-ms-version. func (bcsr BlobCreateSnapshotResponse) Version() string { return bcsr.rawResponse.Header.Get("x-ms-version") } // BlobDeleteResponse ... type BlobDeleteResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (bdr BlobDeleteResponse) Response() *http.Response { return bdr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (bdr BlobDeleteResponse) StatusCode() int { return bdr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (bdr BlobDeleteResponse) Status() string { return bdr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (bdr BlobDeleteResponse) ClientRequestID() string { return bdr.rawResponse.Header.Get("x-ms-client-request-id") } // Date returns the value for header Date. func (bdr BlobDeleteResponse) Date() time.Time { s := bdr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (bdr BlobDeleteResponse) ErrorCode() string { return bdr.rawResponse.Header.Get("x-ms-error-code") } // RequestID returns the value for header x-ms-request-id. 
func (bdr BlobDeleteResponse) RequestID() string { return bdr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (bdr BlobDeleteResponse) Version() string { return bdr.rawResponse.Header.Get("x-ms-version") } // BlobFlatListSegment ... type BlobFlatListSegment struct { // XMLName is used for marshalling and is subject to removal in a future release. XMLName xml.Name `xml:"Blobs"` BlobItems []BlobItem `xml:"Blob"` } // BlobGetAccessControlResponse ... type BlobGetAccessControlResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (bgacr BlobGetAccessControlResponse) Response() *http.Response { return bgacr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (bgacr BlobGetAccessControlResponse) StatusCode() int { return bgacr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (bgacr BlobGetAccessControlResponse) Status() string { return bgacr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (bgacr BlobGetAccessControlResponse) ClientRequestID() string { return bgacr.rawResponse.Header.Get("x-ms-client-request-id") } // Date returns the value for header Date. func (bgacr BlobGetAccessControlResponse) Date() time.Time { s := bgacr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ETag returns the value for header ETag. func (bgacr BlobGetAccessControlResponse) ETag() ETag { return ETag(bgacr.rawResponse.Header.Get("ETag")) } // LastModified returns the value for header Last-Modified. 
func (bgacr BlobGetAccessControlResponse) LastModified() time.Time { s := bgacr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (bgacr BlobGetAccessControlResponse) RequestID() string { return bgacr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (bgacr BlobGetAccessControlResponse) Version() string { return bgacr.rawResponse.Header.Get("x-ms-version") } // XMsACL returns the value for header x-ms-acl. func (bgacr BlobGetAccessControlResponse) XMsACL() string { return bgacr.rawResponse.Header.Get("x-ms-acl") } // XMsGroup returns the value for header x-ms-group. func (bgacr BlobGetAccessControlResponse) XMsGroup() string { return bgacr.rawResponse.Header.Get("x-ms-group") } // XMsOwner returns the value for header x-ms-owner. func (bgacr BlobGetAccessControlResponse) XMsOwner() string { return bgacr.rawResponse.Header.Get("x-ms-owner") } // XMsPermissions returns the value for header x-ms-permissions. func (bgacr BlobGetAccessControlResponse) XMsPermissions() string { return bgacr.rawResponse.Header.Get("x-ms-permissions") } // BlobGetAccountInfoResponse ... type BlobGetAccountInfoResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (bgair BlobGetAccountInfoResponse) Response() *http.Response { return bgair.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (bgair BlobGetAccountInfoResponse) StatusCode() int { return bgair.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (bgair BlobGetAccountInfoResponse) Status() string { return bgair.rawResponse.Status } // AccountKind returns the value for header x-ms-account-kind. 
func (r BlobGetAccountInfoResponse) AccountKind() AccountKindType {
    return AccountKindType(r.rawResponse.Header.Get("x-ms-account-kind"))
}

// ClientRequestID returns the value for header x-ms-client-request-id.
func (r BlobGetAccountInfoResponse) ClientRequestID() string {
    return r.rawResponse.Header.Get("x-ms-client-request-id")
}

// Date returns the Date header parsed as RFC1123; the zero time is returned
// when the header is absent or malformed.
func (r BlobGetAccountInfoResponse) Date() time.Time {
    raw := r.rawResponse.Header.Get("Date")
    if raw == "" {
        return time.Time{}
    }
    parsed, err := time.Parse(time.RFC1123, raw)
    if err != nil {
        return time.Time{}
    }
    return parsed
}

// ErrorCode returns the value for header x-ms-error-code.
func (r BlobGetAccountInfoResponse) ErrorCode() string {
    return r.rawResponse.Header.Get("x-ms-error-code")
}

// RequestID returns the value for header x-ms-request-id.
func (r BlobGetAccountInfoResponse) RequestID() string {
    return r.rawResponse.Header.Get("x-ms-request-id")
}

// SkuName returns the value for header x-ms-sku-name.
func (r BlobGetAccountInfoResponse) SkuName() SkuNameType {
    return SkuNameType(r.rawResponse.Header.Get("x-ms-sku-name"))
}

// Version returns the value for header x-ms-version.
func (r BlobGetAccountInfoResponse) Version() string {
    return r.rawResponse.Header.Get("x-ms-version")
}

// BlobGetPropertiesResponse wraps the raw HTTP response of a blob Get
// Properties operation.
type BlobGetPropertiesResponse struct {
    rawResponse *http.Response
}

// NewMetadata returns the user-defined key/value pairs carried in the
// response's metadata headers (those whose name starts with the metadata
// prefix); keys are lower-cased with the prefix stripped.
func (r BlobGetPropertiesResponse) NewMetadata() Metadata {
    meta := Metadata{}
    for name, values := range r.rawResponse.Header {
        if len(name) > mdPrefixLen && strings.EqualFold(name[:mdPrefixLen], mdPrefix) {
            meta[strings.ToLower(name[mdPrefixLen:])] = values[0]
        }
    }
    return meta
}

// Response returns the raw HTTP response object.
func (r BlobGetPropertiesResponse) Response() *http.Response {
    return r.rawResponse
}

// StatusCode returns the HTTP status code of the response, e.g. 200.
func (r BlobGetPropertiesResponse) StatusCode() int {
    return r.rawResponse.StatusCode
}

// Status returns the HTTP status message of the response, e.g. "200 OK".
func (r BlobGetPropertiesResponse) Status() string {
    return r.rawResponse.Status
}

// AcceptRanges returns the value for header Accept-Ranges.
func (r BlobGetPropertiesResponse) AcceptRanges() string {
    return r.rawResponse.Header.Get("Accept-Ranges")
}

// AccessTier returns the value for header x-ms-access-tier.
func (r BlobGetPropertiesResponse) AccessTier() string {
    return r.rawResponse.Header.Get("x-ms-access-tier")
}

// AccessTierChangeTime returns the x-ms-access-tier-change-time header parsed
// as RFC1123; the zero time is returned when the header is absent or malformed.
func (r BlobGetPropertiesResponse) AccessTierChangeTime() time.Time {
    raw := r.rawResponse.Header.Get("x-ms-access-tier-change-time")
    if raw == "" {
        return time.Time{}
    }
    parsed, err := time.Parse(time.RFC1123, raw)
    if err != nil {
        return time.Time{}
    }
    return parsed
}

// AccessTierInferred returns the value for header x-ms-access-tier-inferred.
func (r BlobGetPropertiesResponse) AccessTierInferred() string {
    return r.rawResponse.Header.Get("x-ms-access-tier-inferred")
}

// ArchiveStatus returns the value for header x-ms-archive-status.
func (r BlobGetPropertiesResponse) ArchiveStatus() string {
    return r.rawResponse.Header.Get("x-ms-archive-status")
}

// BlobCommittedBlockCount returns the value for header
// x-ms-blob-committed-block-count: -1 when the header is absent, 0 when it
// cannot be parsed as an integer.
func (r BlobGetPropertiesResponse) BlobCommittedBlockCount() int32 {
    raw := r.rawResponse.Header.Get("x-ms-blob-committed-block-count")
    if raw == "" {
        return -1
    }
    v, err := strconv.ParseInt(raw, 10, 32)
    if err != nil {
        return 0
    }
    return int32(v)
}

// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number.
func (bgpr BlobGetPropertiesResponse) BlobSequenceNumber() int64 { s := bgpr.rawResponse.Header.Get("x-ms-blob-sequence-number") if s == "" { return -1 } i, err := strconv.ParseInt(s, 10, 64) if err != nil { i = 0 } return i } // BlobType returns the value for header x-ms-blob-type. func (bgpr BlobGetPropertiesResponse) BlobType() BlobType { return BlobType(bgpr.rawResponse.Header.Get("x-ms-blob-type")) } // CacheControl returns the value for header Cache-Control. func (bgpr BlobGetPropertiesResponse) CacheControl() string { return bgpr.rawResponse.Header.Get("Cache-Control") } // ClientRequestID returns the value for header x-ms-client-request-id. func (bgpr BlobGetPropertiesResponse) ClientRequestID() string { return bgpr.rawResponse.Header.Get("x-ms-client-request-id") } // ContentDisposition returns the value for header Content-Disposition. func (bgpr BlobGetPropertiesResponse) ContentDisposition() string { return bgpr.rawResponse.Header.Get("Content-Disposition") } // ContentEncoding returns the value for header Content-Encoding. func (bgpr BlobGetPropertiesResponse) ContentEncoding() string { return bgpr.rawResponse.Header.Get("Content-Encoding") } // ContentLanguage returns the value for header Content-Language. func (bgpr BlobGetPropertiesResponse) ContentLanguage() string { return bgpr.rawResponse.Header.Get("Content-Language") } // ContentLength returns the value for header Content-Length. func (bgpr BlobGetPropertiesResponse) ContentLength() int64 { s := bgpr.rawResponse.Header.Get("Content-Length") if s == "" { return -1 } i, err := strconv.ParseInt(s, 10, 64) if err != nil { i = 0 } return i } // ContentMD5 returns the value for header Content-MD5. func (bgpr BlobGetPropertiesResponse) ContentMD5() []byte { s := bgpr.rawResponse.Header.Get("Content-MD5") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // ContentType returns the value for header Content-Type. 
func (r BlobGetPropertiesResponse) ContentType() string {
    return r.rawResponse.Header.Get("Content-Type")
}

// CopyCompletionTime returns the x-ms-copy-completion-time header parsed as
// RFC1123; the zero time is returned when the header is absent or malformed.
func (r BlobGetPropertiesResponse) CopyCompletionTime() time.Time {
    raw := r.rawResponse.Header.Get("x-ms-copy-completion-time")
    if raw == "" {
        return time.Time{}
    }
    parsed, err := time.Parse(time.RFC1123, raw)
    if err != nil {
        return time.Time{}
    }
    return parsed
}

// CopyID returns the value for header x-ms-copy-id.
func (r BlobGetPropertiesResponse) CopyID() string {
    return r.rawResponse.Header.Get("x-ms-copy-id")
}

// CopyProgress returns the value for header x-ms-copy-progress.
func (r BlobGetPropertiesResponse) CopyProgress() string {
    return r.rawResponse.Header.Get("x-ms-copy-progress")
}

// CopySource returns the value for header x-ms-copy-source.
func (r BlobGetPropertiesResponse) CopySource() string {
    return r.rawResponse.Header.Get("x-ms-copy-source")
}

// CopyStatus returns the value for header x-ms-copy-status.
func (r BlobGetPropertiesResponse) CopyStatus() CopyStatusType {
    return CopyStatusType(r.rawResponse.Header.Get("x-ms-copy-status"))
}

// CopyStatusDescription returns the value for header x-ms-copy-status-description.
func (r BlobGetPropertiesResponse) CopyStatusDescription() string {
    return r.rawResponse.Header.Get("x-ms-copy-status-description")
}

// CreationTime returns the x-ms-creation-time header parsed as RFC1123; the
// zero time is returned when the header is absent or malformed.
func (r BlobGetPropertiesResponse) CreationTime() time.Time {
    raw := r.rawResponse.Header.Get("x-ms-creation-time")
    if raw == "" {
        return time.Time{}
    }
    parsed, err := time.Parse(time.RFC1123, raw)
    if err != nil {
        return time.Time{}
    }
    return parsed
}

// Date returns the value for header Date.
func (bgpr BlobGetPropertiesResponse) Date() time.Time { s := bgpr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // DestinationSnapshot returns the value for header x-ms-copy-destination-snapshot. func (bgpr BlobGetPropertiesResponse) DestinationSnapshot() string { return bgpr.rawResponse.Header.Get("x-ms-copy-destination-snapshot") } // EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. func (bgpr BlobGetPropertiesResponse) EncryptionKeySha256() string { return bgpr.rawResponse.Header.Get("x-ms-encryption-key-sha256") } // ErrorCode returns the value for header x-ms-error-code. func (bgpr BlobGetPropertiesResponse) ErrorCode() string { return bgpr.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (bgpr BlobGetPropertiesResponse) ETag() ETag { return ETag(bgpr.rawResponse.Header.Get("ETag")) } // IsIncrementalCopy returns the value for header x-ms-incremental-copy. func (bgpr BlobGetPropertiesResponse) IsIncrementalCopy() string { return bgpr.rawResponse.Header.Get("x-ms-incremental-copy") } // IsServerEncrypted returns the value for header x-ms-server-encrypted. func (bgpr BlobGetPropertiesResponse) IsServerEncrypted() string { return bgpr.rawResponse.Header.Get("x-ms-server-encrypted") } // LastModified returns the value for header Last-Modified. func (bgpr BlobGetPropertiesResponse) LastModified() time.Time { s := bgpr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // LeaseDuration returns the value for header x-ms-lease-duration. func (bgpr BlobGetPropertiesResponse) LeaseDuration() LeaseDurationType { return LeaseDurationType(bgpr.rawResponse.Header.Get("x-ms-lease-duration")) } // LeaseState returns the value for header x-ms-lease-state. 
func (bgpr BlobGetPropertiesResponse) LeaseState() LeaseStateType {
	return LeaseStateType(bgpr.rawResponse.Header.Get("x-ms-lease-state"))
}

// LeaseStatus returns the value for header x-ms-lease-status.
func (bgpr BlobGetPropertiesResponse) LeaseStatus() LeaseStatusType {
	return LeaseStatusType(bgpr.rawResponse.Header.Get("x-ms-lease-status"))
}

// RequestID returns the value for header x-ms-request-id.
func (bgpr BlobGetPropertiesResponse) RequestID() string {
	return bgpr.rawResponse.Header.Get("x-ms-request-id")
}

// Version returns the value for header x-ms-version.
func (bgpr BlobGetPropertiesResponse) Version() string {
	return bgpr.rawResponse.Header.Get("x-ms-version")
}

// BlobHierarchyListSegment holds a hierarchical (delimiter-based) listing of
// blobs: virtual-directory prefixes alongside the blobs themselves.
type BlobHierarchyListSegment struct {
	// XMLName is used for marshalling and is subject to removal in a future release.
	XMLName      xml.Name     `xml:"Blobs"`
	BlobPrefixes []BlobPrefix `xml:"BlobPrefix"`
	BlobItems    []BlobItem   `xml:"Blob"`
}

// BlobItem - An Azure Storage blob
type BlobItem struct {
	// XMLName is used for marshalling and is subject to removal in a future release.
	XMLName    xml.Name       `xml:"Blob"`
	Name       string         `xml:"Name"`
	Deleted    bool           `xml:"Deleted"`
	Snapshot   string         `xml:"Snapshot"`
	Properties BlobProperties `xml:"Properties"`
	Metadata   Metadata       `xml:"Metadata"`
}

// BlobPrefix - a virtual-directory prefix returned by a hierarchical listing.
type BlobPrefix struct {
	Name string `xml:"Name"`
}

// BlobProperties - Properties of a blob.
//
// NOTE(review): MarshalXML/UnmarshalXML below cast this struct to the internal
// blobProperties type via unsafe.Pointer, so the two must keep an identical
// field order and layout — do not add, remove, or reorder fields here without
// making the matching change there.
type BlobProperties struct {
	// XMLName is used for marshalling and is subject to removal in a future release.
	XMLName      xml.Name   `xml:"Properties"`
	CreationTime *time.Time `xml:"Creation-Time"`
	LastModified time.Time  `xml:"Last-Modified"`
	Etag         ETag       `xml:"Etag"`
	// ContentLength - Size in bytes
	ContentLength      *int64  `xml:"Content-Length"`
	ContentType        *string `xml:"Content-Type"`
	ContentEncoding    *string `xml:"Content-Encoding"`
	ContentLanguage    *string `xml:"Content-Language"`
	ContentMD5         []byte  `xml:"Content-MD5"`
	ContentDisposition *string `xml:"Content-Disposition"`
	CacheControl       *string `xml:"Cache-Control"`
	BlobSequenceNumber *int64  `xml:"x-ms-blob-sequence-number"`
	// BlobType - Possible values include: 'BlobBlockBlob', 'BlobPageBlob', 'BlobAppendBlob', 'BlobNone'
	BlobType BlobType `xml:"BlobType"`
	// LeaseStatus - Possible values include: 'LeaseStatusLocked', 'LeaseStatusUnlocked', 'LeaseStatusNone'
	LeaseStatus LeaseStatusType `xml:"LeaseStatus"`
	// LeaseState - Possible values include: 'LeaseStateAvailable', 'LeaseStateLeased', 'LeaseStateExpired', 'LeaseStateBreaking', 'LeaseStateBroken', 'LeaseStateNone'
	LeaseState LeaseStateType `xml:"LeaseState"`
	// LeaseDuration - Possible values include: 'LeaseDurationInfinite', 'LeaseDurationFixed', 'LeaseDurationNone'
	LeaseDuration LeaseDurationType `xml:"LeaseDuration"`
	CopyID        *string           `xml:"CopyId"`
	// CopyStatus - Possible values include: 'CopyStatusPending', 'CopyStatusSuccess', 'CopyStatusAborted', 'CopyStatusFailed', 'CopyStatusNone'
	CopyStatus             CopyStatusType `xml:"CopyStatus"`
	CopySource             *string        `xml:"CopySource"`
	CopyProgress           *string        `xml:"CopyProgress"`
	CopyCompletionTime     *time.Time     `xml:"CopyCompletionTime"`
	CopyStatusDescription  *string        `xml:"CopyStatusDescription"`
	ServerEncrypted        *bool          `xml:"ServerEncrypted"`
	IncrementalCopy        *bool          `xml:"IncrementalCopy"`
	DestinationSnapshot    *string        `xml:"DestinationSnapshot"`
	DeletedTime            *time.Time     `xml:"DeletedTime"`
	RemainingRetentionDays *int32         `xml:"RemainingRetentionDays"`
	// AccessTier - Possible values include: 'AccessTierP4', 'AccessTierP6', 'AccessTierP10', 'AccessTierP15', 'AccessTierP20', 'AccessTierP30', 'AccessTierP40', 'AccessTierP50', 'AccessTierP60', 'AccessTierP70', 'AccessTierP80', 'AccessTierHot', 'AccessTierCool', 'AccessTierArchive', 'AccessTierNone'
	AccessTier         AccessTierType `xml:"AccessTier"`
	AccessTierInferred *bool          `xml:"AccessTierInferred"`
	// ArchiveStatus - Possible values include: 'ArchiveStatusRehydratePendingToHot', 'ArchiveStatusRehydratePendingToCool', 'ArchiveStatusNone'
	ArchiveStatus             ArchiveStatusType `xml:"ArchiveStatus"`
	CustomerProvidedKeySha256 *string           `xml:"CustomerProvidedKeySha256"`
	AccessTierChangeTime      *time.Time        `xml:"AccessTierChangeTime"`
}

// MarshalXML implements the xml.Marshaler interface for BlobProperties.
// It reinterprets the struct as the internal blobProperties type (which
// carries the custom time/byte-slice marshalling) via an unsafe.Pointer cast;
// this relies on the two structs having identical layouts.
func (bp BlobProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	bp2 := (*blobProperties)(unsafe.Pointer(&bp))
	return e.EncodeElement(*bp2, start)
}

// UnmarshalXML implements the xml.Unmarshaler interface for BlobProperties.
// Same unsafe layout-punning as MarshalXML, in the decode direction.
func (bp *BlobProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	bp2 := (*blobProperties)(unsafe.Pointer(bp))
	return d.DecodeElement(bp2, &start)
}

// BlobReleaseLeaseResponse wraps the raw HTTP response of a blob Release
// Lease operation.
type BlobReleaseLeaseResponse struct {
	rawResponse *http.Response
}

// Response returns the raw HTTP response object.
func (brlr BlobReleaseLeaseResponse) Response() *http.Response {
	return brlr.rawResponse
}

// StatusCode returns the HTTP status code of the response, e.g. 200.
func (brlr BlobReleaseLeaseResponse) StatusCode() int {
	return brlr.rawResponse.StatusCode
}

// Status returns the HTTP status message of the response, e.g. "200 OK".
func (brlr BlobReleaseLeaseResponse) Status() string {
	return brlr.rawResponse.Status
}

// ClientRequestID returns the value for header x-ms-client-request-id.
func (brlr BlobReleaseLeaseResponse) ClientRequestID() string {
	return brlr.rawResponse.Header.Get("x-ms-client-request-id")
}

// Date returns the value for header Date.
func (brlr BlobReleaseLeaseResponse) Date() time.Time { s := brlr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (brlr BlobReleaseLeaseResponse) ErrorCode() string { return brlr.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (brlr BlobReleaseLeaseResponse) ETag() ETag { return ETag(brlr.rawResponse.Header.Get("ETag")) } // LastModified returns the value for header Last-Modified. func (brlr BlobReleaseLeaseResponse) LastModified() time.Time { s := brlr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (brlr BlobReleaseLeaseResponse) RequestID() string { return brlr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (brlr BlobReleaseLeaseResponse) Version() string { return brlr.rawResponse.Header.Get("x-ms-version") } // BlobRenameResponse ... type BlobRenameResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (brr BlobRenameResponse) Response() *http.Response { return brr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (brr BlobRenameResponse) StatusCode() int { return brr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (brr BlobRenameResponse) Status() string { return brr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (brr BlobRenameResponse) ClientRequestID() string { return brr.rawResponse.Header.Get("x-ms-client-request-id") } // ContentLength returns the value for header Content-Length. 
func (brr BlobRenameResponse) ContentLength() int64 { s := brr.rawResponse.Header.Get("Content-Length") if s == "" { return -1 } i, err := strconv.ParseInt(s, 10, 64) if err != nil { i = 0 } return i } // Date returns the value for header Date. func (brr BlobRenameResponse) Date() time.Time { s := brr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ETag returns the value for header ETag. func (brr BlobRenameResponse) ETag() ETag { return ETag(brr.rawResponse.Header.Get("ETag")) } // LastModified returns the value for header Last-Modified. func (brr BlobRenameResponse) LastModified() time.Time { s := brr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (brr BlobRenameResponse) RequestID() string { return brr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (brr BlobRenameResponse) Version() string { return brr.rawResponse.Header.Get("x-ms-version") } // BlobRenewLeaseResponse ... type BlobRenewLeaseResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (brlr BlobRenewLeaseResponse) Response() *http.Response { return brlr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (brlr BlobRenewLeaseResponse) StatusCode() int { return brlr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (brlr BlobRenewLeaseResponse) Status() string { return brlr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (brlr BlobRenewLeaseResponse) ClientRequestID() string { return brlr.rawResponse.Header.Get("x-ms-client-request-id") } // Date returns the value for header Date. 
func (brlr BlobRenewLeaseResponse) Date() time.Time { s := brlr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (brlr BlobRenewLeaseResponse) ErrorCode() string { return brlr.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (brlr BlobRenewLeaseResponse) ETag() ETag { return ETag(brlr.rawResponse.Header.Get("ETag")) } // LastModified returns the value for header Last-Modified. func (brlr BlobRenewLeaseResponse) LastModified() time.Time { s := brlr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // LeaseID returns the value for header x-ms-lease-id. func (brlr BlobRenewLeaseResponse) LeaseID() string { return brlr.rawResponse.Header.Get("x-ms-lease-id") } // RequestID returns the value for header x-ms-request-id. func (brlr BlobRenewLeaseResponse) RequestID() string { return brlr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (brlr BlobRenewLeaseResponse) Version() string { return brlr.rawResponse.Header.Get("x-ms-version") } // BlobSetAccessControlResponse ... type BlobSetAccessControlResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (bsacr BlobSetAccessControlResponse) Response() *http.Response { return bsacr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (bsacr BlobSetAccessControlResponse) StatusCode() int { return bsacr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (bsacr BlobSetAccessControlResponse) Status() string { return bsacr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. 
func (bsacr BlobSetAccessControlResponse) ClientRequestID() string { return bsacr.rawResponse.Header.Get("x-ms-client-request-id") } // Date returns the value for header Date. func (bsacr BlobSetAccessControlResponse) Date() time.Time { s := bsacr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ETag returns the value for header ETag. func (bsacr BlobSetAccessControlResponse) ETag() ETag { return ETag(bsacr.rawResponse.Header.Get("ETag")) } // LastModified returns the value for header Last-Modified. func (bsacr BlobSetAccessControlResponse) LastModified() time.Time { s := bsacr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (bsacr BlobSetAccessControlResponse) RequestID() string { return bsacr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (bsacr BlobSetAccessControlResponse) Version() string { return bsacr.rawResponse.Header.Get("x-ms-version") } // BlobSetHTTPHeadersResponse ... type BlobSetHTTPHeadersResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (bshhr BlobSetHTTPHeadersResponse) Response() *http.Response { return bshhr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (bshhr BlobSetHTTPHeadersResponse) StatusCode() int { return bshhr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (bshhr BlobSetHTTPHeadersResponse) Status() string { return bshhr.rawResponse.Status } // BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. 
func (bshhr BlobSetHTTPHeadersResponse) BlobSequenceNumber() int64 { s := bshhr.rawResponse.Header.Get("x-ms-blob-sequence-number") if s == "" { return -1 } i, err := strconv.ParseInt(s, 10, 64) if err != nil { i = 0 } return i } // ClientRequestID returns the value for header x-ms-client-request-id. func (bshhr BlobSetHTTPHeadersResponse) ClientRequestID() string { return bshhr.rawResponse.Header.Get("x-ms-client-request-id") } // Date returns the value for header Date. func (bshhr BlobSetHTTPHeadersResponse) Date() time.Time { s := bshhr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (bshhr BlobSetHTTPHeadersResponse) ErrorCode() string { return bshhr.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (bshhr BlobSetHTTPHeadersResponse) ETag() ETag { return ETag(bshhr.rawResponse.Header.Get("ETag")) } // LastModified returns the value for header Last-Modified. func (bshhr BlobSetHTTPHeadersResponse) LastModified() time.Time { s := bshhr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (bshhr BlobSetHTTPHeadersResponse) RequestID() string { return bshhr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (bshhr BlobSetHTTPHeadersResponse) Version() string { return bshhr.rawResponse.Header.Get("x-ms-version") } // BlobSetMetadataResponse ... type BlobSetMetadataResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (bsmr BlobSetMetadataResponse) Response() *http.Response { return bsmr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. 
func (bsmr BlobSetMetadataResponse) StatusCode() int { return bsmr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (bsmr BlobSetMetadataResponse) Status() string { return bsmr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (bsmr BlobSetMetadataResponse) ClientRequestID() string { return bsmr.rawResponse.Header.Get("x-ms-client-request-id") } // Date returns the value for header Date. func (bsmr BlobSetMetadataResponse) Date() time.Time { s := bsmr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. func (bsmr BlobSetMetadataResponse) EncryptionKeySha256() string { return bsmr.rawResponse.Header.Get("x-ms-encryption-key-sha256") } // ErrorCode returns the value for header x-ms-error-code. func (bsmr BlobSetMetadataResponse) ErrorCode() string { return bsmr.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (bsmr BlobSetMetadataResponse) ETag() ETag { return ETag(bsmr.rawResponse.Header.Get("ETag")) } // IsServerEncrypted returns the value for header x-ms-request-server-encrypted. func (bsmr BlobSetMetadataResponse) IsServerEncrypted() string { return bsmr.rawResponse.Header.Get("x-ms-request-server-encrypted") } // LastModified returns the value for header Last-Modified. func (bsmr BlobSetMetadataResponse) LastModified() time.Time { s := bsmr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (bsmr BlobSetMetadataResponse) RequestID() string { return bsmr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. 
func (bsmr BlobSetMetadataResponse) Version() string { return bsmr.rawResponse.Header.Get("x-ms-version") } // BlobSetTierResponse ... type BlobSetTierResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (bstr BlobSetTierResponse) Response() *http.Response { return bstr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (bstr BlobSetTierResponse) StatusCode() int { return bstr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (bstr BlobSetTierResponse) Status() string { return bstr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (bstr BlobSetTierResponse) ClientRequestID() string { return bstr.rawResponse.Header.Get("x-ms-client-request-id") } // ErrorCode returns the value for header x-ms-error-code. func (bstr BlobSetTierResponse) ErrorCode() string { return bstr.rawResponse.Header.Get("x-ms-error-code") } // RequestID returns the value for header x-ms-request-id. func (bstr BlobSetTierResponse) RequestID() string { return bstr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (bstr BlobSetTierResponse) Version() string { return bstr.rawResponse.Header.Get("x-ms-version") } // BlobStartCopyFromURLResponse ... type BlobStartCopyFromURLResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (bscfur BlobStartCopyFromURLResponse) Response() *http.Response { return bscfur.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (bscfur BlobStartCopyFromURLResponse) StatusCode() int { return bscfur.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". 
func (bscfur BlobStartCopyFromURLResponse) Status() string { return bscfur.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (bscfur BlobStartCopyFromURLResponse) ClientRequestID() string { return bscfur.rawResponse.Header.Get("x-ms-client-request-id") } // CopyID returns the value for header x-ms-copy-id. func (bscfur BlobStartCopyFromURLResponse) CopyID() string { return bscfur.rawResponse.Header.Get("x-ms-copy-id") } // CopyStatus returns the value for header x-ms-copy-status. func (bscfur BlobStartCopyFromURLResponse) CopyStatus() CopyStatusType { return CopyStatusType(bscfur.rawResponse.Header.Get("x-ms-copy-status")) } // Date returns the value for header Date. func (bscfur BlobStartCopyFromURLResponse) Date() time.Time { s := bscfur.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (bscfur BlobStartCopyFromURLResponse) ErrorCode() string { return bscfur.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (bscfur BlobStartCopyFromURLResponse) ETag() ETag { return ETag(bscfur.rawResponse.Header.Get("ETag")) } // LastModified returns the value for header Last-Modified. func (bscfur BlobStartCopyFromURLResponse) LastModified() time.Time { s := bscfur.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (bscfur BlobStartCopyFromURLResponse) RequestID() string { return bscfur.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (bscfur BlobStartCopyFromURLResponse) Version() string { return bscfur.rawResponse.Header.Get("x-ms-version") } // BlobUndeleteResponse ... 
type BlobUndeleteResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (bur BlobUndeleteResponse) Response() *http.Response { return bur.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (bur BlobUndeleteResponse) StatusCode() int { return bur.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (bur BlobUndeleteResponse) Status() string { return bur.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (bur BlobUndeleteResponse) ClientRequestID() string { return bur.rawResponse.Header.Get("x-ms-client-request-id") } // Date returns the value for header Date. func (bur BlobUndeleteResponse) Date() time.Time { s := bur.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (bur BlobUndeleteResponse) ErrorCode() string { return bur.rawResponse.Header.Get("x-ms-error-code") } // RequestID returns the value for header x-ms-request-id. func (bur BlobUndeleteResponse) RequestID() string { return bur.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (bur BlobUndeleteResponse) Version() string { return bur.rawResponse.Header.Get("x-ms-version") } // Block - Represents a single block in a block blob. It describes the block's ID and size. type Block struct { // Name - The base64 encoded block ID. Name string `xml:"Name"` // Size - The block size in bytes. Size int32 `xml:"Size"` } // BlockBlobCommitBlockListResponse ... type BlockBlobCommitBlockListResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. 
func (bbcblr BlockBlobCommitBlockListResponse) Response() *http.Response { return bbcblr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (bbcblr BlockBlobCommitBlockListResponse) StatusCode() int { return bbcblr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (bbcblr BlockBlobCommitBlockListResponse) Status() string { return bbcblr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (bbcblr BlockBlobCommitBlockListResponse) ClientRequestID() string { return bbcblr.rawResponse.Header.Get("x-ms-client-request-id") } // ContentMD5 returns the value for header Content-MD5. func (bbcblr BlockBlobCommitBlockListResponse) ContentMD5() []byte { s := bbcblr.rawResponse.Header.Get("Content-MD5") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // Date returns the value for header Date. func (bbcblr BlockBlobCommitBlockListResponse) Date() time.Time { s := bbcblr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. func (bbcblr BlockBlobCommitBlockListResponse) EncryptionKeySha256() string { return bbcblr.rawResponse.Header.Get("x-ms-encryption-key-sha256") } // ErrorCode returns the value for header x-ms-error-code. func (bbcblr BlockBlobCommitBlockListResponse) ErrorCode() string { return bbcblr.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (bbcblr BlockBlobCommitBlockListResponse) ETag() ETag { return ETag(bbcblr.rawResponse.Header.Get("ETag")) } // IsServerEncrypted returns the value for header x-ms-request-server-encrypted. 
func (bbcblr BlockBlobCommitBlockListResponse) IsServerEncrypted() string { return bbcblr.rawResponse.Header.Get("x-ms-request-server-encrypted") } // LastModified returns the value for header Last-Modified. func (bbcblr BlockBlobCommitBlockListResponse) LastModified() time.Time { s := bbcblr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (bbcblr BlockBlobCommitBlockListResponse) RequestID() string { return bbcblr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (bbcblr BlockBlobCommitBlockListResponse) Version() string { return bbcblr.rawResponse.Header.Get("x-ms-version") } // XMsContentCrc64 returns the value for header x-ms-content-crc64. func (bbcblr BlockBlobCommitBlockListResponse) XMsContentCrc64() []byte { s := bbcblr.rawResponse.Header.Get("x-ms-content-crc64") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // BlockBlobStageBlockFromURLResponse ... type BlockBlobStageBlockFromURLResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (bbsbfur BlockBlobStageBlockFromURLResponse) Response() *http.Response { return bbsbfur.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (bbsbfur BlockBlobStageBlockFromURLResponse) StatusCode() int { return bbsbfur.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (bbsbfur BlockBlobStageBlockFromURLResponse) Status() string { return bbsbfur.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. 
func (bbsbfur BlockBlobStageBlockFromURLResponse) ClientRequestID() string { return bbsbfur.rawResponse.Header.Get("x-ms-client-request-id") } // ContentMD5 returns the value for header Content-MD5. func (bbsbfur BlockBlobStageBlockFromURLResponse) ContentMD5() []byte { s := bbsbfur.rawResponse.Header.Get("Content-MD5") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // Date returns the value for header Date. func (bbsbfur BlockBlobStageBlockFromURLResponse) Date() time.Time { s := bbsbfur.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. func (bbsbfur BlockBlobStageBlockFromURLResponse) EncryptionKeySha256() string { return bbsbfur.rawResponse.Header.Get("x-ms-encryption-key-sha256") } // ErrorCode returns the value for header x-ms-error-code. func (bbsbfur BlockBlobStageBlockFromURLResponse) ErrorCode() string { return bbsbfur.rawResponse.Header.Get("x-ms-error-code") } // IsServerEncrypted returns the value for header x-ms-request-server-encrypted. func (bbsbfur BlockBlobStageBlockFromURLResponse) IsServerEncrypted() string { return bbsbfur.rawResponse.Header.Get("x-ms-request-server-encrypted") } // RequestID returns the value for header x-ms-request-id. func (bbsbfur BlockBlobStageBlockFromURLResponse) RequestID() string { return bbsbfur.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (bbsbfur BlockBlobStageBlockFromURLResponse) Version() string { return bbsbfur.rawResponse.Header.Get("x-ms-version") } // XMsContentCrc64 returns the value for header x-ms-content-crc64. 
func (bbsbfur BlockBlobStageBlockFromURLResponse) XMsContentCrc64() []byte { s := bbsbfur.rawResponse.Header.Get("x-ms-content-crc64") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // BlockBlobStageBlockResponse ... type BlockBlobStageBlockResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (bbsbr BlockBlobStageBlockResponse) Response() *http.Response { return bbsbr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (bbsbr BlockBlobStageBlockResponse) StatusCode() int { return bbsbr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (bbsbr BlockBlobStageBlockResponse) Status() string { return bbsbr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (bbsbr BlockBlobStageBlockResponse) ClientRequestID() string { return bbsbr.rawResponse.Header.Get("x-ms-client-request-id") } // ContentMD5 returns the value for header Content-MD5. func (bbsbr BlockBlobStageBlockResponse) ContentMD5() []byte { s := bbsbr.rawResponse.Header.Get("Content-MD5") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // Date returns the value for header Date. func (bbsbr BlockBlobStageBlockResponse) Date() time.Time { s := bbsbr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. func (bbsbr BlockBlobStageBlockResponse) EncryptionKeySha256() string { return bbsbr.rawResponse.Header.Get("x-ms-encryption-key-sha256") } // ErrorCode returns the value for header x-ms-error-code. 
func (bbsbr BlockBlobStageBlockResponse) ErrorCode() string { return bbsbr.rawResponse.Header.Get("x-ms-error-code") } // IsServerEncrypted returns the value for header x-ms-request-server-encrypted. func (bbsbr BlockBlobStageBlockResponse) IsServerEncrypted() string { return bbsbr.rawResponse.Header.Get("x-ms-request-server-encrypted") } // RequestID returns the value for header x-ms-request-id. func (bbsbr BlockBlobStageBlockResponse) RequestID() string { return bbsbr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (bbsbr BlockBlobStageBlockResponse) Version() string { return bbsbr.rawResponse.Header.Get("x-ms-version") } // XMsContentCrc64 returns the value for header x-ms-content-crc64. func (bbsbr BlockBlobStageBlockResponse) XMsContentCrc64() []byte { s := bbsbr.rawResponse.Header.Get("x-ms-content-crc64") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // BlockBlobUploadResponse ... type BlockBlobUploadResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (bbur BlockBlobUploadResponse) Response() *http.Response { return bbur.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (bbur BlockBlobUploadResponse) StatusCode() int { return bbur.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (bbur BlockBlobUploadResponse) Status() string { return bbur.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (bbur BlockBlobUploadResponse) ClientRequestID() string { return bbur.rawResponse.Header.Get("x-ms-client-request-id") } // ContentMD5 returns the value for header Content-MD5. 
func (bbur BlockBlobUploadResponse) ContentMD5() []byte { s := bbur.rawResponse.Header.Get("Content-MD5") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // Date returns the value for header Date. func (bbur BlockBlobUploadResponse) Date() time.Time { s := bbur.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. func (bbur BlockBlobUploadResponse) EncryptionKeySha256() string { return bbur.rawResponse.Header.Get("x-ms-encryption-key-sha256") } // ErrorCode returns the value for header x-ms-error-code. func (bbur BlockBlobUploadResponse) ErrorCode() string { return bbur.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (bbur BlockBlobUploadResponse) ETag() ETag { return ETag(bbur.rawResponse.Header.Get("ETag")) } // IsServerEncrypted returns the value for header x-ms-request-server-encrypted. func (bbur BlockBlobUploadResponse) IsServerEncrypted() string { return bbur.rawResponse.Header.Get("x-ms-request-server-encrypted") } // LastModified returns the value for header Last-Modified. func (bbur BlockBlobUploadResponse) LastModified() time.Time { s := bbur.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (bbur BlockBlobUploadResponse) RequestID() string { return bbur.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (bbur BlockBlobUploadResponse) Version() string { return bbur.rawResponse.Header.Get("x-ms-version") } // BlockList ... 
type BlockList struct { rawResponse *http.Response CommittedBlocks []Block `xml:"CommittedBlocks>Block"` UncommittedBlocks []Block `xml:"UncommittedBlocks>Block"` } // Response returns the raw HTTP response object. func (bl BlockList) Response() *http.Response { return bl.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (bl BlockList) StatusCode() int { return bl.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (bl BlockList) Status() string { return bl.rawResponse.Status } // BlobContentLength returns the value for header x-ms-blob-content-length. func (bl BlockList) BlobContentLength() int64 { s := bl.rawResponse.Header.Get("x-ms-blob-content-length") if s == "" { return -1 } i, err := strconv.ParseInt(s, 10, 64) if err != nil { i = 0 } return i } // ClientRequestID returns the value for header x-ms-client-request-id. func (bl BlockList) ClientRequestID() string { return bl.rawResponse.Header.Get("x-ms-client-request-id") } // ContentType returns the value for header Content-Type. func (bl BlockList) ContentType() string { return bl.rawResponse.Header.Get("Content-Type") } // Date returns the value for header Date. func (bl BlockList) Date() time.Time { s := bl.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (bl BlockList) ErrorCode() string { return bl.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (bl BlockList) ETag() ETag { return ETag(bl.rawResponse.Header.Get("ETag")) } // LastModified returns the value for header Last-Modified. 
func (bl BlockList) LastModified() time.Time { s := bl.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (bl BlockList) RequestID() string { return bl.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (bl BlockList) Version() string { return bl.rawResponse.Header.Get("x-ms-version") } // BlockLookupList ... type BlockLookupList struct { // XMLName is used for marshalling and is subject to removal in a future release. XMLName xml.Name `xml:"BlockList"` Committed []string `xml:"Committed"` Uncommitted []string `xml:"Uncommitted"` Latest []string `xml:"Latest"` } // ClearRange ... type ClearRange struct { Start int64 `xml:"Start"` End int64 `xml:"End"` } // ContainerAcquireLeaseResponse ... type ContainerAcquireLeaseResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (calr ContainerAcquireLeaseResponse) Response() *http.Response { return calr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (calr ContainerAcquireLeaseResponse) StatusCode() int { return calr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (calr ContainerAcquireLeaseResponse) Status() string { return calr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (calr ContainerAcquireLeaseResponse) ClientRequestID() string { return calr.rawResponse.Header.Get("x-ms-client-request-id") } // Date returns the value for header Date. func (calr ContainerAcquireLeaseResponse) Date() time.Time { s := calr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. 
func (r ContainerAcquireLeaseResponse) ErrorCode() string {
	return r.rawResponse.Header.Get("x-ms-error-code")
}

// ETag returns the ETag response header.
func (r ContainerAcquireLeaseResponse) ETag() ETag {
	return ETag(r.rawResponse.Header.Get("ETag"))
}

// LastModified returns the Last-Modified response header parsed as RFC 1123;
// the zero time is returned when the header is absent or malformed.
func (r ContainerAcquireLeaseResponse) LastModified() time.Time {
	raw := r.rawResponse.Header.Get("Last-Modified")
	if raw == "" {
		return time.Time{}
	}
	if parsed, err := time.Parse(time.RFC1123, raw); err == nil {
		return parsed
	}
	return time.Time{}
}

// LeaseID returns the x-ms-lease-id response header.
func (r ContainerAcquireLeaseResponse) LeaseID() string {
	return r.rawResponse.Header.Get("x-ms-lease-id")
}

// RequestID returns the x-ms-request-id response header.
func (r ContainerAcquireLeaseResponse) RequestID() string {
	return r.rawResponse.Header.Get("x-ms-request-id")
}

// Version returns the x-ms-version response header.
func (r ContainerAcquireLeaseResponse) Version() string {
	return r.rawResponse.Header.Get("x-ms-version")
}

// ContainerBreakLeaseResponse wraps the raw HTTP response of the
// corresponding container lease operation.
type ContainerBreakLeaseResponse struct {
	rawResponse *http.Response
}

// Response returns the raw HTTP response object.
func (r ContainerBreakLeaseResponse) Response() *http.Response {
	return r.rawResponse
}

// StatusCode returns the HTTP status code of the response, e.g. 200.
func (r ContainerBreakLeaseResponse) StatusCode() int {
	return r.rawResponse.StatusCode
}

// Status returns the HTTP status message of the response, e.g. "200 OK".
func (r ContainerBreakLeaseResponse) Status() string {
	return r.rawResponse.Status
}

// ClientRequestID returns the x-ms-client-request-id response header.
func (r ContainerBreakLeaseResponse) ClientRequestID() string {
	return r.rawResponse.Header.Get("x-ms-client-request-id")
}

// Date returns the value for header Date.
func (cblr ContainerBreakLeaseResponse) Date() time.Time { s := cblr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (cblr ContainerBreakLeaseResponse) ErrorCode() string { return cblr.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (cblr ContainerBreakLeaseResponse) ETag() ETag { return ETag(cblr.rawResponse.Header.Get("ETag")) } // LastModified returns the value for header Last-Modified. func (cblr ContainerBreakLeaseResponse) LastModified() time.Time { s := cblr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // LeaseTime returns the value for header x-ms-lease-time. func (cblr ContainerBreakLeaseResponse) LeaseTime() int32 { s := cblr.rawResponse.Header.Get("x-ms-lease-time") if s == "" { return -1 } i, err := strconv.ParseInt(s, 10, 32) if err != nil { i = 0 } return int32(i) } // RequestID returns the value for header x-ms-request-id. func (cblr ContainerBreakLeaseResponse) RequestID() string { return cblr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (cblr ContainerBreakLeaseResponse) Version() string { return cblr.rawResponse.Header.Get("x-ms-version") } // ContainerChangeLeaseResponse ... type ContainerChangeLeaseResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (cclr ContainerChangeLeaseResponse) Response() *http.Response { return cclr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (cclr ContainerChangeLeaseResponse) StatusCode() int { return cclr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". 
func (r ContainerChangeLeaseResponse) Status() string {
	return r.rawResponse.Status
}

// ClientRequestID returns the x-ms-client-request-id response header.
func (r ContainerChangeLeaseResponse) ClientRequestID() string {
	return r.rawResponse.Header.Get("x-ms-client-request-id")
}

// Date returns the Date response header parsed as RFC 1123; the zero time is
// returned when the header is absent or malformed.
func (r ContainerChangeLeaseResponse) Date() time.Time {
	raw := r.rawResponse.Header.Get("Date")
	if raw == "" {
		return time.Time{}
	}
	if parsed, err := time.Parse(time.RFC1123, raw); err == nil {
		return parsed
	}
	return time.Time{}
}

// ErrorCode returns the x-ms-error-code response header.
func (r ContainerChangeLeaseResponse) ErrorCode() string {
	return r.rawResponse.Header.Get("x-ms-error-code")
}

// ETag returns the ETag response header.
func (r ContainerChangeLeaseResponse) ETag() ETag {
	return ETag(r.rawResponse.Header.Get("ETag"))
}

// LastModified returns the Last-Modified response header parsed as RFC 1123;
// the zero time is returned when the header is absent or malformed.
func (r ContainerChangeLeaseResponse) LastModified() time.Time {
	raw := r.rawResponse.Header.Get("Last-Modified")
	if raw == "" {
		return time.Time{}
	}
	if parsed, err := time.Parse(time.RFC1123, raw); err == nil {
		return parsed
	}
	return time.Time{}
}

// LeaseID returns the x-ms-lease-id response header.
func (r ContainerChangeLeaseResponse) LeaseID() string {
	return r.rawResponse.Header.Get("x-ms-lease-id")
}

// RequestID returns the x-ms-request-id response header.
func (r ContainerChangeLeaseResponse) RequestID() string {
	return r.rawResponse.Header.Get("x-ms-request-id")
}

// Version returns the x-ms-version response header.
func (r ContainerChangeLeaseResponse) Version() string {
	return r.rawResponse.Header.Get("x-ms-version")
}

// ContainerCreateResponse wraps the raw HTTP response of the corresponding
// container operation.
type ContainerCreateResponse struct {
	rawResponse *http.Response
}

// Response returns the raw HTTP response object.
func (r ContainerCreateResponse) Response() *http.Response {
	return r.rawResponse
}

// StatusCode returns the HTTP status code of the response, e.g. 200.
func (r ContainerCreateResponse) StatusCode() int {
	return r.rawResponse.StatusCode
}

// Status returns the HTTP status message of the response, e.g. "200 OK".
func (r ContainerCreateResponse) Status() string {
	return r.rawResponse.Status
}

// ClientRequestID returns the x-ms-client-request-id response header.
func (r ContainerCreateResponse) ClientRequestID() string {
	return r.rawResponse.Header.Get("x-ms-client-request-id")
}

// Date returns the Date response header parsed as RFC 1123; the zero time is
// returned when the header is absent or malformed.
func (r ContainerCreateResponse) Date() time.Time {
	raw := r.rawResponse.Header.Get("Date")
	if raw == "" {
		return time.Time{}
	}
	if parsed, err := time.Parse(time.RFC1123, raw); err == nil {
		return parsed
	}
	return time.Time{}
}

// ErrorCode returns the x-ms-error-code response header.
func (r ContainerCreateResponse) ErrorCode() string {
	return r.rawResponse.Header.Get("x-ms-error-code")
}

// ETag returns the ETag response header.
func (r ContainerCreateResponse) ETag() ETag {
	return ETag(r.rawResponse.Header.Get("ETag"))
}

// LastModified returns the Last-Modified response header parsed as RFC 1123;
// the zero time is returned when the header is absent or malformed.
func (r ContainerCreateResponse) LastModified() time.Time {
	raw := r.rawResponse.Header.Get("Last-Modified")
	if raw == "" {
		return time.Time{}
	}
	if parsed, err := time.Parse(time.RFC1123, raw); err == nil {
		return parsed
	}
	return time.Time{}
}

// RequestID returns the x-ms-request-id response header.
func (r ContainerCreateResponse) RequestID() string {
	return r.rawResponse.Header.Get("x-ms-request-id")
}

// Version returns the x-ms-version response header.
func (r ContainerCreateResponse) Version() string {
	return r.rawResponse.Header.Get("x-ms-version")
}

// ContainerDeleteResponse wraps the raw HTTP response of the corresponding
// container operation.
type ContainerDeleteResponse struct {
	rawResponse *http.Response
}

// Response returns the raw HTTP response object.
func (r ContainerDeleteResponse) Response() *http.Response {
	return r.rawResponse
}

// StatusCode returns the HTTP status code of the response, e.g. 200.
func (r ContainerDeleteResponse) StatusCode() int {
	return r.rawResponse.StatusCode
}

// Status returns the HTTP status message of the response, e.g. "200 OK".
func (r ContainerDeleteResponse) Status() string {
	return r.rawResponse.Status
}

// ClientRequestID returns the x-ms-client-request-id response header.
func (r ContainerDeleteResponse) ClientRequestID() string {
	return r.rawResponse.Header.Get("x-ms-client-request-id")
}

// Date returns the Date response header parsed as RFC 1123; the zero time is
// returned when the header is absent or malformed.
func (r ContainerDeleteResponse) Date() time.Time {
	raw := r.rawResponse.Header.Get("Date")
	if raw == "" {
		return time.Time{}
	}
	if parsed, err := time.Parse(time.RFC1123, raw); err == nil {
		return parsed
	}
	return time.Time{}
}

// ErrorCode returns the x-ms-error-code response header.
func (r ContainerDeleteResponse) ErrorCode() string {
	return r.rawResponse.Header.Get("x-ms-error-code")
}

// RequestID returns the x-ms-request-id response header.
func (r ContainerDeleteResponse) RequestID() string {
	return r.rawResponse.Header.Get("x-ms-request-id")
}

// Version returns the x-ms-version response header.
func (r ContainerDeleteResponse) Version() string {
	return r.rawResponse.Header.Get("x-ms-version")
}

// ContainerGetAccountInfoResponse wraps the raw HTTP response of the
// corresponding container operation.
type ContainerGetAccountInfoResponse struct {
	rawResponse *http.Response
}

// Response returns the raw HTTP response object.
func (r ContainerGetAccountInfoResponse) Response() *http.Response {
	return r.rawResponse
}

// StatusCode returns the HTTP status code of the response, e.g. 200.
func (r ContainerGetAccountInfoResponse) StatusCode() int {
	return r.rawResponse.StatusCode
}

// Status returns the HTTP status message of the response, e.g. "200 OK".
func (r ContainerGetAccountInfoResponse) Status() string {
	return r.rawResponse.Status
}

// AccountKind returns the value for header x-ms-account-kind.
func (r ContainerGetAccountInfoResponse) AccountKind() AccountKindType {
	return AccountKindType(r.rawResponse.Header.Get("x-ms-account-kind"))
}

// ClientRequestID returns the x-ms-client-request-id response header.
func (r ContainerGetAccountInfoResponse) ClientRequestID() string {
	return r.rawResponse.Header.Get("x-ms-client-request-id")
}

// Date returns the Date response header parsed as RFC 1123; the zero time is
// returned when the header is absent or malformed.
func (r ContainerGetAccountInfoResponse) Date() time.Time {
	raw := r.rawResponse.Header.Get("Date")
	if raw == "" {
		return time.Time{}
	}
	if parsed, err := time.Parse(time.RFC1123, raw); err == nil {
		return parsed
	}
	return time.Time{}
}

// ErrorCode returns the x-ms-error-code response header.
func (r ContainerGetAccountInfoResponse) ErrorCode() string {
	return r.rawResponse.Header.Get("x-ms-error-code")
}

// RequestID returns the x-ms-request-id response header.
func (r ContainerGetAccountInfoResponse) RequestID() string {
	return r.rawResponse.Header.Get("x-ms-request-id")
}

// SkuName returns the x-ms-sku-name response header.
func (r ContainerGetAccountInfoResponse) SkuName() SkuNameType {
	return SkuNameType(r.rawResponse.Header.Get("x-ms-sku-name"))
}

// Version returns the x-ms-version response header.
func (r ContainerGetAccountInfoResponse) Version() string {
	return r.rawResponse.Header.Get("x-ms-version")
}

// ContainerGetPropertiesResponse wraps the raw HTTP response of the
// corresponding container operation.
type ContainerGetPropertiesResponse struct {
	rawResponse *http.Response
}

// NewMetadata returns user-defined key/value pairs, collected from every
// response header carrying the metadata prefix. Keys are lowercased with the
// prefix stripped; only the first value of each header is kept.
func (r ContainerGetPropertiesResponse) NewMetadata() Metadata {
	md := Metadata{}
	for key, values := range r.rawResponse.Header {
		if len(key) > mdPrefixLen && strings.EqualFold(key[:mdPrefixLen], mdPrefix) {
			md[strings.ToLower(key[mdPrefixLen:])] = values[0]
		}
	}
	return md
}

// Response returns the raw HTTP response object.
func (r ContainerGetPropertiesResponse) Response() *http.Response {
	return r.rawResponse
}

// StatusCode returns the HTTP status code of the response, e.g. 200.
func (r ContainerGetPropertiesResponse) StatusCode() int {
	return r.rawResponse.StatusCode
}

// Status returns the HTTP status message of the response, e.g. "200 OK".
func (r ContainerGetPropertiesResponse) Status() string {
	return r.rawResponse.Status
}

// BlobPublicAccess returns the x-ms-blob-public-access response header.
func (r ContainerGetPropertiesResponse) BlobPublicAccess() PublicAccessType {
	return PublicAccessType(r.rawResponse.Header.Get("x-ms-blob-public-access"))
}

// ClientRequestID returns the x-ms-client-request-id response header.
func (r ContainerGetPropertiesResponse) ClientRequestID() string {
	return r.rawResponse.Header.Get("x-ms-client-request-id")
}

// Date returns the Date response header parsed as RFC 1123; the zero time is
// returned when the header is absent or malformed.
func (r ContainerGetPropertiesResponse) Date() time.Time {
	raw := r.rawResponse.Header.Get("Date")
	if raw == "" {
		return time.Time{}
	}
	if parsed, err := time.Parse(time.RFC1123, raw); err == nil {
		return parsed
	}
	return time.Time{}
}

// ErrorCode returns the x-ms-error-code response header.
func (r ContainerGetPropertiesResponse) ErrorCode() string {
	return r.rawResponse.Header.Get("x-ms-error-code")
}

// ETag returns the ETag response header.
func (r ContainerGetPropertiesResponse) ETag() ETag {
	return ETag(r.rawResponse.Header.Get("ETag"))
}

// HasImmutabilityPolicy returns the x-ms-has-immutability-policy response header.
func (r ContainerGetPropertiesResponse) HasImmutabilityPolicy() string {
	return r.rawResponse.Header.Get("x-ms-has-immutability-policy")
}

// HasLegalHold returns the x-ms-has-legal-hold response header.
func (r ContainerGetPropertiesResponse) HasLegalHold() string {
	return r.rawResponse.Header.Get("x-ms-has-legal-hold")
}

// LastModified returns the value for header Last-Modified.
func (cgpr ContainerGetPropertiesResponse) LastModified() time.Time { s := cgpr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // LeaseDuration returns the value for header x-ms-lease-duration. func (cgpr ContainerGetPropertiesResponse) LeaseDuration() LeaseDurationType { return LeaseDurationType(cgpr.rawResponse.Header.Get("x-ms-lease-duration")) } // LeaseState returns the value for header x-ms-lease-state. func (cgpr ContainerGetPropertiesResponse) LeaseState() LeaseStateType { return LeaseStateType(cgpr.rawResponse.Header.Get("x-ms-lease-state")) } // LeaseStatus returns the value for header x-ms-lease-status. func (cgpr ContainerGetPropertiesResponse) LeaseStatus() LeaseStatusType { return LeaseStatusType(cgpr.rawResponse.Header.Get("x-ms-lease-status")) } // RequestID returns the value for header x-ms-request-id. func (cgpr ContainerGetPropertiesResponse) RequestID() string { return cgpr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (cgpr ContainerGetPropertiesResponse) Version() string { return cgpr.rawResponse.Header.Get("x-ms-version") } // ContainerItem - An Azure Storage container type ContainerItem struct { // XMLName is used for marshalling and is subject to removal in a future release. 
XMLName xml.Name `xml:"Container"` Name string `xml:"Name"` Properties ContainerProperties `xml:"Properties"` Metadata Metadata `xml:"Metadata"` } // ContainerProperties - Properties of a container type ContainerProperties struct { LastModified time.Time `xml:"Last-Modified"` Etag ETag `xml:"Etag"` // LeaseStatus - Possible values include: 'LeaseStatusLocked', 'LeaseStatusUnlocked', 'LeaseStatusNone' LeaseStatus LeaseStatusType `xml:"LeaseStatus"` // LeaseState - Possible values include: 'LeaseStateAvailable', 'LeaseStateLeased', 'LeaseStateExpired', 'LeaseStateBreaking', 'LeaseStateBroken', 'LeaseStateNone' LeaseState LeaseStateType `xml:"LeaseState"` // LeaseDuration - Possible values include: 'LeaseDurationInfinite', 'LeaseDurationFixed', 'LeaseDurationNone' LeaseDuration LeaseDurationType `xml:"LeaseDuration"` // PublicAccess - Possible values include: 'PublicAccessContainer', 'PublicAccessBlob', 'PublicAccessNone' PublicAccess PublicAccessType `xml:"PublicAccess"` HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` HasLegalHold *bool `xml:"HasLegalHold"` } // MarshalXML implements the xml.Marshaler interface for ContainerProperties. func (cp ContainerProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { cp2 := (*containerProperties)(unsafe.Pointer(&cp)) return e.EncodeElement(*cp2, start) } // UnmarshalXML implements the xml.Unmarshaler interface for ContainerProperties. func (cp *ContainerProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { cp2 := (*containerProperties)(unsafe.Pointer(cp)) return d.DecodeElement(cp2, &start) } // ContainerReleaseLeaseResponse ... type ContainerReleaseLeaseResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (crlr ContainerReleaseLeaseResponse) Response() *http.Response { return crlr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. 
func (r ContainerReleaseLeaseResponse) StatusCode() int {
	return r.rawResponse.StatusCode
}

// Status returns the HTTP status message of the response, e.g. "200 OK".
func (r ContainerReleaseLeaseResponse) Status() string {
	return r.rawResponse.Status
}

// ClientRequestID returns the x-ms-client-request-id response header.
func (r ContainerReleaseLeaseResponse) ClientRequestID() string {
	return r.rawResponse.Header.Get("x-ms-client-request-id")
}

// Date returns the Date response header parsed as RFC 1123; the zero time is
// returned when the header is absent or malformed.
func (r ContainerReleaseLeaseResponse) Date() time.Time {
	raw := r.rawResponse.Header.Get("Date")
	if raw == "" {
		return time.Time{}
	}
	if parsed, err := time.Parse(time.RFC1123, raw); err == nil {
		return parsed
	}
	return time.Time{}
}

// ErrorCode returns the x-ms-error-code response header.
func (r ContainerReleaseLeaseResponse) ErrorCode() string {
	return r.rawResponse.Header.Get("x-ms-error-code")
}

// ETag returns the ETag response header.
func (r ContainerReleaseLeaseResponse) ETag() ETag {
	return ETag(r.rawResponse.Header.Get("ETag"))
}

// LastModified returns the Last-Modified response header parsed as RFC 1123;
// the zero time is returned when the header is absent or malformed.
func (r ContainerReleaseLeaseResponse) LastModified() time.Time {
	raw := r.rawResponse.Header.Get("Last-Modified")
	if raw == "" {
		return time.Time{}
	}
	if parsed, err := time.Parse(time.RFC1123, raw); err == nil {
		return parsed
	}
	return time.Time{}
}

// RequestID returns the x-ms-request-id response header.
func (r ContainerReleaseLeaseResponse) RequestID() string {
	return r.rawResponse.Header.Get("x-ms-request-id")
}

// Version returns the x-ms-version response header.
func (r ContainerReleaseLeaseResponse) Version() string {
	return r.rawResponse.Header.Get("x-ms-version")
}

// ContainerRenewLeaseResponse wraps the raw HTTP response of the
// corresponding container lease operation.
type ContainerRenewLeaseResponse struct {
	rawResponse *http.Response
}

// Response returns the raw HTTP response object.
func (r ContainerRenewLeaseResponse) Response() *http.Response {
	return r.rawResponse
}

// StatusCode returns the HTTP status code of the response, e.g. 200.
func (r ContainerRenewLeaseResponse) StatusCode() int {
	return r.rawResponse.StatusCode
}

// Status returns the HTTP status message of the response, e.g. "200 OK".
func (r ContainerRenewLeaseResponse) Status() string {
	return r.rawResponse.Status
}

// ClientRequestID returns the x-ms-client-request-id response header.
func (r ContainerRenewLeaseResponse) ClientRequestID() string {
	return r.rawResponse.Header.Get("x-ms-client-request-id")
}

// Date returns the Date response header parsed as RFC 1123; the zero time is
// returned when the header is absent or malformed.
func (r ContainerRenewLeaseResponse) Date() time.Time {
	raw := r.rawResponse.Header.Get("Date")
	if raw == "" {
		return time.Time{}
	}
	if parsed, err := time.Parse(time.RFC1123, raw); err == nil {
		return parsed
	}
	return time.Time{}
}

// ErrorCode returns the x-ms-error-code response header.
func (r ContainerRenewLeaseResponse) ErrorCode() string {
	return r.rawResponse.Header.Get("x-ms-error-code")
}

// ETag returns the ETag response header.
func (r ContainerRenewLeaseResponse) ETag() ETag {
	return ETag(r.rawResponse.Header.Get("ETag"))
}

// LastModified returns the Last-Modified response header parsed as RFC 1123;
// the zero time is returned when the header is absent or malformed.
func (r ContainerRenewLeaseResponse) LastModified() time.Time {
	raw := r.rawResponse.Header.Get("Last-Modified")
	if raw == "" {
		return time.Time{}
	}
	if parsed, err := time.Parse(time.RFC1123, raw); err == nil {
		return parsed
	}
	return time.Time{}
}

// LeaseID returns the x-ms-lease-id response header.
func (r ContainerRenewLeaseResponse) LeaseID() string {
	return r.rawResponse.Header.Get("x-ms-lease-id")
}

// RequestID returns the x-ms-request-id response header.
func (r ContainerRenewLeaseResponse) RequestID() string {
	return r.rawResponse.Header.Get("x-ms-request-id")
}

// Version returns the value for header x-ms-version.
func (r ContainerRenewLeaseResponse) Version() string {
	return r.rawResponse.Header.Get("x-ms-version")
}

// ContainerSetAccessPolicyResponse wraps the raw HTTP response of the
// corresponding container operation.
type ContainerSetAccessPolicyResponse struct {
	rawResponse *http.Response
}

// Response returns the raw HTTP response object.
func (r ContainerSetAccessPolicyResponse) Response() *http.Response {
	return r.rawResponse
}

// StatusCode returns the HTTP status code of the response, e.g. 200.
func (r ContainerSetAccessPolicyResponse) StatusCode() int {
	return r.rawResponse.StatusCode
}

// Status returns the HTTP status message of the response, e.g. "200 OK".
func (r ContainerSetAccessPolicyResponse) Status() string {
	return r.rawResponse.Status
}

// ClientRequestID returns the x-ms-client-request-id response header.
func (r ContainerSetAccessPolicyResponse) ClientRequestID() string {
	return r.rawResponse.Header.Get("x-ms-client-request-id")
}

// Date returns the Date response header parsed as RFC 1123; the zero time is
// returned when the header is absent or malformed.
func (r ContainerSetAccessPolicyResponse) Date() time.Time {
	raw := r.rawResponse.Header.Get("Date")
	if raw == "" {
		return time.Time{}
	}
	if parsed, err := time.Parse(time.RFC1123, raw); err == nil {
		return parsed
	}
	return time.Time{}
}

// ErrorCode returns the x-ms-error-code response header.
func (r ContainerSetAccessPolicyResponse) ErrorCode() string {
	return r.rawResponse.Header.Get("x-ms-error-code")
}

// ETag returns the ETag response header.
func (r ContainerSetAccessPolicyResponse) ETag() ETag {
	return ETag(r.rawResponse.Header.Get("ETag"))
}

// LastModified returns the Last-Modified response header parsed as RFC 1123;
// the zero time is returned when the header is absent or malformed.
func (r ContainerSetAccessPolicyResponse) LastModified() time.Time {
	raw := r.rawResponse.Header.Get("Last-Modified")
	if raw == "" {
		return time.Time{}
	}
	if parsed, err := time.Parse(time.RFC1123, raw); err == nil {
		return parsed
	}
	return time.Time{}
}

// RequestID returns the value for header x-ms-request-id.
func (r ContainerSetAccessPolicyResponse) RequestID() string {
	return r.rawResponse.Header.Get("x-ms-request-id")
}

// Version returns the x-ms-version response header.
func (r ContainerSetAccessPolicyResponse) Version() string {
	return r.rawResponse.Header.Get("x-ms-version")
}

// ContainerSetMetadataResponse wraps the raw HTTP response of the
// corresponding container operation.
type ContainerSetMetadataResponse struct {
	rawResponse *http.Response
}

// Response returns the raw HTTP response object.
func (r ContainerSetMetadataResponse) Response() *http.Response {
	return r.rawResponse
}

// StatusCode returns the HTTP status code of the response, e.g. 200.
func (r ContainerSetMetadataResponse) StatusCode() int {
	return r.rawResponse.StatusCode
}

// Status returns the HTTP status message of the response, e.g. "200 OK".
func (r ContainerSetMetadataResponse) Status() string {
	return r.rawResponse.Status
}

// ClientRequestID returns the x-ms-client-request-id response header.
func (r ContainerSetMetadataResponse) ClientRequestID() string {
	return r.rawResponse.Header.Get("x-ms-client-request-id")
}

// Date returns the Date response header parsed as RFC 1123; the zero time is
// returned when the header is absent or malformed.
func (r ContainerSetMetadataResponse) Date() time.Time {
	raw := r.rawResponse.Header.Get("Date")
	if raw == "" {
		return time.Time{}
	}
	if parsed, err := time.Parse(time.RFC1123, raw); err == nil {
		return parsed
	}
	return time.Time{}
}

// ErrorCode returns the x-ms-error-code response header.
func (r ContainerSetMetadataResponse) ErrorCode() string {
	return r.rawResponse.Header.Get("x-ms-error-code")
}

// ETag returns the ETag response header.
func (r ContainerSetMetadataResponse) ETag() ETag {
	return ETag(r.rawResponse.Header.Get("ETag"))
}

// LastModified returns the value for header Last-Modified.
func (csmr ContainerSetMetadataResponse) LastModified() time.Time { s := csmr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (csmr ContainerSetMetadataResponse) RequestID() string { return csmr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (csmr ContainerSetMetadataResponse) Version() string { return csmr.rawResponse.Header.Get("x-ms-version") } // CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access // resources in another domain. Web browsers implement a security restriction known as same-origin policy that // prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain // (the origin domain) to call APIs in another domain type CorsRule struct { // AllowedOrigins - The origin domains that are permitted to make a request against the storage service via CORS. The origin domain is the domain from which the request originates. Note that the origin must be an exact case-sensitive match with the origin that the user age sends to the service. You can also use the wildcard character '*' to allow all origin domains to make requests via CORS. AllowedOrigins string `xml:"AllowedOrigins"` // AllowedMethods - The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated) AllowedMethods string `xml:"AllowedMethods"` // AllowedHeaders - the request headers that the origin domain may specify on the CORS request. 
AllowedHeaders string `xml:"AllowedHeaders"` // ExposedHeaders - The response headers that may be sent in the response to the CORS request and exposed by the browser to the request issuer ExposedHeaders string `xml:"ExposedHeaders"` // MaxAgeInSeconds - The maximum amount time that a browser should cache the preflight OPTIONS request. MaxAgeInSeconds int32 `xml:"MaxAgeInSeconds"` } // DataLakeStorageError ... type DataLakeStorageError struct { // Error - The service error response object. Error *DataLakeStorageErrorError `xml:"error"` } // DataLakeStorageErrorError - The service error response object. type DataLakeStorageErrorError struct { // XMLName is used for marshalling and is subject to removal in a future release. XMLName xml.Name `xml:"DataLakeStorageError_error"` // Code - The service error code. Code *string `xml:"Code"` // Message - The service error message. Message *string `xml:"Message"` } // DirectoryCreateResponse ... type DirectoryCreateResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (dcr DirectoryCreateResponse) Response() *http.Response { return dcr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (dcr DirectoryCreateResponse) StatusCode() int { return dcr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (dcr DirectoryCreateResponse) Status() string { return dcr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (dcr DirectoryCreateResponse) ClientRequestID() string { return dcr.rawResponse.Header.Get("x-ms-client-request-id") } // ContentLength returns the value for header Content-Length. func (dcr DirectoryCreateResponse) ContentLength() int64 { s := dcr.rawResponse.Header.Get("Content-Length") if s == "" { return -1 } i, err := strconv.ParseInt(s, 10, 64) if err != nil { i = 0 } return i } // Date returns the value for header Date. 
func (dcr DirectoryCreateResponse) Date() time.Time { s := dcr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ETag returns the value for header ETag. func (dcr DirectoryCreateResponse) ETag() ETag { return ETag(dcr.rawResponse.Header.Get("ETag")) } // LastModified returns the value for header Last-Modified. func (dcr DirectoryCreateResponse) LastModified() time.Time { s := dcr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (dcr DirectoryCreateResponse) RequestID() string { return dcr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (dcr DirectoryCreateResponse) Version() string { return dcr.rawResponse.Header.Get("x-ms-version") } // DirectoryDeleteResponse ... type DirectoryDeleteResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (ddr DirectoryDeleteResponse) Response() *http.Response { return ddr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (ddr DirectoryDeleteResponse) StatusCode() int { return ddr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (ddr DirectoryDeleteResponse) Status() string { return ddr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (ddr DirectoryDeleteResponse) ClientRequestID() string { return ddr.rawResponse.Header.Get("x-ms-client-request-id") } // Date returns the value for header Date. 
func (ddr DirectoryDeleteResponse) Date() time.Time { s := ddr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // Marker returns the value for header x-ms-continuation. func (ddr DirectoryDeleteResponse) Marker() string { return ddr.rawResponse.Header.Get("x-ms-continuation") } // RequestID returns the value for header x-ms-request-id. func (ddr DirectoryDeleteResponse) RequestID() string { return ddr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (ddr DirectoryDeleteResponse) Version() string { return ddr.rawResponse.Header.Get("x-ms-version") } // DirectoryGetAccessControlResponse ... type DirectoryGetAccessControlResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (dgacr DirectoryGetAccessControlResponse) Response() *http.Response { return dgacr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (dgacr DirectoryGetAccessControlResponse) StatusCode() int { return dgacr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (dgacr DirectoryGetAccessControlResponse) Status() string { return dgacr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (dgacr DirectoryGetAccessControlResponse) ClientRequestID() string { return dgacr.rawResponse.Header.Get("x-ms-client-request-id") } // Date returns the value for header Date. func (dgacr DirectoryGetAccessControlResponse) Date() time.Time { s := dgacr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ETag returns the value for header ETag. 
func (r DirectoryGetAccessControlResponse) ETag() ETag {
	return ETag(r.rawResponse.Header.Get("ETag"))
}

// LastModified returns the Last-Modified response header parsed as RFC 1123;
// the zero time is returned when the header is absent or malformed.
func (r DirectoryGetAccessControlResponse) LastModified() time.Time {
	raw := r.rawResponse.Header.Get("Last-Modified")
	if raw == "" {
		return time.Time{}
	}
	if parsed, err := time.Parse(time.RFC1123, raw); err == nil {
		return parsed
	}
	return time.Time{}
}

// RequestID returns the x-ms-request-id response header.
func (r DirectoryGetAccessControlResponse) RequestID() string {
	return r.rawResponse.Header.Get("x-ms-request-id")
}

// Version returns the x-ms-version response header.
func (r DirectoryGetAccessControlResponse) Version() string {
	return r.rawResponse.Header.Get("x-ms-version")
}

// XMsACL returns the x-ms-acl response header.
func (r DirectoryGetAccessControlResponse) XMsACL() string {
	return r.rawResponse.Header.Get("x-ms-acl")
}

// XMsGroup returns the x-ms-group response header.
func (r DirectoryGetAccessControlResponse) XMsGroup() string {
	return r.rawResponse.Header.Get("x-ms-group")
}

// XMsOwner returns the x-ms-owner response header.
func (r DirectoryGetAccessControlResponse) XMsOwner() string {
	return r.rawResponse.Header.Get("x-ms-owner")
}

// XMsPermissions returns the x-ms-permissions response header.
func (r DirectoryGetAccessControlResponse) XMsPermissions() string {
	return r.rawResponse.Header.Get("x-ms-permissions")
}

// DirectoryRenameResponse wraps the raw HTTP response of the corresponding
// directory operation.
type DirectoryRenameResponse struct {
	rawResponse *http.Response
}

// Response returns the raw HTTP response object.
func (r DirectoryRenameResponse) Response() *http.Response {
	return r.rawResponse
}

// StatusCode returns the HTTP status code of the response, e.g. 200.
func (r DirectoryRenameResponse) StatusCode() int {
	return r.rawResponse.StatusCode
}

// Status returns the HTTP status message of the response, e.g. "200 OK".
func (drr DirectoryRenameResponse) Status() string { return drr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (drr DirectoryRenameResponse) ClientRequestID() string { return drr.rawResponse.Header.Get("x-ms-client-request-id") } // ContentLength returns the value for header Content-Length. func (drr DirectoryRenameResponse) ContentLength() int64 { s := drr.rawResponse.Header.Get("Content-Length") if s == "" { return -1 } i, err := strconv.ParseInt(s, 10, 64) if err != nil { i = 0 } return i } // Date returns the value for header Date. func (drr DirectoryRenameResponse) Date() time.Time { s := drr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ETag returns the value for header ETag. func (drr DirectoryRenameResponse) ETag() ETag { return ETag(drr.rawResponse.Header.Get("ETag")) } // LastModified returns the value for header Last-Modified. func (drr DirectoryRenameResponse) LastModified() time.Time { s := drr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // Marker returns the value for header x-ms-continuation. func (drr DirectoryRenameResponse) Marker() string { return drr.rawResponse.Header.Get("x-ms-continuation") } // RequestID returns the value for header x-ms-request-id. func (drr DirectoryRenameResponse) RequestID() string { return drr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (drr DirectoryRenameResponse) Version() string { return drr.rawResponse.Header.Get("x-ms-version") } // DirectorySetAccessControlResponse ... type DirectorySetAccessControlResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. 
func (dsacr DirectorySetAccessControlResponse) Response() *http.Response { return dsacr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (dsacr DirectorySetAccessControlResponse) StatusCode() int { return dsacr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (dsacr DirectorySetAccessControlResponse) Status() string { return dsacr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (dsacr DirectorySetAccessControlResponse) ClientRequestID() string { return dsacr.rawResponse.Header.Get("x-ms-client-request-id") } // Date returns the value for header Date. func (dsacr DirectorySetAccessControlResponse) Date() time.Time { s := dsacr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ETag returns the value for header ETag. func (dsacr DirectorySetAccessControlResponse) ETag() ETag { return ETag(dsacr.rawResponse.Header.Get("ETag")) } // LastModified returns the value for header Last-Modified. func (dsacr DirectorySetAccessControlResponse) LastModified() time.Time { s := dsacr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (dsacr DirectorySetAccessControlResponse) RequestID() string { return dsacr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (dsacr DirectorySetAccessControlResponse) Version() string { return dsacr.rawResponse.Header.Get("x-ms-version") } // downloadResponse - Wraps the response from the blobClient.Download method. type downloadResponse struct { rawResponse *http.Response } // NewMetadata returns user-defined key/value pairs. 
func (dr downloadResponse) NewMetadata() Metadata { md := Metadata{} for k, v := range dr.rawResponse.Header { if len(k) > mdPrefixLen { if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) { md[strings.ToLower(k[mdPrefixLen:])] = v[0] } } } return md } // Response returns the raw HTTP response object. func (dr downloadResponse) Response() *http.Response { return dr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (dr downloadResponse) StatusCode() int { return dr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (dr downloadResponse) Status() string { return dr.rawResponse.Status } // Body returns the raw HTTP response object's Body. func (dr downloadResponse) Body() io.ReadCloser { return dr.rawResponse.Body } // AcceptRanges returns the value for header Accept-Ranges. func (dr downloadResponse) AcceptRanges() string { return dr.rawResponse.Header.Get("Accept-Ranges") } // BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. func (dr downloadResponse) BlobCommittedBlockCount() int32 { s := dr.rawResponse.Header.Get("x-ms-blob-committed-block-count") if s == "" { return -1 } i, err := strconv.ParseInt(s, 10, 32) if err != nil { i = 0 } return int32(i) } // BlobContentMD5 returns the value for header x-ms-blob-content-md5. func (dr downloadResponse) BlobContentMD5() []byte { s := dr.rawResponse.Header.Get("x-ms-blob-content-md5") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. func (dr downloadResponse) BlobSequenceNumber() int64 { s := dr.rawResponse.Header.Get("x-ms-blob-sequence-number") if s == "" { return -1 } i, err := strconv.ParseInt(s, 10, 64) if err != nil { i = 0 } return i } // BlobType returns the value for header x-ms-blob-type. 
func (dr downloadResponse) BlobType() BlobType { return BlobType(dr.rawResponse.Header.Get("x-ms-blob-type")) } // CacheControl returns the value for header Cache-Control. func (dr downloadResponse) CacheControl() string { return dr.rawResponse.Header.Get("Cache-Control") } // ClientRequestID returns the value for header x-ms-client-request-id. func (dr downloadResponse) ClientRequestID() string { return dr.rawResponse.Header.Get("x-ms-client-request-id") } // ContentCrc64 returns the value for header x-ms-content-crc64. func (dr downloadResponse) ContentCrc64() []byte { s := dr.rawResponse.Header.Get("x-ms-content-crc64") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // ContentDisposition returns the value for header Content-Disposition. func (dr downloadResponse) ContentDisposition() string { return dr.rawResponse.Header.Get("Content-Disposition") } // ContentEncoding returns the value for header Content-Encoding. func (dr downloadResponse) ContentEncoding() string { return dr.rawResponse.Header.Get("Content-Encoding") } // ContentLanguage returns the value for header Content-Language. func (dr downloadResponse) ContentLanguage() string { return dr.rawResponse.Header.Get("Content-Language") } // ContentLength returns the value for header Content-Length. func (dr downloadResponse) ContentLength() int64 { s := dr.rawResponse.Header.Get("Content-Length") if s == "" { return -1 } i, err := strconv.ParseInt(s, 10, 64) if err != nil { i = 0 } return i } // ContentMD5 returns the value for header Content-MD5. func (dr downloadResponse) ContentMD5() []byte { s := dr.rawResponse.Header.Get("Content-MD5") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // ContentRange returns the value for header Content-Range. 
func (dr downloadResponse) ContentRange() string { return dr.rawResponse.Header.Get("Content-Range") } // ContentType returns the value for header Content-Type. func (dr downloadResponse) ContentType() string { return dr.rawResponse.Header.Get("Content-Type") } // CopyCompletionTime returns the value for header x-ms-copy-completion-time. func (dr downloadResponse) CopyCompletionTime() time.Time { s := dr.rawResponse.Header.Get("x-ms-copy-completion-time") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // CopyID returns the value for header x-ms-copy-id. func (dr downloadResponse) CopyID() string { return dr.rawResponse.Header.Get("x-ms-copy-id") } // CopyProgress returns the value for header x-ms-copy-progress. func (dr downloadResponse) CopyProgress() string { return dr.rawResponse.Header.Get("x-ms-copy-progress") } // CopySource returns the value for header x-ms-copy-source. func (dr downloadResponse) CopySource() string { return dr.rawResponse.Header.Get("x-ms-copy-source") } // CopyStatus returns the value for header x-ms-copy-status. func (dr downloadResponse) CopyStatus() CopyStatusType { return CopyStatusType(dr.rawResponse.Header.Get("x-ms-copy-status")) } // CopyStatusDescription returns the value for header x-ms-copy-status-description. func (dr downloadResponse) CopyStatusDescription() string { return dr.rawResponse.Header.Get("x-ms-copy-status-description") } // Date returns the value for header Date. func (dr downloadResponse) Date() time.Time { s := dr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. func (dr downloadResponse) EncryptionKeySha256() string { return dr.rawResponse.Header.Get("x-ms-encryption-key-sha256") } // ErrorCode returns the value for header x-ms-error-code. 
func (dr downloadResponse) ErrorCode() string { return dr.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (dr downloadResponse) ETag() ETag { return ETag(dr.rawResponse.Header.Get("ETag")) } // IsServerEncrypted returns the value for header x-ms-server-encrypted. func (dr downloadResponse) IsServerEncrypted() string { return dr.rawResponse.Header.Get("x-ms-server-encrypted") } // LastModified returns the value for header Last-Modified. func (dr downloadResponse) LastModified() time.Time { s := dr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // LeaseDuration returns the value for header x-ms-lease-duration. func (dr downloadResponse) LeaseDuration() LeaseDurationType { return LeaseDurationType(dr.rawResponse.Header.Get("x-ms-lease-duration")) } // LeaseState returns the value for header x-ms-lease-state. func (dr downloadResponse) LeaseState() LeaseStateType { return LeaseStateType(dr.rawResponse.Header.Get("x-ms-lease-state")) } // LeaseStatus returns the value for header x-ms-lease-status. func (dr downloadResponse) LeaseStatus() LeaseStatusType { return LeaseStatusType(dr.rawResponse.Header.Get("x-ms-lease-status")) } // RequestID returns the value for header x-ms-request-id. func (dr downloadResponse) RequestID() string { return dr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (dr downloadResponse) Version() string { return dr.rawResponse.Header.Get("x-ms-version") } // GeoReplication - Geo-Replication information for the Secondary Storage Service type GeoReplication struct { // Status - The status of the secondary location. 
Possible values include: 'GeoReplicationStatusLive', 'GeoReplicationStatusBootstrap', 'GeoReplicationStatusUnavailable', 'GeoReplicationStatusNone' Status GeoReplicationStatusType `xml:"Status"` // LastSyncTime - A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available for read operations at the secondary. Primary writes after this point in time may or may not be available for reads. LastSyncTime time.Time `xml:"LastSyncTime"` } // MarshalXML implements the xml.Marshaler interface for GeoReplication. func (gr GeoReplication) MarshalXML(e *xml.Encoder, start xml.StartElement) error { gr2 := (*geoReplication)(unsafe.Pointer(&gr)) return e.EncodeElement(*gr2, start) } // UnmarshalXML implements the xml.Unmarshaler interface for GeoReplication. func (gr *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { gr2 := (*geoReplication)(unsafe.Pointer(gr)) return d.DecodeElement(gr2, &start) } // KeyInfo - Key information type KeyInfo struct { // Start - The date-time the key is active in ISO 8601 UTC time Start string `xml:"Start"` // Expiry - The date-time the key expires in ISO 8601 UTC time Expiry string `xml:"Expiry"` } // ListBlobsFlatSegmentResponse - An enumeration of blobs type ListBlobsFlatSegmentResponse struct { rawResponse *http.Response // XMLName is used for marshalling and is subject to removal in a future release. XMLName xml.Name `xml:"EnumerationResults"` ServiceEndpoint string `xml:"ServiceEndpoint,attr"` ContainerName string `xml:"ContainerName,attr"` Prefix *string `xml:"Prefix"` Marker *string `xml:"Marker"` MaxResults *int32 `xml:"MaxResults"` Segment BlobFlatListSegment `xml:"Blobs"` NextMarker Marker `xml:"NextMarker"` } // Response returns the raw HTTP response object. func (lbfsr ListBlobsFlatSegmentResponse) Response() *http.Response { return lbfsr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. 
func (lbfsr ListBlobsFlatSegmentResponse) StatusCode() int { return lbfsr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (lbfsr ListBlobsFlatSegmentResponse) Status() string { return lbfsr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (lbfsr ListBlobsFlatSegmentResponse) ClientRequestID() string { return lbfsr.rawResponse.Header.Get("x-ms-client-request-id") } // ContentType returns the value for header Content-Type. func (lbfsr ListBlobsFlatSegmentResponse) ContentType() string { return lbfsr.rawResponse.Header.Get("Content-Type") } // Date returns the value for header Date. func (lbfsr ListBlobsFlatSegmentResponse) Date() time.Time { s := lbfsr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (lbfsr ListBlobsFlatSegmentResponse) ErrorCode() string { return lbfsr.rawResponse.Header.Get("x-ms-error-code") } // RequestID returns the value for header x-ms-request-id. func (lbfsr ListBlobsFlatSegmentResponse) RequestID() string { return lbfsr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (lbfsr ListBlobsFlatSegmentResponse) Version() string { return lbfsr.rawResponse.Header.Get("x-ms-version") } // ListBlobsHierarchySegmentResponse - An enumeration of blobs type ListBlobsHierarchySegmentResponse struct { rawResponse *http.Response // XMLName is used for marshalling and is subject to removal in a future release. 
XMLName xml.Name `xml:"EnumerationResults"` ServiceEndpoint string `xml:"ServiceEndpoint,attr"` ContainerName string `xml:"ContainerName,attr"` Prefix *string `xml:"Prefix"` Marker *string `xml:"Marker"` MaxResults *int32 `xml:"MaxResults"` Delimiter *string `xml:"Delimiter"` Segment BlobHierarchyListSegment `xml:"Blobs"` NextMarker Marker `xml:"NextMarker"` } // Response returns the raw HTTP response object. func (lbhsr ListBlobsHierarchySegmentResponse) Response() *http.Response { return lbhsr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (lbhsr ListBlobsHierarchySegmentResponse) StatusCode() int { return lbhsr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (lbhsr ListBlobsHierarchySegmentResponse) Status() string { return lbhsr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (lbhsr ListBlobsHierarchySegmentResponse) ClientRequestID() string { return lbhsr.rawResponse.Header.Get("x-ms-client-request-id") } // ContentType returns the value for header Content-Type. func (lbhsr ListBlobsHierarchySegmentResponse) ContentType() string { return lbhsr.rawResponse.Header.Get("Content-Type") } // Date returns the value for header Date. func (lbhsr ListBlobsHierarchySegmentResponse) Date() time.Time { s := lbhsr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (lbhsr ListBlobsHierarchySegmentResponse) ErrorCode() string { return lbhsr.rawResponse.Header.Get("x-ms-error-code") } // RequestID returns the value for header x-ms-request-id. func (lbhsr ListBlobsHierarchySegmentResponse) RequestID() string { return lbhsr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. 
func (lbhsr ListBlobsHierarchySegmentResponse) Version() string { return lbhsr.rawResponse.Header.Get("x-ms-version") } // ListContainersSegmentResponse - An enumeration of containers type ListContainersSegmentResponse struct { rawResponse *http.Response // XMLName is used for marshalling and is subject to removal in a future release. XMLName xml.Name `xml:"EnumerationResults"` ServiceEndpoint string `xml:"ServiceEndpoint,attr"` Prefix *string `xml:"Prefix"` Marker *string `xml:"Marker"` MaxResults *int32 `xml:"MaxResults"` ContainerItems []ContainerItem `xml:"Containers>Container"` NextMarker Marker `xml:"NextMarker"` } // Response returns the raw HTTP response object. func (lcsr ListContainersSegmentResponse) Response() *http.Response { return lcsr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (lcsr ListContainersSegmentResponse) StatusCode() int { return lcsr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (lcsr ListContainersSegmentResponse) Status() string { return lcsr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (lcsr ListContainersSegmentResponse) ClientRequestID() string { return lcsr.rawResponse.Header.Get("x-ms-client-request-id") } // ErrorCode returns the value for header x-ms-error-code. func (lcsr ListContainersSegmentResponse) ErrorCode() string { return lcsr.rawResponse.Header.Get("x-ms-error-code") } // RequestID returns the value for header x-ms-request-id. func (lcsr ListContainersSegmentResponse) RequestID() string { return lcsr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (lcsr ListContainersSegmentResponse) Version() string { return lcsr.rawResponse.Header.Get("x-ms-version") } // Logging - Azure Analytics Logging settings. type Logging struct { // Version - The version of Storage Analytics to configure. 
Version string `xml:"Version"` // Delete - Indicates whether all delete requests should be logged. Delete bool `xml:"Delete"` // Read - Indicates whether all read requests should be logged. Read bool `xml:"Read"` // Write - Indicates whether all write requests should be logged. Write bool `xml:"Write"` RetentionPolicy RetentionPolicy `xml:"RetentionPolicy"` } // Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs type Metrics struct { // Version - The version of Storage Analytics to configure. Version *string `xml:"Version"` // Enabled - Indicates whether metrics are enabled for the Blob service. Enabled bool `xml:"Enabled"` // IncludeAPIs - Indicates whether metrics should generate summary statistics for called API operations. IncludeAPIs *bool `xml:"IncludeAPIs"` RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` } // PageBlobClearPagesResponse ... type PageBlobClearPagesResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (pbcpr PageBlobClearPagesResponse) Response() *http.Response { return pbcpr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (pbcpr PageBlobClearPagesResponse) StatusCode() int { return pbcpr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (pbcpr PageBlobClearPagesResponse) Status() string { return pbcpr.rawResponse.Status } // BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. func (pbcpr PageBlobClearPagesResponse) BlobSequenceNumber() int64 { s := pbcpr.rawResponse.Header.Get("x-ms-blob-sequence-number") if s == "" { return -1 } i, err := strconv.ParseInt(s, 10, 64) if err != nil { i = 0 } return i } // ClientRequestID returns the value for header x-ms-client-request-id. 
func (pbcpr PageBlobClearPagesResponse) ClientRequestID() string { return pbcpr.rawResponse.Header.Get("x-ms-client-request-id") } // ContentMD5 returns the value for header Content-MD5. func (pbcpr PageBlobClearPagesResponse) ContentMD5() []byte { s := pbcpr.rawResponse.Header.Get("Content-MD5") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // Date returns the value for header Date. func (pbcpr PageBlobClearPagesResponse) Date() time.Time { s := pbcpr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (pbcpr PageBlobClearPagesResponse) ErrorCode() string { return pbcpr.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (pbcpr PageBlobClearPagesResponse) ETag() ETag { return ETag(pbcpr.rawResponse.Header.Get("ETag")) } // LastModified returns the value for header Last-Modified. func (pbcpr PageBlobClearPagesResponse) LastModified() time.Time { s := pbcpr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (pbcpr PageBlobClearPagesResponse) RequestID() string { return pbcpr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (pbcpr PageBlobClearPagesResponse) Version() string { return pbcpr.rawResponse.Header.Get("x-ms-version") } // XMsContentCrc64 returns the value for header x-ms-content-crc64. func (pbcpr PageBlobClearPagesResponse) XMsContentCrc64() []byte { s := pbcpr.rawResponse.Header.Get("x-ms-content-crc64") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // PageBlobCopyIncrementalResponse ... 
type PageBlobCopyIncrementalResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (pbcir PageBlobCopyIncrementalResponse) Response() *http.Response { return pbcir.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (pbcir PageBlobCopyIncrementalResponse) StatusCode() int { return pbcir.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (pbcir PageBlobCopyIncrementalResponse) Status() string { return pbcir.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (pbcir PageBlobCopyIncrementalResponse) ClientRequestID() string { return pbcir.rawResponse.Header.Get("x-ms-client-request-id") } // CopyID returns the value for header x-ms-copy-id. func (pbcir PageBlobCopyIncrementalResponse) CopyID() string { return pbcir.rawResponse.Header.Get("x-ms-copy-id") } // CopyStatus returns the value for header x-ms-copy-status. func (pbcir PageBlobCopyIncrementalResponse) CopyStatus() CopyStatusType { return CopyStatusType(pbcir.rawResponse.Header.Get("x-ms-copy-status")) } // Date returns the value for header Date. func (pbcir PageBlobCopyIncrementalResponse) Date() time.Time { s := pbcir.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (pbcir PageBlobCopyIncrementalResponse) ErrorCode() string { return pbcir.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (pbcir PageBlobCopyIncrementalResponse) ETag() ETag { return ETag(pbcir.rawResponse.Header.Get("ETag")) } // LastModified returns the value for header Last-Modified. 
func (pbcir PageBlobCopyIncrementalResponse) LastModified() time.Time { s := pbcir.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (pbcir PageBlobCopyIncrementalResponse) RequestID() string { return pbcir.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (pbcir PageBlobCopyIncrementalResponse) Version() string { return pbcir.rawResponse.Header.Get("x-ms-version") } // PageBlobCreateResponse ... type PageBlobCreateResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (pbcr PageBlobCreateResponse) Response() *http.Response { return pbcr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (pbcr PageBlobCreateResponse) StatusCode() int { return pbcr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (pbcr PageBlobCreateResponse) Status() string { return pbcr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (pbcr PageBlobCreateResponse) ClientRequestID() string { return pbcr.rawResponse.Header.Get("x-ms-client-request-id") } // ContentMD5 returns the value for header Content-MD5. func (pbcr PageBlobCreateResponse) ContentMD5() []byte { s := pbcr.rawResponse.Header.Get("Content-MD5") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // Date returns the value for header Date. func (pbcr PageBlobCreateResponse) Date() time.Time { s := pbcr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. 
func (pbcr PageBlobCreateResponse) EncryptionKeySha256() string { return pbcr.rawResponse.Header.Get("x-ms-encryption-key-sha256") } // ErrorCode returns the value for header x-ms-error-code. func (pbcr PageBlobCreateResponse) ErrorCode() string { return pbcr.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (pbcr PageBlobCreateResponse) ETag() ETag { return ETag(pbcr.rawResponse.Header.Get("ETag")) } // IsServerEncrypted returns the value for header x-ms-request-server-encrypted. func (pbcr PageBlobCreateResponse) IsServerEncrypted() string { return pbcr.rawResponse.Header.Get("x-ms-request-server-encrypted") } // LastModified returns the value for header Last-Modified. func (pbcr PageBlobCreateResponse) LastModified() time.Time { s := pbcr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (pbcr PageBlobCreateResponse) RequestID() string { return pbcr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (pbcr PageBlobCreateResponse) Version() string { return pbcr.rawResponse.Header.Get("x-ms-version") } // PageBlobResizeResponse ... type PageBlobResizeResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (pbrr PageBlobResizeResponse) Response() *http.Response { return pbrr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (pbrr PageBlobResizeResponse) StatusCode() int { return pbrr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (pbrr PageBlobResizeResponse) Status() string { return pbrr.rawResponse.Status } // BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. 
func (pbrr PageBlobResizeResponse) BlobSequenceNumber() int64 { s := pbrr.rawResponse.Header.Get("x-ms-blob-sequence-number") if s == "" { return -1 } i, err := strconv.ParseInt(s, 10, 64) if err != nil { i = 0 } return i } // ClientRequestID returns the value for header x-ms-client-request-id. func (pbrr PageBlobResizeResponse) ClientRequestID() string { return pbrr.rawResponse.Header.Get("x-ms-client-request-id") } // Date returns the value for header Date. func (pbrr PageBlobResizeResponse) Date() time.Time { s := pbrr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (pbrr PageBlobResizeResponse) ErrorCode() string { return pbrr.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (pbrr PageBlobResizeResponse) ETag() ETag { return ETag(pbrr.rawResponse.Header.Get("ETag")) } // LastModified returns the value for header Last-Modified. func (pbrr PageBlobResizeResponse) LastModified() time.Time { s := pbrr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (pbrr PageBlobResizeResponse) RequestID() string { return pbrr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (pbrr PageBlobResizeResponse) Version() string { return pbrr.rawResponse.Header.Get("x-ms-version") } // PageBlobUpdateSequenceNumberResponse ... type PageBlobUpdateSequenceNumberResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (pbusnr PageBlobUpdateSequenceNumberResponse) Response() *http.Response { return pbusnr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. 
func (pbusnr PageBlobUpdateSequenceNumberResponse) StatusCode() int { return pbusnr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (pbusnr PageBlobUpdateSequenceNumberResponse) Status() string { return pbusnr.rawResponse.Status } // BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. func (pbusnr PageBlobUpdateSequenceNumberResponse) BlobSequenceNumber() int64 { s := pbusnr.rawResponse.Header.Get("x-ms-blob-sequence-number") if s == "" { return -1 } i, err := strconv.ParseInt(s, 10, 64) if err != nil { i = 0 } return i } // ClientRequestID returns the value for header x-ms-client-request-id. func (pbusnr PageBlobUpdateSequenceNumberResponse) ClientRequestID() string { return pbusnr.rawResponse.Header.Get("x-ms-client-request-id") } // Date returns the value for header Date. func (pbusnr PageBlobUpdateSequenceNumberResponse) Date() time.Time { s := pbusnr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (pbusnr PageBlobUpdateSequenceNumberResponse) ErrorCode() string { return pbusnr.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (pbusnr PageBlobUpdateSequenceNumberResponse) ETag() ETag { return ETag(pbusnr.rawResponse.Header.Get("ETag")) } // LastModified returns the value for header Last-Modified. func (pbusnr PageBlobUpdateSequenceNumberResponse) LastModified() time.Time { s := pbusnr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (pbusnr PageBlobUpdateSequenceNumberResponse) RequestID() string { return pbusnr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. 
func (pbusnr PageBlobUpdateSequenceNumberResponse) Version() string { return pbusnr.rawResponse.Header.Get("x-ms-version") } // PageBlobUploadPagesFromURLResponse ... type PageBlobUploadPagesFromURLResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (pbupfur PageBlobUploadPagesFromURLResponse) Response() *http.Response { return pbupfur.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (pbupfur PageBlobUploadPagesFromURLResponse) StatusCode() int { return pbupfur.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (pbupfur PageBlobUploadPagesFromURLResponse) Status() string { return pbupfur.rawResponse.Status } // BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. func (pbupfur PageBlobUploadPagesFromURLResponse) BlobSequenceNumber() int64 { s := pbupfur.rawResponse.Header.Get("x-ms-blob-sequence-number") if s == "" { return -1 } i, err := strconv.ParseInt(s, 10, 64) if err != nil { i = 0 } return i } // ContentMD5 returns the value for header Content-MD5. func (pbupfur PageBlobUploadPagesFromURLResponse) ContentMD5() []byte { s := pbupfur.rawResponse.Header.Get("Content-MD5") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // Date returns the value for header Date. func (pbupfur PageBlobUploadPagesFromURLResponse) Date() time.Time { s := pbupfur.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. func (pbupfur PageBlobUploadPagesFromURLResponse) EncryptionKeySha256() string { return pbupfur.rawResponse.Header.Get("x-ms-encryption-key-sha256") } // ErrorCode returns the value for header x-ms-error-code. 
func (pbupfur PageBlobUploadPagesFromURLResponse) ErrorCode() string { return pbupfur.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (pbupfur PageBlobUploadPagesFromURLResponse) ETag() ETag { return ETag(pbupfur.rawResponse.Header.Get("ETag")) } // IsServerEncrypted returns the value for header x-ms-request-server-encrypted. func (pbupfur PageBlobUploadPagesFromURLResponse) IsServerEncrypted() string { return pbupfur.rawResponse.Header.Get("x-ms-request-server-encrypted") } // LastModified returns the value for header Last-Modified. func (pbupfur PageBlobUploadPagesFromURLResponse) LastModified() time.Time { s := pbupfur.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (pbupfur PageBlobUploadPagesFromURLResponse) RequestID() string { return pbupfur.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (pbupfur PageBlobUploadPagesFromURLResponse) Version() string { return pbupfur.rawResponse.Header.Get("x-ms-version") } // XMsContentCrc64 returns the value for header x-ms-content-crc64. func (pbupfur PageBlobUploadPagesFromURLResponse) XMsContentCrc64() []byte { s := pbupfur.rawResponse.Header.Get("x-ms-content-crc64") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // PageBlobUploadPagesResponse ... type PageBlobUploadPagesResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (pbupr PageBlobUploadPagesResponse) Response() *http.Response { return pbupr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (pbupr PageBlobUploadPagesResponse) StatusCode() int { return pbupr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. 
"200 OK". func (pbupr PageBlobUploadPagesResponse) Status() string { return pbupr.rawResponse.Status } // BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. func (pbupr PageBlobUploadPagesResponse) BlobSequenceNumber() int64 { s := pbupr.rawResponse.Header.Get("x-ms-blob-sequence-number") if s == "" { return -1 } i, err := strconv.ParseInt(s, 10, 64) if err != nil { i = 0 } return i } // ClientRequestID returns the value for header x-ms-client-request-id. func (pbupr PageBlobUploadPagesResponse) ClientRequestID() string { return pbupr.rawResponse.Header.Get("x-ms-client-request-id") } // ContentMD5 returns the value for header Content-MD5. func (pbupr PageBlobUploadPagesResponse) ContentMD5() []byte { s := pbupr.rawResponse.Header.Get("Content-MD5") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // Date returns the value for header Date. func (pbupr PageBlobUploadPagesResponse) Date() time.Time { s := pbupr.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. func (pbupr PageBlobUploadPagesResponse) EncryptionKeySha256() string { return pbupr.rawResponse.Header.Get("x-ms-encryption-key-sha256") } // ErrorCode returns the value for header x-ms-error-code. func (pbupr PageBlobUploadPagesResponse) ErrorCode() string { return pbupr.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (pbupr PageBlobUploadPagesResponse) ETag() ETag { return ETag(pbupr.rawResponse.Header.Get("ETag")) } // IsServerEncrypted returns the value for header x-ms-request-server-encrypted. func (pbupr PageBlobUploadPagesResponse) IsServerEncrypted() string { return pbupr.rawResponse.Header.Get("x-ms-request-server-encrypted") } // LastModified returns the value for header Last-Modified. 
func (pbupr PageBlobUploadPagesResponse) LastModified() time.Time { s := pbupr.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (pbupr PageBlobUploadPagesResponse) RequestID() string { return pbupr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (pbupr PageBlobUploadPagesResponse) Version() string { return pbupr.rawResponse.Header.Get("x-ms-version") } // XMsContentCrc64 returns the value for header x-ms-content-crc64. func (pbupr PageBlobUploadPagesResponse) XMsContentCrc64() []byte { s := pbupr.rawResponse.Header.Get("x-ms-content-crc64") if s == "" { return nil } b, err := base64.StdEncoding.DecodeString(s) if err != nil { b = nil } return b } // PageList - the list of pages type PageList struct { rawResponse *http.Response PageRange []PageRange `xml:"PageRange"` ClearRange []ClearRange `xml:"ClearRange"` } // Response returns the raw HTTP response object. func (pl PageList) Response() *http.Response { return pl.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (pl PageList) StatusCode() int { return pl.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (pl PageList) Status() string { return pl.rawResponse.Status } // BlobContentLength returns the value for header x-ms-blob-content-length. func (pl PageList) BlobContentLength() int64 { s := pl.rawResponse.Header.Get("x-ms-blob-content-length") if s == "" { return -1 } i, err := strconv.ParseInt(s, 10, 64) if err != nil { i = 0 } return i } // ClientRequestID returns the value for header x-ms-client-request-id. func (pl PageList) ClientRequestID() string { return pl.rawResponse.Header.Get("x-ms-client-request-id") } // Date returns the value for header Date. 
func (pl PageList) Date() time.Time { s := pl.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (pl PageList) ErrorCode() string { return pl.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (pl PageList) ETag() ETag { return ETag(pl.rawResponse.Header.Get("ETag")) } // LastModified returns the value for header Last-Modified. func (pl PageList) LastModified() time.Time { s := pl.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (pl PageList) RequestID() string { return pl.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (pl PageList) Version() string { return pl.rawResponse.Header.Get("x-ms-version") } // PageRange ... type PageRange struct { Start int64 `xml:"Start"` End int64 `xml:"End"` } // RetentionPolicy - the retention policy which determines how long the associated data should persist type RetentionPolicy struct { // Enabled - Indicates whether a retention policy is enabled for the storage service Enabled bool `xml:"Enabled"` // Days - Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this value will be deleted Days *int32 `xml:"Days"` } // ServiceGetAccountInfoResponse ... type ServiceGetAccountInfoResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (sgair ServiceGetAccountInfoResponse) Response() *http.Response { return sgair.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. 
func (sgair ServiceGetAccountInfoResponse) StatusCode() int { return sgair.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (sgair ServiceGetAccountInfoResponse) Status() string { return sgair.rawResponse.Status } // AccountKind returns the value for header x-ms-account-kind. func (sgair ServiceGetAccountInfoResponse) AccountKind() AccountKindType { return AccountKindType(sgair.rawResponse.Header.Get("x-ms-account-kind")) } // ClientRequestID returns the value for header x-ms-client-request-id. func (sgair ServiceGetAccountInfoResponse) ClientRequestID() string { return sgair.rawResponse.Header.Get("x-ms-client-request-id") } // Date returns the value for header Date. func (sgair ServiceGetAccountInfoResponse) Date() time.Time { s := sgair.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (sgair ServiceGetAccountInfoResponse) ErrorCode() string { return sgair.rawResponse.Header.Get("x-ms-error-code") } // RequestID returns the value for header x-ms-request-id. func (sgair ServiceGetAccountInfoResponse) RequestID() string { return sgair.rawResponse.Header.Get("x-ms-request-id") } // SkuName returns the value for header x-ms-sku-name. func (sgair ServiceGetAccountInfoResponse) SkuName() SkuNameType { return SkuNameType(sgair.rawResponse.Header.Get("x-ms-sku-name")) } // Version returns the value for header x-ms-version. func (sgair ServiceGetAccountInfoResponse) Version() string { return sgair.rawResponse.Header.Get("x-ms-version") } // ServiceSetPropertiesResponse ... type ServiceSetPropertiesResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (sspr ServiceSetPropertiesResponse) Response() *http.Response { return sspr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. 
func (sspr ServiceSetPropertiesResponse) StatusCode() int { return sspr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (sspr ServiceSetPropertiesResponse) Status() string { return sspr.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (sspr ServiceSetPropertiesResponse) ClientRequestID() string { return sspr.rawResponse.Header.Get("x-ms-client-request-id") } // ErrorCode returns the value for header x-ms-error-code. func (sspr ServiceSetPropertiesResponse) ErrorCode() string { return sspr.rawResponse.Header.Get("x-ms-error-code") } // RequestID returns the value for header x-ms-request-id. func (sspr ServiceSetPropertiesResponse) RequestID() string { return sspr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (sspr ServiceSetPropertiesResponse) Version() string { return sspr.rawResponse.Header.Get("x-ms-version") } // SignedIdentifier - signed identifier type SignedIdentifier struct { // ID - a unique id ID string `xml:"Id"` AccessPolicy AccessPolicy `xml:"AccessPolicy"` } // SignedIdentifiers - Wraps the response from the containerClient.GetAccessPolicy method. type SignedIdentifiers struct { rawResponse *http.Response Items []SignedIdentifier `xml:"SignedIdentifier"` } // Response returns the raw HTTP response object. func (si SignedIdentifiers) Response() *http.Response { return si.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (si SignedIdentifiers) StatusCode() int { return si.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (si SignedIdentifiers) Status() string { return si.rawResponse.Status } // BlobPublicAccess returns the value for header x-ms-blob-public-access. 
func (si SignedIdentifiers) BlobPublicAccess() PublicAccessType { return PublicAccessType(si.rawResponse.Header.Get("x-ms-blob-public-access")) } // ClientRequestID returns the value for header x-ms-client-request-id. func (si SignedIdentifiers) ClientRequestID() string { return si.rawResponse.Header.Get("x-ms-client-request-id") } // Date returns the value for header Date. func (si SignedIdentifiers) Date() time.Time { s := si.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (si SignedIdentifiers) ErrorCode() string { return si.rawResponse.Header.Get("x-ms-error-code") } // ETag returns the value for header ETag. func (si SignedIdentifiers) ETag() ETag { return ETag(si.rawResponse.Header.Get("ETag")) } // LastModified returns the value for header Last-Modified. func (si SignedIdentifiers) LastModified() time.Time { s := si.rawResponse.Header.Get("Last-Modified") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // RequestID returns the value for header x-ms-request-id. func (si SignedIdentifiers) RequestID() string { return si.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (si SignedIdentifiers) Version() string { return si.rawResponse.Header.Get("x-ms-version") } // StaticWebsite - The properties that enable an account to host a static website type StaticWebsite struct { // Enabled - Indicates whether this account is hosting a static website Enabled bool `xml:"Enabled"` // IndexDocument - The default name of the index page under each directory IndexDocument *string `xml:"IndexDocument"` // ErrorDocument404Path - The absolute path of the custom 404 page ErrorDocument404Path *string `xml:"ErrorDocument404Path"` } // StorageServiceProperties - Storage Service Properties. 
type StorageServiceProperties struct { rawResponse *http.Response Logging *Logging `xml:"Logging"` HourMetrics *Metrics `xml:"HourMetrics"` MinuteMetrics *Metrics `xml:"MinuteMetrics"` // Cors - The set of CORS rules. Cors []CorsRule `xml:"Cors>CorsRule"` // DefaultServiceVersion - The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible values include version 2008-10-27 and all more recent versions DefaultServiceVersion *string `xml:"DefaultServiceVersion"` DeleteRetentionPolicy *RetentionPolicy `xml:"DeleteRetentionPolicy"` StaticWebsite *StaticWebsite `xml:"StaticWebsite"` } // Response returns the raw HTTP response object. func (ssp StorageServiceProperties) Response() *http.Response { return ssp.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (ssp StorageServiceProperties) StatusCode() int { return ssp.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (ssp StorageServiceProperties) Status() string { return ssp.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (ssp StorageServiceProperties) ClientRequestID() string { return ssp.rawResponse.Header.Get("x-ms-client-request-id") } // ErrorCode returns the value for header x-ms-error-code. func (ssp StorageServiceProperties) ErrorCode() string { return ssp.rawResponse.Header.Get("x-ms-error-code") } // RequestID returns the value for header x-ms-request-id. func (ssp StorageServiceProperties) RequestID() string { return ssp.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (ssp StorageServiceProperties) Version() string { return ssp.rawResponse.Header.Get("x-ms-version") } // StorageServiceStats - Stats for the storage service. 
type StorageServiceStats struct { rawResponse *http.Response GeoReplication *GeoReplication `xml:"GeoReplication"` } // Response returns the raw HTTP response object. func (sss StorageServiceStats) Response() *http.Response { return sss.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (sss StorageServiceStats) StatusCode() int { return sss.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (sss StorageServiceStats) Status() string { return sss.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (sss StorageServiceStats) ClientRequestID() string { return sss.rawResponse.Header.Get("x-ms-client-request-id") } // Date returns the value for header Date. func (sss StorageServiceStats) Date() time.Time { s := sss.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (sss StorageServiceStats) ErrorCode() string { return sss.rawResponse.Header.Get("x-ms-error-code") } // RequestID returns the value for header x-ms-request-id. func (sss StorageServiceStats) RequestID() string { return sss.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (sss StorageServiceStats) Version() string { return sss.rawResponse.Header.Get("x-ms-version") } // SubmitBatchResponse - Wraps the response from the serviceClient.SubmitBatch method. type SubmitBatchResponse struct { rawResponse *http.Response } // Response returns the raw HTTP response object. func (sbr SubmitBatchResponse) Response() *http.Response { return sbr.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (sbr SubmitBatchResponse) StatusCode() int { return sbr.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. 
"200 OK". func (sbr SubmitBatchResponse) Status() string { return sbr.rawResponse.Status } // Body returns the raw HTTP response object's Body. func (sbr SubmitBatchResponse) Body() io.ReadCloser { return sbr.rawResponse.Body } // ContentType returns the value for header Content-Type. func (sbr SubmitBatchResponse) ContentType() string { return sbr.rawResponse.Header.Get("Content-Type") } // ErrorCode returns the value for header x-ms-error-code. func (sbr SubmitBatchResponse) ErrorCode() string { return sbr.rawResponse.Header.Get("x-ms-error-code") } // RequestID returns the value for header x-ms-request-id. func (sbr SubmitBatchResponse) RequestID() string { return sbr.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. func (sbr SubmitBatchResponse) Version() string { return sbr.rawResponse.Header.Get("x-ms-version") } // UserDelegationKey - A user delegation key type UserDelegationKey struct { rawResponse *http.Response // SignedOid - The Azure Active Directory object ID in GUID format. SignedOid string `xml:"SignedOid"` // SignedTid - The Azure Active Directory tenant ID in GUID format SignedTid string `xml:"SignedTid"` // SignedStart - The date-time the key is active SignedStart time.Time `xml:"SignedStart"` // SignedExpiry - The date-time the key expires SignedExpiry time.Time `xml:"SignedExpiry"` // SignedService - Abbreviation of the Azure Storage service that accepts the key SignedService string `xml:"SignedService"` // SignedVersion - The service version that created the key SignedVersion string `xml:"SignedVersion"` // Value - The key as a base64 string Value string `xml:"Value"` } // MarshalXML implements the xml.Marshaler interface for UserDelegationKey. 
func (udk UserDelegationKey) MarshalXML(e *xml.Encoder, start xml.StartElement) error { udk2 := (*userDelegationKey)(unsafe.Pointer(&udk)) return e.EncodeElement(*udk2, start) } // UnmarshalXML implements the xml.Unmarshaler interface for UserDelegationKey. func (udk *UserDelegationKey) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { udk2 := (*userDelegationKey)(unsafe.Pointer(udk)) return d.DecodeElement(udk2, &start) } // Response returns the raw HTTP response object. func (udk UserDelegationKey) Response() *http.Response { return udk.rawResponse } // StatusCode returns the HTTP status code of the response, e.g. 200. func (udk UserDelegationKey) StatusCode() int { return udk.rawResponse.StatusCode } // Status returns the HTTP status message of the response, e.g. "200 OK". func (udk UserDelegationKey) Status() string { return udk.rawResponse.Status } // ClientRequestID returns the value for header x-ms-client-request-id. func (udk UserDelegationKey) ClientRequestID() string { return udk.rawResponse.Header.Get("x-ms-client-request-id") } // Date returns the value for header Date. func (udk UserDelegationKey) Date() time.Time { s := udk.rawResponse.Header.Get("Date") if s == "" { return time.Time{} } t, err := time.Parse(time.RFC1123, s) if err != nil { t = time.Time{} } return t } // ErrorCode returns the value for header x-ms-error-code. func (udk UserDelegationKey) ErrorCode() string { return udk.rawResponse.Header.Get("x-ms-error-code") } // RequestID returns the value for header x-ms-request-id. func (udk UserDelegationKey) RequestID() string { return udk.rawResponse.Header.Get("x-ms-request-id") } // Version returns the value for header x-ms-version. 
func (udk UserDelegationKey) Version() string { return udk.rawResponse.Header.Get("x-ms-version") } func init() { if reflect.TypeOf((*UserDelegationKey)(nil)).Elem().Size() != reflect.TypeOf((*userDelegationKey)(nil)).Elem().Size() { validateError(errors.New("size mismatch between UserDelegationKey and userDelegationKey")) } if reflect.TypeOf((*AccessPolicy)(nil)).Elem().Size() != reflect.TypeOf((*accessPolicy)(nil)).Elem().Size() { validateError(errors.New("size mismatch between AccessPolicy and accessPolicy")) } if reflect.TypeOf((*BlobProperties)(nil)).Elem().Size() != reflect.TypeOf((*blobProperties)(nil)).Elem().Size() { validateError(errors.New("size mismatch between BlobProperties and blobProperties")) } if reflect.TypeOf((*ContainerProperties)(nil)).Elem().Size() != reflect.TypeOf((*containerProperties)(nil)).Elem().Size() { validateError(errors.New("size mismatch between ContainerProperties and containerProperties")) } if reflect.TypeOf((*GeoReplication)(nil)).Elem().Size() != reflect.TypeOf((*geoReplication)(nil)).Elem().Size() { validateError(errors.New("size mismatch between GeoReplication and geoReplication")) } } const ( rfc3339Format = "2006-01-02T15:04:05Z" //This was wrong in the generated code, FYI ) // used to convert times from UTC to GMT before sending across the wire var gmt = time.FixedZone("GMT", 0) // internal type used for marshalling time in RFC1123 format type timeRFC1123 struct { time.Time } // MarshalText implements the encoding.TextMarshaler interface for timeRFC1123. func (t timeRFC1123) MarshalText() ([]byte, error) { return []byte(t.Format(time.RFC1123)), nil } // UnmarshalText implements the encoding.TextUnmarshaler interface for timeRFC1123. 
func (t *timeRFC1123) UnmarshalText(data []byte) (err error) { t.Time, err = time.Parse(time.RFC1123, string(data)) return } // internal type used for marshalling time in RFC3339 format type timeRFC3339 struct { time.Time } // MarshalText implements the encoding.TextMarshaler interface for timeRFC3339. func (t timeRFC3339) MarshalText() ([]byte, error) { return []byte(t.Format(rfc3339Format)), nil } // UnmarshalText implements the encoding.TextUnmarshaler interface for timeRFC3339. func (t *timeRFC3339) UnmarshalText(data []byte) (err error) { t.Time, err = time.Parse(rfc3339Format, string(data)) return } // internal type used for marshalling base64 encoded strings type base64Encoded struct { b []byte } // MarshalText implements the encoding.TextMarshaler interface for base64Encoded. func (c base64Encoded) MarshalText() ([]byte, error) { return []byte(base64.StdEncoding.EncodeToString(c.b)), nil } // UnmarshalText implements the encoding.TextUnmarshaler interface for base64Encoded. func (c *base64Encoded) UnmarshalText(data []byte) error { b, err := base64.StdEncoding.DecodeString(string(data)) if err != nil { return err } c.b = b return nil } // internal type used for marshalling type userDelegationKey struct { rawResponse *http.Response SignedOid string `xml:"SignedOid"` SignedTid string `xml:"SignedTid"` SignedStart timeRFC3339 `xml:"SignedStart"` SignedExpiry timeRFC3339 `xml:"SignedExpiry"` SignedService string `xml:"SignedService"` SignedVersion string `xml:"SignedVersion"` Value string `xml:"Value"` } // internal type used for marshalling type accessPolicy struct { Start timeRFC3339 `xml:"Start"` Expiry timeRFC3339 `xml:"Expiry"` Permission string `xml:"Permission"` } // internal type used for marshalling type blobProperties struct { // XMLName is used for marshalling and is subject to removal in a future release. 
XMLName xml.Name `xml:"Properties"` CreationTime *timeRFC1123 `xml:"Creation-Time"` LastModified timeRFC1123 `xml:"Last-Modified"` Etag ETag `xml:"Etag"` ContentLength *int64 `xml:"Content-Length"` ContentType *string `xml:"Content-Type"` ContentEncoding *string `xml:"Content-Encoding"` ContentLanguage *string `xml:"Content-Language"` ContentMD5 base64Encoded `xml:"Content-MD5"` ContentDisposition *string `xml:"Content-Disposition"` CacheControl *string `xml:"Cache-Control"` BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"` BlobType BlobType `xml:"BlobType"` LeaseStatus LeaseStatusType `xml:"LeaseStatus"` LeaseState LeaseStateType `xml:"LeaseState"` LeaseDuration LeaseDurationType `xml:"LeaseDuration"` CopyID *string `xml:"CopyId"` CopyStatus CopyStatusType `xml:"CopyStatus"` CopySource *string `xml:"CopySource"` CopyProgress *string `xml:"CopyProgress"` CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` CopyStatusDescription *string `xml:"CopyStatusDescription"` ServerEncrypted *bool `xml:"ServerEncrypted"` IncrementalCopy *bool `xml:"IncrementalCopy"` DestinationSnapshot *string `xml:"DestinationSnapshot"` DeletedTime *timeRFC1123 `xml:"DeletedTime"` RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` AccessTier AccessTierType `xml:"AccessTier"` AccessTierInferred *bool `xml:"AccessTierInferred"` ArchiveStatus ArchiveStatusType `xml:"ArchiveStatus"` CustomerProvidedKeySha256 *string `xml:"CustomerProvidedKeySha256"` AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` } // internal type used for marshalling type containerProperties struct { LastModified timeRFC1123 `xml:"Last-Modified"` Etag ETag `xml:"Etag"` LeaseStatus LeaseStatusType `xml:"LeaseStatus"` LeaseState LeaseStateType `xml:"LeaseState"` LeaseDuration LeaseDurationType `xml:"LeaseDuration"` PublicAccess PublicAccessType `xml:"PublicAccess"` HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` HasLegalHold *bool `xml:"HasLegalHold"` } // internal type used for 
marshalling type geoReplication struct { Status GeoReplicationStatusType `xml:"Status"` LastSyncTime timeRFC1123 `xml:"LastSyncTime"` } azure-storage-blob-go-0.10.0/azblob/zz_generated_page_blob.go000066400000000000000000001536501367515646300242040ustar00rootroot00000000000000package azblob // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "context" "encoding/base64" "encoding/xml" "github.com/Azure/azure-pipeline-go/pipeline" "io" "io/ioutil" "net/http" "net/url" "strconv" "time" ) // pageBlobClient is the client for the PageBlob methods of the Azblob service. type pageBlobClient struct { managementClient } // newPageBlobClient creates an instance of the pageBlobClient client. func newPageBlobClient(url url.URL, p pipeline.Pipeline) pageBlobClient { return pageBlobClient{newManagementClient(url, p)} } // ClearPages the Clear Pages operation clears a set of pages from a page blob // // contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more // information, see Setting // Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified // range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. // encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in the request. If not // specified, encryption is performed with the root account encryption key. For more information, see Encryption at // Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be // provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the // encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key // header is provided. 
ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it // has a sequence number less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to // operate only on a blob if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this // header value to operate only on a blob if it has the specified sequence number. ifModifiedSince is specify this // header value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is // specify this header value to operate only on a blob if it has not been modified since the specified date/time. // ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag // value to operate only on blobs without a matching value. requestID is provides a client-generated, opaque value with // a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobClearPagesResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.clearPagesPreparer(contentLength, timeout, rangeParameter, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.clearPagesResponder}, req) if err != nil { return nil, err } return resp.(*PageBlobClearPagesResponse), err } // clearPagesPreparer prepares the ClearPages request. 
func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "page") req.URL.RawQuery = params.Encode() req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) if rangeParameter != nil { req.Header.Set("x-ms-range", *rangeParameter) } if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } if encryptionKey != nil { req.Header.Set("x-ms-encryption-key", *encryptionKey) } if encryptionKeySha256 != nil { req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) } if encryptionAlgorithm != EncryptionAlgorithmNone { req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) } if ifSequenceNumberLessThanOrEqualTo != nil { req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10)) } if ifSequenceNumberLessThan != nil { req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10)) } if ifSequenceNumberEqualTo != nil { req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10)) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", 
string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } req.Header.Set("x-ms-page-write", "clear") return req, nil } // clearPagesResponder handles the response to the ClearPages request. func (client pageBlobClient) clearPagesResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusCreated) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &PageBlobClearPagesResponse{rawResponse: resp.Response()}, err } // CopyIncremental the Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. // The snapshot is copied such that only the differential changes between the previously copied snapshot are // transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read or // copied from as usual. This API is supported since REST version 2016-05-31. // // copySource is specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that // specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob // must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is // expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if // it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only // on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate // only on blobs with a matching value. 
ifNoneMatch is specify an ETag value to operate only on blobs without a // matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded // in the analytics logs when storage analytics logging is enabled. func (client pageBlobClient) CopyIncremental(ctx context.Context, copySource string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobCopyIncrementalResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.copyIncrementalPreparer(copySource, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.copyIncrementalResponder}, req) if err != nil { return nil, err } return resp.(*PageBlobCopyIncrementalResponse), err } // copyIncrementalPreparer prepares the CopyIncremental request. 
func (client pageBlobClient) copyIncrementalPreparer(copySource string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "incrementalcopy") req.URL.RawQuery = params.Encode() if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } req.Header.Set("x-ms-copy-source", copySource) req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // copyIncrementalResponder handles the response to the CopyIncremental request. func (client pageBlobClient) copyIncrementalResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusAccepted) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &PageBlobCopyIncrementalResponse{rawResponse: resp.Response()}, err } // Create the Create operation creates a new page blob. // // contentLength is the length of the request. blobContentLength is this header specifies the maximum size for the page // blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary. timeout is the timeout parameter is // expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. tier is optional. 
Indicates the tier to be set on the page blob. // blobContentType is optional. Sets the blob's content type. If specified, this property is stored with the blob and // returned with a read request. blobContentEncoding is optional. Sets the blob's content encoding. If specified, this // property is stored with the blob and returned with a read request. blobContentLanguage is optional. Set the blob's // content language. If specified, this property is stored with the blob and returned with a read request. // blobContentMD5 is optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for // the individual blocks were validated when each was uploaded. blobCacheControl is optional. Sets the blob's cache // control. If specified, this property is stored with the blob and returned with a read request. metadata is optional. // Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value // pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from // the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules // for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. leaseID is if // specified, the operation only succeeds if the resource's lease is active and matches this ID. blobContentDisposition // is optional. Sets the blob's Content-Disposition header. encryptionKey is optional. Specifies the encryption key to // use to encrypt the data provided in the request. If not specified, encryption is performed with the root account // encryption key. For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the // SHA-256 hash of the provided encryption key. 
Must be provided if the x-ms-encryption-key header is provided. // encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the only accepted value is // "AES256". Must be provided if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header // value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify // this header value to operate only on a blob if it has not been modified since the specified date/time. ifMatch is // specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to // operate only on blobs without a matching value. blobSequenceNumber is set for page blobs only. The sequence number // is a user-controlled value that you can use to track requests. The value of the sequence number must be between 0 // and 2^63 - 1. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in // the analytics logs when storage analytics logging is enabled. 
func (client pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, timeout *int32, tier PremiumPageBlobAccessTierType, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobCreateResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.createPreparer(contentLength, blobContentLength, timeout, tier, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, blobSequenceNumber, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req) if err != nil { return nil, err } return resp.(*PageBlobCreateResponse), err } // createPreparer prepares the Create request. 
func (client pageBlobClient) createPreparer(contentLength int64, blobContentLength int64, timeout *int32, tier PremiumPageBlobAccessTierType, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } req.URL.RawQuery = params.Encode() req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) if tier != PremiumPageBlobAccessTierNone { req.Header.Set("x-ms-access-tier", string(tier)) } if blobContentType != nil { req.Header.Set("x-ms-blob-content-type", *blobContentType) } if blobContentEncoding != nil { req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) } if blobContentLanguage != nil { req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) } if blobContentMD5 != nil { req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) } if blobCacheControl != nil { req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) } if metadata != nil { for k, v := range metadata { req.Header.Set("x-ms-meta-"+k, v) } } if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } if blobContentDisposition != nil { req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) } if encryptionKey != nil { req.Header.Set("x-ms-encryption-key", *encryptionKey) } if encryptionKeySha256 != nil { req.Header.Set("x-ms-encryption-key-sha256", 
*encryptionKeySha256) } if encryptionAlgorithm != EncryptionAlgorithmNone { req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) if blobSequenceNumber != nil { req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } req.Header.Set("x-ms-blob-type", "PageBlob") return req, nil } // createResponder handles the response to the Create request. func (client pageBlobClient) createResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusCreated) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &PageBlobCreateResponse{rawResponse: resp.Response()}, err } // GetPageRanges the Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a // page blob // // snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to // retrieve. For more information on working with blob snapshots, see Creating // a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified // range. 
leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. // ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified // date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified // since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. // ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a // client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.getPageRangesPreparer(snapshot, timeout, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesResponder}, req) if err != nil { return nil, err } return resp.(*PageList), err } // getPageRangesPreparer prepares the GetPageRanges request. 
func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("GET", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if snapshot != nil && len(*snapshot) > 0 { params.Set("snapshot", *snapshot) } if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "pagelist") req.URL.RawQuery = params.Encode() if rangeParameter != nil { req.Header.Set("x-ms-range", *rangeParameter) } if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // getPageRangesResponder handles the response to the GetPageRanges request. 
func (client pageBlobClient) getPageRangesResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } result := &PageList{rawResponse: resp.Response()} if err != nil { return result, err } defer resp.Response().Body.Close() b, err := ioutil.ReadAll(resp.Response().Body) if err != nil { return result, err } if len(b) > 0 { b = removeBOM(b) err = xml.Unmarshal(b, result) if err != nil { return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") } } return result, nil } // GetPageRangesDiff the Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were // changed between target blob and previous snapshot. // // snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to // retrieve. For more information on working with blob snapshots, see Creating // a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. prevsnapshot is optional in version 2015-07-08 and newer. The prevsnapshot // parameter is a DateTime value that specifies that the response will contain only pages that were changed between // target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a // snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots // are currently supported only for blobs created on or after January 1, 2016. rangeParameter is return only the bytes // of the blob in the specified range. leaseID is if specified, the operation only succeeds if the resource's lease is // active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it has been // modified since the specified date/time. 
ifUnmodifiedSince is specify this header value to operate only on a blob if // it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs // with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. // requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics // logs when storage analytics logging is enabled. func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.getPageRangesDiffPreparer(snapshot, timeout, prevsnapshot, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesDiffResponder}, req) if err != nil { return nil, err } return resp.(*PageList), err } // getPageRangesDiffPreparer prepares the GetPageRangesDiff request. 
func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("GET", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if snapshot != nil && len(*snapshot) > 0 { params.Set("snapshot", *snapshot) } if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } if prevsnapshot != nil && len(*prevsnapshot) > 0 { params.Set("prevsnapshot", *prevsnapshot) } params.Set("comp", "pagelist") req.URL.RawQuery = params.Encode() if rangeParameter != nil { req.Header.Set("x-ms-range", *rangeParameter) } if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // getPageRangesDiffResponder handles the response to the GetPageRangesDiff request. 
func (client pageBlobClient) getPageRangesDiffResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } result := &PageList{rawResponse: resp.Response()} if err != nil { return result, err } defer resp.Response().Body.Close() b, err := ioutil.ReadAll(resp.Response().Body) if err != nil { return result, err } if len(b) > 0 { b = removeBOM(b) err = xml.Unmarshal(b, result) if err != nil { return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") } } return result, nil } // Resize resize the Blob // // blobContentLength is this header specifies the maximum size for the page blob, up to 1 TB. The page blob size must // be aligned to a 512-byte boundary. timeout is the timeout parameter is expressed in seconds. For more information, // see Setting // Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's // lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the // data provided in the request. If not specified, encryption is performed with the root account encryption key. For // more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the // provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the // algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided // if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header value to operate only on a // blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to // operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value // to operate only on blobs with a matching value. 
ifNoneMatch is specify an ETag value to operate only on blobs // without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is // recorded in the analytics logs when storage analytics logging is enabled. func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobResizeResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.resizePreparer(blobContentLength, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.resizeResponder}, req) if err != nil { return nil, err } return resp.(*PageBlobResizeResponse), err } // resizePreparer prepares the Resize request. 
func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "properties") req.URL.RawQuery = params.Encode() if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } if encryptionKey != nil { req.Header.Set("x-ms-encryption-key", *encryptionKey) } if encryptionKeySha256 != nil { req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) } if encryptionAlgorithm != EncryptionAlgorithmNone { req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // resizeResponder handles the response to the Resize request. 
func (client pageBlobClient) resizeResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &PageBlobResizeResponse{rawResponse: resp.Response()}, err } // UpdateSequenceNumber update the sequence number of the blob // // sequenceNumberAction is required if the x-ms-blob-sequence-number header is set for the request. This property // applies to page blobs only. This property indicates how the service should modify the blob's sequence number timeout // is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's // lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it // has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a // blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on // blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. // blobSequenceNumber is set for page blobs only. The sequence number is a user-controlled value that you can use to // track requests. The value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a // client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. 
func (client pageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobUpdateSequenceNumberResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.updateSequenceNumberPreparer(sequenceNumberAction, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, blobSequenceNumber, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.updateSequenceNumberResponder}, req) if err != nil { return nil, err } return resp.(*PageBlobUpdateSequenceNumberResponse), err } // updateSequenceNumberPreparer prepares the UpdateSequenceNumber request. 
func (client pageBlobClient) updateSequenceNumberPreparer(sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "properties") req.URL.RawQuery = params.Encode() if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } req.Header.Set("x-ms-sequence-number-action", string(sequenceNumberAction)) if blobSequenceNumber != nil { req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // updateSequenceNumberResponder handles the response to the UpdateSequenceNumber request. func (client pageBlobClient) updateSequenceNumberResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &PageBlobUpdateSequenceNumberResponse{rawResponse: resp.Response()}, err } // UploadPages the Upload Pages operation writes a range of pages to a page blob // // body is initial data body will be closed upon successful return. 
Callers should ensure closure when receiving an // error.contentLength is the length of the request. transactionalContentMD5 is specify the transactional md5 for the // body, to be validated by the service. transactionalContentCrc64 is specify the transactional crc64 for the body, to // be validated by the service. timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified // range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. // encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in the request. If not // specified, encryption is performed with the root account encryption key. For more information, see Encryption at // Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be // provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the // encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key // header is provided. ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it // has a sequence number less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to // operate only on a blob if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this // header value to operate only on a blob if it has the specified sequence number. ifModifiedSince is specify this // header value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is // specify this header value to operate only on a blob if it has not been modified since the specified date/time. 
// ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag // value to operate only on blobs without a matching value. requestID is provides a client-generated, opaque value with // a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesResponse, error) { if err := validate([]validation{ {targetValue: body, constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.uploadPagesPreparer(body, contentLength, transactionalContentMD5, transactionalContentCrc64, timeout, rangeParameter, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadPagesResponder}, req) if err != nil { return nil, err } return resp.(*PageBlobUploadPagesResponse), err } // uploadPagesPreparer prepares the UploadPages request. 
func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, body) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "page") req.URL.RawQuery = params.Encode() req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) if transactionalContentMD5 != nil { req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) } if transactionalContentCrc64 != nil { req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) } if rangeParameter != nil { req.Header.Set("x-ms-range", *rangeParameter) } if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } if encryptionKey != nil { req.Header.Set("x-ms-encryption-key", *encryptionKey) } if encryptionKeySha256 != nil { req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) } if encryptionAlgorithm != EncryptionAlgorithmNone { req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) } if ifSequenceNumberLessThanOrEqualTo != nil { req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10)) } if ifSequenceNumberLessThan != nil { req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10)) } if ifSequenceNumberEqualTo != nil { 
req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10)) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } req.Header.Set("x-ms-page-write", "update") return req, nil } // uploadPagesResponder handles the response to the UploadPages request. func (client pageBlobClient) uploadPagesResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusCreated) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &PageBlobUploadPagesResponse{rawResponse: resp.Response()}, err } // UploadPagesFromURL the Upload Pages operation writes a range of pages to a page blob where the contents are read // from a URL // // sourceURL is specify a URL to the copy source. sourceRange is bytes of source data in the specified range. The // length of this range should match the ContentLength header and x-ms-range/Range destination range header. // contentLength is the length of the request. rangeParameter is the range of bytes to which the source range would be // written. The range should be 512 aligned and range-end is required. sourceContentMD5 is specify the md5 calculated // for the range of bytes that must be read from the copy source. sourceContentcrc64 is specify the crc64 calculated // for the range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in // seconds. 
For more information, see Setting // Timeouts for Blob Service Operations. encryptionKey is optional. Specifies the encryption key to use to encrypt // the data provided in the request. If not specified, encryption is performed with the root account encryption key. // For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of // the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is // the algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be // provided if the x-ms-encryption-key header is provided. leaseID is if specified, the operation only succeeds if the // resource's lease is active and matches this ID. ifSequenceNumberLessThanOrEqualTo is specify this header value to // operate only on a blob if it has a sequence number less than or equal to the specified. ifSequenceNumberLessThan is // specify this header value to operate only on a blob if it has a sequence number less than the specified. // ifSequenceNumberEqualTo is specify this header value to operate only on a blob if it has the specified sequence // number. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the // specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been // modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching // value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. sourceIfModifiedSince // is specify this header value to operate only on a blob if it has been modified since the specified date/time. // sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the // specified date/time. 
sourceIfMatch is specify an ETag value to operate only on blobs with a matching value. // sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides // a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage // analytics logging is enabled. func (client pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesFromURLResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.uploadPagesFromURLPreparer(sourceURL, sourceRange, contentLength, rangeParameter, sourceContentMD5, sourceContentcrc64, timeout, encryptionKey, encryptionKeySha256, encryptionAlgorithm, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadPagesFromURLResponder}, req) if err != nil { return nil, err } return 
resp.(*PageBlobUploadPagesFromURLResponse), err } // uploadPagesFromURLPreparer prepares the UploadPagesFromURL request. func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "page") req.URL.RawQuery = params.Encode() req.Header.Set("x-ms-copy-source", sourceURL) req.Header.Set("x-ms-source-range", sourceRange) if sourceContentMD5 != nil { req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) } if sourceContentcrc64 != nil { req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentcrc64)) } req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) req.Header.Set("x-ms-range", rangeParameter) if encryptionKey != nil { req.Header.Set("x-ms-encryption-key", *encryptionKey) } if encryptionKeySha256 != nil { req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) } if encryptionAlgorithm != EncryptionAlgorithmNone { req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) } if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } if ifSequenceNumberLessThanOrEqualTo != nil 
{ req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10)) } if ifSequenceNumberLessThan != nil { req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10)) } if ifSequenceNumberEqualTo != nil { req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10)) } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } if ifUnmodifiedSince != nil { req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if ifMatch != nil { req.Header.Set("If-Match", string(*ifMatch)) } if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } if sourceIfModifiedSince != nil { req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) } if sourceIfUnmodifiedSince != nil { req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) } if sourceIfMatch != nil { req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) } if sourceIfNoneMatch != nil { req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } req.Header.Set("x-ms-page-write", "update") return req, nil } // uploadPagesFromURLResponder handles the response to the UploadPagesFromURL request. 
func (client pageBlobClient) uploadPagesFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusCreated) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &PageBlobUploadPagesFromURLResponse{rawResponse: resp.Response()}, err } azure-storage-blob-go-0.10.0/azblob/zz_generated_responder_policy.go000066400000000000000000000044531367515646300256460ustar00rootroot00000000000000package azblob // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "bytes" "context" "encoding/xml" "github.com/Azure/azure-pipeline-go/pipeline" "io/ioutil" ) type responder func(resp pipeline.Response) (result pipeline.Response, err error) // ResponderPolicyFactory is a Factory capable of creating a responder pipeline. type responderPolicyFactory struct { responder responder } // New creates a responder policy factory. func (arpf responderPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { return responderPolicy{next: next, responder: arpf.responder} } type responderPolicy struct { next pipeline.Policy responder responder } // Do sends the request to the service and validates/deserializes the HTTP response. func (arp responderPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { resp, err := arp.next.Do(ctx, request) if err != nil { return resp, err } return arp.responder(resp) } // validateResponse checks an HTTP response's status code against a legal set of codes. // If the response code is not legal, then validateResponse reads all of the response's body // (containing error information) and returns a response error. 
func validateResponse(resp pipeline.Response, successStatusCodes ...int) error { if resp == nil { return NewResponseError(nil, nil, "nil response") } responseCode := resp.Response().StatusCode for _, i := range successStatusCodes { if i == responseCode { return nil } } // only close the body in the failure case. in the // success case responders will close the body as required. defer resp.Response().Body.Close() b, err := ioutil.ReadAll(resp.Response().Body) if err != nil { return err } // the service code, description and details will be populated during unmarshalling responseError := NewResponseError(nil, resp.Response(), resp.Response().Status) if len(b) > 0 { if err = xml.Unmarshal(b, &responseError); err != nil { return NewResponseError(err, resp.Response(), "failed to unmarshal response body") } } return responseError } // removes any BOM from the byte slice func removeBOM(b []byte) []byte { // UTF8 return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) } azure-storage-blob-go-0.10.0/azblob/zz_generated_response_error.go000066400000000000000000000067111367515646300253340ustar00rootroot00000000000000package azblob // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "bytes" "fmt" "github.com/Azure/azure-pipeline-go/pipeline" "net" "net/http" ) // if you want to provide custom error handling set this variable to your constructor function var responseErrorFactory func(cause error, response *http.Response, description string) error // ResponseError identifies a responder-generated network or response parsing error. type ResponseError interface { // Error exposes the Error(), Temporary() and Timeout() methods. net.Error // Includes the Go error interface // Response returns the HTTP response. You may examine this but you should not modify it. Response() *http.Response } // NewResponseError creates an error object that implements the error interface. 
func NewResponseError(cause error, response *http.Response, description string) error { if responseErrorFactory != nil { return responseErrorFactory(cause, response, description) } return &responseError{ ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), response: response, description: description, } } // responseError is the internal struct that implements the public ResponseError interface. type responseError struct { pipeline.ErrorNode // This is embedded so that responseError "inherits" Error, Temporary, Timeout, and Cause response *http.Response description string } // Error implements the error interface's Error method to return a string representation of the error. func (e *responseError) Error() string { b := &bytes.Buffer{} fmt.Fprintf(b, "===== RESPONSE ERROR (Code=%v) =====\n", e.response.StatusCode) fmt.Fprintf(b, "Status=%s, Description: %s\n", e.response.Status, e.description) s := b.String() return e.ErrorNode.Error(s) } // Response implements the ResponseError interface's method to return the HTTP response. func (e *responseError) Response() *http.Response { return e.response } // RFC7807 PROBLEM ------------------------------------------------------------------------------------ // RFC7807Problem ... This type can be publicly embedded in another type that wants to add additional members. /*type RFC7807Problem struct { // Mandatory: A (relative) URI reference identifying the problem type (it MAY refer to human-readable documentation). typeURI string // Should default to "about:blank" // Optional: Short, human-readable summary (maybe localized). title string // Optional: HTTP status code generated by the origin server status int // Optional: Human-readable explanation for this problem occurance. // Should help client correct the problem. Clients should NOT parse this string. detail string // Optional: A (relative) URI identifying this specific problem occurence (it may or may not be dereferenced). instance string } // NewRFC7807Problem ... 
func NewRFC7807Problem(typeURI string, status int, titleFormat string, a ...interface{}) error { return &RFC7807Problem{ typeURI: typeURI, status: status, title: fmt.Sprintf(titleFormat, a...), } } // Error returns the error information as a string. func (e *RFC7807Problem) Error() string { return e.title } // TypeURI ... func (e *RFC7807Problem) TypeURI() string { if e.typeURI == "" { e.typeURI = "about:blank" } return e.typeURI } // Members ... func (e *RFC7807Problem) Members() (status int, title, detail, instance string) { return e.status, e.title, e.detail, e.instance }*/ azure-storage-blob-go-0.10.0/azblob/zz_generated_service.go000066400000000000000000000545421367515646300237320ustar00rootroot00000000000000package azblob // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "bytes" "context" "encoding/xml" "github.com/Azure/azure-pipeline-go/pipeline" "io" "io/ioutil" "net/http" "net/url" "strconv" ) // serviceClient is the client for the Service methods of the Azblob service. type serviceClient struct { managementClient } // newServiceClient creates an instance of the serviceClient client. func newServiceClient(url url.URL, p pipeline.Pipeline) serviceClient { return serviceClient{newManagementClient(url, p)} } // GetAccountInfo returns the sku name and account kind func (client serviceClient) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) { req, err := client.getAccountInfoPreparer() if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req) if err != nil { return nil, err } return resp.(*ServiceGetAccountInfoResponse), err } // getAccountInfoPreparer prepares the GetAccountInfo request. 
func (client serviceClient) getAccountInfoPreparer() (pipeline.Request, error) { req, err := pipeline.NewRequest("GET", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() params.Set("restype", "account") params.Set("comp", "properties") req.URL.RawQuery = params.Encode() req.Header.Set("x-ms-version", ServiceVersion) return req, nil } // getAccountInfoResponder handles the response to the GetAccountInfo request. func (client serviceClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &ServiceGetAccountInfoResponse{rawResponse: resp.Response()}, err } // GetProperties gets the properties of a storage account's Blob service, including properties for Storage Analytics // and CORS (Cross-Origin Resource Sharing) rules. // // timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB // character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
func (client serviceClient) GetProperties(ctx context.Context, timeout *int32, requestID *string) (*StorageServiceProperties, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.getPropertiesPreparer(timeout, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req) if err != nil { return nil, err } return resp.(*StorageServiceProperties), err } // getPropertiesPreparer prepares the GetProperties request. func (client serviceClient) getPropertiesPreparer(timeout *int32, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("GET", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("restype", "service") params.Set("comp", "properties") req.URL.RawQuery = params.Encode() req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // getPropertiesResponder handles the response to the GetProperties request. 
func (client serviceClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } result := &StorageServiceProperties{rawResponse: resp.Response()} if err != nil { return result, err } defer resp.Response().Body.Close() b, err := ioutil.ReadAll(resp.Response().Body) if err != nil { return result, err } if len(b) > 0 { b = removeBOM(b) err = xml.Unmarshal(b, result) if err != nil { return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") } } return result, nil } // GetStatistics retrieves statistics related to replication for the Blob service. It is only available on the // secondary location endpoint when read-access geo-redundant replication is enabled for the storage account. // // timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB // character limit that is recorded in the analytics logs when storage analytics logging is enabled. func (client serviceClient) GetStatistics(ctx context.Context, timeout *int32, requestID *string) (*StorageServiceStats, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.getStatisticsPreparer(timeout, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getStatisticsResponder}, req) if err != nil { return nil, err } return resp.(*StorageServiceStats), err } // getStatisticsPreparer prepares the GetStatistics request. 
func (client serviceClient) getStatisticsPreparer(timeout *int32, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("GET", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("restype", "service") params.Set("comp", "stats") req.URL.RawQuery = params.Encode() req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // getStatisticsResponder handles the response to the GetStatistics request. func (client serviceClient) getStatisticsResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } result := &StorageServiceStats{rawResponse: resp.Response()} if err != nil { return result, err } defer resp.Response().Body.Close() b, err := ioutil.ReadAll(resp.Response().Body) if err != nil { return result, err } if len(b) > 0 { b = removeBOM(b) err = xml.Unmarshal(b, result) if err != nil { return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") } } return result, nil } // GetUserDelegationKey retrieves a user delegation key for the Blob service. This is only a valid operation when using // bearer token authentication. // // timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB // character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
func (client serviceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, timeout *int32, requestID *string) (*UserDelegationKey, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.getUserDelegationKeyPreparer(keyInfo, timeout, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getUserDelegationKeyResponder}, req) if err != nil { return nil, err } return resp.(*UserDelegationKey), err } // getUserDelegationKeyPreparer prepares the GetUserDelegationKey request. func (client serviceClient) getUserDelegationKeyPreparer(keyInfo KeyInfo, timeout *int32, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("POST", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("restype", "service") params.Set("comp", "userdelegationkey") req.URL.RawQuery = params.Encode() req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } b, err := xml.Marshal(keyInfo) if err != nil { return req, pipeline.NewError(err, "failed to marshal request body") } req.Header.Set("Content-Type", "application/xml") err = req.SetBody(bytes.NewReader(b)) if err != nil { return req, pipeline.NewError(err, "failed to set request body") } return req, nil } // getUserDelegationKeyResponder handles the response to the GetUserDelegationKey request. 
func (client serviceClient) getUserDelegationKeyResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } result := &UserDelegationKey{rawResponse: resp.Response()} if err != nil { return result, err } defer resp.Response().Body.Close() b, err := ioutil.ReadAll(resp.Response().Body) if err != nil { return result, err } if len(b) > 0 { b = removeBOM(b) err = xml.Unmarshal(b, result) if err != nil { return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") } } return result, nil } // ListContainersSegment the List Containers Segment operation returns a list of the containers under the specified // account // // prefix is filters the results to return only containers whose name begins with the specified prefix. marker is a // string value that identifies the portion of the list of containers to be returned with the next listing operation. // The operation returns the NextMarker value within the response body if the listing operation did not return all // containers remaining to be listed with the current page. The NextMarker value can be used as the value for the // marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the // client. maxresults is specifies the maximum number of containers to return. If the request does not specify // maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the // remainder of the results. For this reason, it is possible that the service will return fewer results than specified // by maxresults, or than the default of 5000. include is include this parameter to specify that the container's // metadata be returned as part of the response body. timeout is the timeout parameter is expressed in seconds. 
For // more information, see Setting // Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB // character limit that is recorded in the analytics logs when storage analytics logging is enabled. func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (*ListContainersSegmentResponse, error) { if err := validate([]validation{ {targetValue: maxresults, constraints: []constraint{{target: "maxresults", name: null, rule: false, chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}, {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.listContainersSegmentPreparer(prefix, marker, maxresults, include, timeout, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listContainersSegmentResponder}, req) if err != nil { return nil, err } return resp.(*ListContainersSegmentResponse), err } // listContainersSegmentPreparer prepares the ListContainersSegment request. 
func (client serviceClient) listContainersSegmentPreparer(prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("GET", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if prefix != nil && len(*prefix) > 0 { params.Set("prefix", *prefix) } if marker != nil && len(*marker) > 0 { params.Set("marker", *marker) } if maxresults != nil { params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) } if include != ListContainersIncludeNone { params.Set("include", string(include)) } if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "list") req.URL.RawQuery = params.Encode() req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // listContainersSegmentResponder handles the response to the ListContainersSegment request. func (client serviceClient) listContainersSegmentResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } result := &ListContainersSegmentResponse{rawResponse: resp.Response()} if err != nil { return result, err } defer resp.Response().Body.Close() b, err := ioutil.ReadAll(resp.Response().Body) if err != nil { return result, err } if len(b) > 0 { b = removeBOM(b) err = xml.Unmarshal(b, result) if err != nil { return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") } } return result, nil } // SetProperties sets properties for a storage account's Blob service endpoint, including properties for Storage // Analytics and CORS (Cross-Origin Resource Sharing) rules // // storageServiceProperties is the StorageService properties. timeout is the timeout parameter is expressed in seconds. 
// For more information, see Setting // Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB // character limit that is recorded in the analytics logs when storage analytics logging is enabled. func (client serviceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, timeout *int32, requestID *string) (*ServiceSetPropertiesResponse, error) { if err := validate([]validation{ {targetValue: storageServiceProperties, constraints: []constraint{{target: "storageServiceProperties.Logging", name: null, rule: false, chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy", name: null, rule: true, chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: null, rule: false, chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, }}, }}, {target: "storageServiceProperties.HourMetrics", name: null, rule: false, chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy", name: null, rule: false, chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: null, rule: false, chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, }}, }}, {target: "storageServiceProperties.MinuteMetrics", name: null, rule: false, chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy", name: null, rule: false, chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: null, rule: false, chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, }}, }}, {target: "storageServiceProperties.DeleteRetentionPolicy", name: null, rule: false, chain: []constraint{{target: 
"storageServiceProperties.DeleteRetentionPolicy.Days", name: null, rule: false, chain: []constraint{{target: "storageServiceProperties.DeleteRetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, }}}}, {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.setPropertiesPreparer(storageServiceProperties, timeout, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setPropertiesResponder}, req) if err != nil { return nil, err } return resp.(*ServiceSetPropertiesResponse), err } // setPropertiesPreparer prepares the SetProperties request. func (client serviceClient) setPropertiesPreparer(storageServiceProperties StorageServiceProperties, timeout *int32, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("restype", "service") params.Set("comp", "properties") req.URL.RawQuery = params.Encode() req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } b, err := xml.Marshal(storageServiceProperties) if err != nil { return req, pipeline.NewError(err, "failed to marshal request body") } req.Header.Set("Content-Type", "application/xml") err = req.SetBody(bytes.NewReader(b)) if err != nil { return req, pipeline.NewError(err, "failed to set request body") } return req, nil } // setPropertiesResponder handles the response to the SetProperties request. 
func (client serviceClient) setPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK, http.StatusAccepted) if resp == nil { return nil, err } io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &ServiceSetPropertiesResponse{rawResponse: resp.Response()}, err } // SubmitBatch the Batch operation allows multiple API calls to be embedded into a single HTTP request. // // body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an // error.contentLength is the length of the request. multipartContentType is required. The value of this header must be // multipart/mixed with a batch boundary. Example header value: multipart/mixed; boundary=batch_ timeout is the // timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB // character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
func (client serviceClient) SubmitBatch(ctx context.Context, body io.ReadSeeker, contentLength int64, multipartContentType string, timeout *int32, requestID *string) (*SubmitBatchResponse, error) { if err := validate([]validation{ {targetValue: body, constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } req, err := client.submitBatchPreparer(body, contentLength, multipartContentType, timeout, requestID) if err != nil { return nil, err } resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.submitBatchResponder}, req) if err != nil { return nil, err } return resp.(*SubmitBatchResponse), err } // submitBatchPreparer prepares the SubmitBatch request. func (client serviceClient) submitBatchPreparer(body io.ReadSeeker, contentLength int64, multipartContentType string, timeout *int32, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("POST", client.url, body) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "batch") req.URL.RawQuery = params.Encode() req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) req.Header.Set("Content-Type", multipartContentType) req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } return req, nil } // submitBatchResponder handles the response to the SubmitBatch request. 
func (client serviceClient) submitBatchResponder(resp pipeline.Response) (pipeline.Response, error) { err := validateResponse(resp, http.StatusOK) if resp == nil { return nil, err } return &SubmitBatchResponse{rawResponse: resp.Response()}, err } azure-storage-blob-go-0.10.0/azblob/zz_generated_validation.go000066400000000000000000000240051367515646300244130ustar00rootroot00000000000000package azblob // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "fmt" "github.com/Azure/azure-pipeline-go/pipeline" "reflect" "regexp" "strings" ) // Constraint stores constraint name, target field name // Rule and chain validations. type constraint struct { // Target field name for validation. target string // Constraint name e.g. minLength, MaxLength, Pattern, etc. name string // Rule for constraint e.g. greater than 10, less than 5 etc. rule interface{} // Chain validations for struct type chain []constraint } // Validation stores parameter-wise validation. type validation struct { targetValue interface{} constraints []constraint } // Constraint list const ( empty = "Empty" null = "Null" readOnly = "ReadOnly" pattern = "Pattern" maxLength = "MaxLength" minLength = "MinLength" maxItems = "MaxItems" minItems = "MinItems" multipleOf = "MultipleOf" uniqueItems = "UniqueItems" inclusiveMaximum = "InclusiveMaximum" exclusiveMaximum = "ExclusiveMaximum" exclusiveMinimum = "ExclusiveMinimum" inclusiveMinimum = "InclusiveMinimum" ) // Validate method validates constraints on parameter // passed in validation array. 
func validate(m []validation) error { for _, item := range m { v := reflect.ValueOf(item.targetValue) for _, constraint := range item.constraints { var err error switch v.Kind() { case reflect.Ptr: err = validatePtr(v, constraint) case reflect.String: err = validateString(v, constraint) case reflect.Struct: err = validateStruct(v, constraint) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: err = validateInt(v, constraint) case reflect.Float32, reflect.Float64: err = validateFloat(v, constraint) case reflect.Array, reflect.Slice, reflect.Map: err = validateArrayMap(v, constraint) default: err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind())) } if err != nil { return err } } } return nil } func validateStruct(x reflect.Value, v constraint, name ...string) error { //Get field name from target name which is in format a.b.c s := strings.Split(v.target, ".") f := x.FieldByName(s[len(s)-1]) if isZero(f) { return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.target)) } err := validate([]validation{ { targetValue: getInterfaceValue(f), constraints: []constraint{v}, }, }) return err } func validatePtr(x reflect.Value, v constraint) error { if v.name == readOnly { if !x.IsNil() { return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request") } return nil } if x.IsNil() { return checkNil(x, v) } if v.chain != nil { return validate([]validation{ { targetValue: getInterfaceValue(x.Elem()), constraints: v.chain, }, }) } return nil } func validateInt(x reflect.Value, v constraint) error { i := x.Int() r, ok := v.rule.(int) if !ok { return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) } switch v.name { case multipleOf: if i%int64(r) != 0 { return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r)) } case exclusiveMinimum: if i <= int64(r) { return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) } case 
exclusiveMaximum: if i >= int64(r) { return createError(x, v, fmt.Sprintf("value must be less than %v", r)) } case inclusiveMinimum: if i < int64(r) { return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) } case inclusiveMaximum: if i > int64(r) { return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) } default: return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.name)) } return nil } func validateFloat(x reflect.Value, v constraint) error { f := x.Float() r, ok := v.rule.(float64) if !ok { return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.name, v.rule)) } switch v.name { case exclusiveMinimum: if f <= r { return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) } case exclusiveMaximum: if f >= r { return createError(x, v, fmt.Sprintf("value must be less than %v", r)) } case inclusiveMinimum: if f < r { return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) } case inclusiveMaximum: if f > r { return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) } default: return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.name)) } return nil } func validateString(x reflect.Value, v constraint) error { s := x.String() switch v.name { case empty: if len(s) == 0 { return checkEmpty(x, v) } case pattern: reg, err := regexp.Compile(v.rule.(string)) if err != nil { return createError(x, v, err.Error()) } if !reg.MatchString(s) { return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.rule)) } case maxLength: if _, ok := v.rule.(int); !ok { return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) } if len(s) > v.rule.(int) { return createError(x, v, fmt.Sprintf("value length must be less than %v", v.rule)) } case minLength: if _, ok := v.rule.(int); !ok { return 
createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) } if len(s) < v.rule.(int) { return createError(x, v, fmt.Sprintf("value length must be greater than %v", v.rule)) } case readOnly: if len(s) > 0 { return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request") } default: return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.name)) } if v.chain != nil { return validate([]validation{ { targetValue: getInterfaceValue(x), constraints: v.chain, }, }) } return nil } func validateArrayMap(x reflect.Value, v constraint) error { switch v.name { case null: if x.IsNil() { return checkNil(x, v) } case empty: if x.IsNil() || x.Len() == 0 { return checkEmpty(x, v) } case maxItems: if _, ok := v.rule.(int); !ok { return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule)) } if x.Len() > v.rule.(int) { return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.rule, x.Len())) } case minItems: if _, ok := v.rule.(int); !ok { return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule)) } if x.Len() < v.rule.(int) { return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.rule, x.Len())) } case uniqueItems: if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { if !checkForUniqueInArray(x) { return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x)) } } else if x.Kind() == reflect.Map { if !checkForUniqueInMap(x) { return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x)) } } else { return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.name, x.Kind())) } case readOnly: if x.Len() != 0 { return createError(x, v, "readonly parameter; must send as nil or empty in request") } case pattern: reg, err 
:= regexp.Compile(v.rule.(string)) if err != nil { return createError(x, v, err.Error()) } keys := x.MapKeys() for _, k := range keys { if !reg.MatchString(k.String()) { return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.rule)) } } default: return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.name)) } if v.chain != nil { return validate([]validation{ { targetValue: getInterfaceValue(x), constraints: v.chain, }, }) } return nil } func checkNil(x reflect.Value, v constraint) error { if _, ok := v.rule.(bool); !ok { return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.name, v.rule)) } if v.rule.(bool) { return createError(x, v, "value can not be null; required parameter") } return nil } func checkEmpty(x reflect.Value, v constraint) error { if _, ok := v.rule.(bool); !ok { return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.name, v.rule)) } if v.rule.(bool) { return createError(x, v, "value can not be null or empty; required parameter") } return nil } func checkForUniqueInArray(x reflect.Value) bool { if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { return false } arrOfInterface := make([]interface{}, x.Len()) for i := 0; i < x.Len(); i++ { arrOfInterface[i] = x.Index(i).Interface() } m := make(map[interface{}]bool) for _, val := range arrOfInterface { if m[val] { return false } m[val] = true } return true } func checkForUniqueInMap(x reflect.Value) bool { if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { return false } mapOfInterface := make(map[interface{}]interface{}, x.Len()) keys := x.MapKeys() for _, k := range keys { mapOfInterface[k.Interface()] = x.MapIndex(k).Interface() } m := make(map[interface{}]bool) for _, val := range mapOfInterface { if m[val] { return false } m[val] = true } return true } func getInterfaceValue(x reflect.Value) interface{} { if x.Kind() == reflect.Invalid { return 
nil } return x.Interface() } func isZero(x interface{}) bool { return x == reflect.Zero(reflect.TypeOf(x)).Interface() } func createError(x reflect.Value, v constraint, message string) error { return pipeline.NewError(nil, fmt.Sprintf("validation failed: parameter=%s constraint=%s value=%#v details: %s", v.target, v.name, getInterfaceValue(x), message)) } azure-storage-blob-go-0.10.0/azblob/zz_generated_version.go000066400000000000000000000006701367515646300237500ustar00rootroot00000000000000package azblob // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { return "Azure-SDK-For-Go/0.0.0 azblob/2019-02-02" } // Version returns the semantic version (see http://semver.org) of the client. func Version() string { return "0.0.0" } azure-storage-blob-go-0.10.0/azblob/zz_response_helpers.go000066400000000000000000000161421367515646300236260ustar00rootroot00000000000000package azblob import ( "context" "io" "net/http" "time" ) // BlobHTTPHeaders contains read/writeable blob properties. type BlobHTTPHeaders struct { ContentType string ContentMD5 []byte ContentEncoding string ContentLanguage string ContentDisposition string CacheControl string } // NewHTTPHeaders returns the user-modifiable properties for this blob. func (bgpr BlobGetPropertiesResponse) NewHTTPHeaders() BlobHTTPHeaders { return BlobHTTPHeaders{ ContentType: bgpr.ContentType(), ContentEncoding: bgpr.ContentEncoding(), ContentLanguage: bgpr.ContentLanguage(), ContentDisposition: bgpr.ContentDisposition(), CacheControl: bgpr.CacheControl(), ContentMD5: bgpr.ContentMD5(), } } /////////////////////////////////////////////////////////////////////////////// // NewHTTPHeaders returns the user-modifiable properties for this blob. 
func (dr downloadResponse) NewHTTPHeaders() BlobHTTPHeaders { return BlobHTTPHeaders{ ContentType: dr.ContentType(), ContentEncoding: dr.ContentEncoding(), ContentLanguage: dr.ContentLanguage(), ContentDisposition: dr.ContentDisposition(), CacheControl: dr.CacheControl(), ContentMD5: dr.ContentMD5(), } } /////////////////////////////////////////////////////////////////////////////// // DownloadResponse wraps AutoRest generated DownloadResponse and helps to provide info for retry. type DownloadResponse struct { r *downloadResponse ctx context.Context b BlobURL getInfo HTTPGetterInfo } // Body constructs new RetryReader stream for reading data. If a connection failes // while reading, it will make additional requests to reestablish a connection and // continue reading. Specifying a RetryReaderOption's with MaxRetryRequests set to 0 // (the default), returns the original response body and no retries will be performed. func (r *DownloadResponse) Body(o RetryReaderOptions) io.ReadCloser { if o.MaxRetryRequests == 0 { // No additional retries return r.Response().Body } return NewRetryReader(r.ctx, r.Response(), r.getInfo, o, func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) { resp, err := r.b.Download(ctx, getInfo.Offset, getInfo.Count, BlobAccessConditions{ ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: getInfo.ETag}, }, false) if err != nil { return nil, err } return resp.Response(), err }, ) } // Response returns the raw HTTP response object. func (r DownloadResponse) Response() *http.Response { return r.r.Response() } // NewHTTPHeaders returns the user-modifiable properties for this blob. func (r DownloadResponse) NewHTTPHeaders() BlobHTTPHeaders { return r.r.NewHTTPHeaders() } // BlobContentMD5 returns the value for header x-ms-blob-content-md5. func (r DownloadResponse) BlobContentMD5() []byte { return r.r.BlobContentMD5() } // ContentMD5 returns the value for header Content-MD5. 
func (r DownloadResponse) ContentMD5() []byte { return r.r.ContentMD5() } // StatusCode returns the HTTP status code of the response, e.g. 200. func (r DownloadResponse) StatusCode() int { return r.r.StatusCode() } // Status returns the HTTP status message of the response, e.g. "200 OK". func (r DownloadResponse) Status() string { return r.r.Status() } // AcceptRanges returns the value for header Accept-Ranges. func (r DownloadResponse) AcceptRanges() string { return r.r.AcceptRanges() } // BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. func (r DownloadResponse) BlobCommittedBlockCount() int32 { return r.r.BlobCommittedBlockCount() } // BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. func (r DownloadResponse) BlobSequenceNumber() int64 { return r.r.BlobSequenceNumber() } // BlobType returns the value for header x-ms-blob-type. func (r DownloadResponse) BlobType() BlobType { return r.r.BlobType() } // CacheControl returns the value for header Cache-Control. func (r DownloadResponse) CacheControl() string { return r.r.CacheControl() } // ContentDisposition returns the value for header Content-Disposition. func (r DownloadResponse) ContentDisposition() string { return r.r.ContentDisposition() } // ContentEncoding returns the value for header Content-Encoding. func (r DownloadResponse) ContentEncoding() string { return r.r.ContentEncoding() } // ContentLanguage returns the value for header Content-Language. func (r DownloadResponse) ContentLanguage() string { return r.r.ContentLanguage() } // ContentLength returns the value for header Content-Length. func (r DownloadResponse) ContentLength() int64 { return r.r.ContentLength() } // ContentRange returns the value for header Content-Range. func (r DownloadResponse) ContentRange() string { return r.r.ContentRange() } // ContentType returns the value for header Content-Type. 
func (r DownloadResponse) ContentType() string { return r.r.ContentType() } // CopyCompletionTime returns the value for header x-ms-copy-completion-time. func (r DownloadResponse) CopyCompletionTime() time.Time { return r.r.CopyCompletionTime() } // CopyID returns the value for header x-ms-copy-id. func (r DownloadResponse) CopyID() string { return r.r.CopyID() } // CopyProgress returns the value for header x-ms-copy-progress. func (r DownloadResponse) CopyProgress() string { return r.r.CopyProgress() } // CopySource returns the value for header x-ms-copy-source. func (r DownloadResponse) CopySource() string { return r.r.CopySource() } // CopyStatus returns the value for header x-ms-copy-status. func (r DownloadResponse) CopyStatus() CopyStatusType { return r.r.CopyStatus() } // CopyStatusDescription returns the value for header x-ms-copy-status-description. func (r DownloadResponse) CopyStatusDescription() string { return r.r.CopyStatusDescription() } // Date returns the value for header Date. func (r DownloadResponse) Date() time.Time { return r.r.Date() } // ETag returns the value for header ETag. func (r DownloadResponse) ETag() ETag { return r.r.ETag() } // IsServerEncrypted returns the value for header x-ms-server-encrypted. func (r DownloadResponse) IsServerEncrypted() string { return r.r.IsServerEncrypted() } // LastModified returns the value for header Last-Modified. func (r DownloadResponse) LastModified() time.Time { return r.r.LastModified() } // LeaseDuration returns the value for header x-ms-lease-duration. func (r DownloadResponse) LeaseDuration() LeaseDurationType { return r.r.LeaseDuration() } // LeaseState returns the value for header x-ms-lease-state. func (r DownloadResponse) LeaseState() LeaseStateType { return r.r.LeaseState() } // LeaseStatus returns the value for header x-ms-lease-status. func (r DownloadResponse) LeaseStatus() LeaseStatusType { return r.r.LeaseStatus() } // RequestID returns the value for header x-ms-request-id. 
func (r DownloadResponse) RequestID() string { return r.r.RequestID() } // Version returns the value for header x-ms-version. func (r DownloadResponse) Version() string { return r.r.Version() } // NewMetadata returns user-defined key/value pairs. func (r DownloadResponse) NewMetadata() Metadata { return r.r.NewMetadata() } azure-storage-blob-go-0.10.0/go.mod000066400000000000000000000005771367515646300170360ustar00rootroot00000000000000module github.com/Azure/azure-storage-blob-go go 1.13 require ( github.com/Azure/azure-pipeline-go v0.2.2 github.com/Azure/go-autorest/autorest/adal v0.8.3 github.com/google/uuid v1.1.1 github.com/kr/pretty v0.1.0 // indirect github.com/pkg/errors v0.9.1 // indirect golang.org/x/sys v0.0.0-20190412213103-97732733099d gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 ) azure-storage-blob-go-0.10.0/go.sum000066400000000000000000000077121367515646300170610ustar00rootroot00000000000000github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.3 h1:O1AGG9Xig71FxdX9HO5pGNyZ7TbSyHaVg+5eJO/jSGw= github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod 
h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpuU+dXvI6dl9jG2mAWAZLX3r9s0PPiw= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= azure-storage-blob-go-0.10.0/swagger/000077500000000000000000000000001367515646300173565ustar00rootroot00000000000000azure-storage-blob-go-0.10.0/swagger/README.md000066400000000000000000000007751367515646300206460ustar00rootroot00000000000000# Azure Blob Storage for Golang > see https://aka.ms/autorest ### Generation ```bash cd swagger autorest README.md --use=@microsoft.azure/autorest.go@v3.0.63 gofmt -w Go_BlobStorage/* ``` ### Settings ``` yaml input-file: ./blob.json go: true output-folder: Go_BlobStorage namespace: azblob go-export-clients: false enable-xml: true file-prefix: zz_generated_ ``` ### TODO: Get rid of StorageError since we define it ### TODO: rfc3339Format = "2006-01-02T15:04:05Z" //This was wrong in the generated code azure-storage-blob-go-0.10.0/swagger/blob.json000066400000000000000000013256231367515646300212030ustar00rootroot00000000000000{ "swagger": "2.0", "info": { "title": "Azure Blob Storage", "version": "2019-02-02", "x-ms-code-generation-settings": { "header": 
"MIT", "strictSpecAdherence": false } }, "x-ms-parameterized-host": { "hostTemplate": "{url}", "useSchemePrefix": false, "positionInOperation": "first", "parameters": [ { "$ref": "#/parameters/Url" } ] }, "securityDefinitions": { "blob_shared_key": { "type": "apiKey", "name": "Authorization", "in": "header" } }, "schemes": [ "https" ], "consumes": [ "application/xml" ], "produces": [ "application/xml" ], "paths": {}, "x-ms-paths": { "/?restype=service&comp=properties": { "put": { "tags": [ "service" ], "operationId": "Service_SetProperties", "description": "Sets properties for a storage account's Blob service endpoint, including properties for Storage Analytics and CORS (Cross-Origin Resource Sharing) rules", "parameters": [ { "$ref": "#/parameters/StorageServiceProperties" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "202": { "description": "Success (Accepted)", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
} } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "get": { "tags": [ "service" ], "operationId": "Service_GetProperties", "description": "gets the properties of a storage account's Blob service, including properties for Storage Analytics and CORS (Cross-Origin Resource Sharing) rules.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Success.", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." } }, "schema": { "$ref": "#/definitions/StorageServiceProperties" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "service" ] }, { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "properties" ] } ] }, "/?restype=service&comp=stats": { "get": { "tags": [ "service" ], "operationId": "Service_GetStatistics", "description": "Retrieves statistics related to replication for the Blob service. 
It is only available on the secondary location endpoint when read-access geo-redundant replication is enabled for the storage account.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Success.", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } }, "schema": { "$ref": "#/definitions/StorageServiceStats" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "service" ] }, { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "stats" ] } ] }, "/?comp=list": { "get": { "tags": [ "service" ], "operationId": "Service_ListContainersSegment", "description": "The List Containers Segment operation returns a list of the containers under the specified account", "parameters": [ { "$ref": "#/parameters/Prefix" }, { "$ref": "#/parameters/Marker" }, { "$ref": "#/parameters/MaxResults" }, { "$ref": "#/parameters/ListContainersInclude" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Success.", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
} }, "schema": { "$ref": "#/definitions/ListContainersSegmentResponse" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } }, "x-ms-pageable": { "nextLinkName": "NextMarker" } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "list" ] } ] }, "/?restype=service&comp=userdelegationkey": { "post": { "tags": [ "service" ], "operationId": "Service_GetUserDelegationKey", "description": "Retrieves a user delegation key for the Blob service. This is only a valid operation when using bearer token authentication.", "parameters": [ { "$ref": "#/parameters/KeyInfo" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Success.", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } }, "schema": { "$ref": "#/definitions/UserDelegationKey" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "service" ] }, { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "userdelegationkey" ] } ] }, "/?restype=account&comp=properties": { "get": { "tags": [ "service" ], "operationId": "Service_GetAccountInfo", "description": "Returns the sku name and account kind ", "parameters": [ { "$ref": "#/parameters/ApiVersionParameter" } ], "responses": { "200": { "description": "Success (OK)", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-sku-name": { "x-ms-client-name": "SkuName", "type": "string", "enum": [ "Standard_LRS", "Standard_GRS", "Standard_RAGRS", "Standard_ZRS", "Premium_LRS" ], "x-ms-enum": { "name": "SkuName", "modelAsString": false }, "description": "Identifies the sku name of the account" }, "x-ms-account-kind": { "x-ms-client-name": "AccountKind", "type": "string", "enum": [ "Storage", "BlobStorage", "StorageV2" ], "x-ms-enum": { "name": "AccountKind", "modelAsString": false }, "description": "Identifies the account kind" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "account" ] }, { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "properties" ] } ] }, "/?comp=batch": { "post": { "tags": [ "service" ], "operationId": "Service_SubmitBatch", "description": "The Batch operation allows multiple API calls to be embedded into a single HTTP request.", "parameters": [ { "$ref": "#/parameters/Body" }, { "$ref": "#/parameters/ContentLength" }, { "$ref": "#/parameters/MultipartContentType" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Success.", "headers": { "Content-Type": { "type": "string", "description": "The media type of the body of the response. 
For batch requests, this is multipart/mixed; boundary=batchresponse_GUID" }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." } }, "schema": { "type": "object", "format": "file" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "batch" ] } ] }, "/{containerName}?restype=container": { "put": { "tags": [ "container" ], "operationId": "Container_Create", "description": "creates a new container under the specified account. If the container with the same name already exists, the operation fails", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/Metadata" }, { "$ref": "#/parameters/BlobPublicAccess" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "Success, Container created.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." 
}, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "get": { "tags": [ "container" ], "operationId": "Container_GetProperties", "description": "returns all user-defined metadata and system properties for the specified container. The data returned does not include the container's list of blobs", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Success", "headers": { "x-ms-meta": { "type": "string", "x-ms-client-name": "Metadata", "x-ms-header-collection-prefix": "x-ms-meta-" }, "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." 
}, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-lease-duration": { "x-ms-client-name": "LeaseDuration", "description": "When a blob is leased, specifies whether the lease is of infinite or fixed duration.", "type": "string", "enum": [ "infinite", "fixed" ], "x-ms-enum": { "name": "LeaseDurationType", "modelAsString": false } }, "x-ms-lease-state": { "x-ms-client-name": "LeaseState", "description": "Lease state of the blob.", "type": "string", "enum": [ "available", "leased", "expired", "breaking", "broken" ], "x-ms-enum": { "name": "LeaseStateType", "modelAsString": false } }, "x-ms-lease-status": { "x-ms-client-name": "LeaseStatus", "description": "The current lease status of the blob.", "type": "string", "enum": [ "locked", "unlocked" ], "x-ms-enum": { "name": "LeaseStatusType", "modelAsString": false } }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-blob-public-access": { "x-ms-client-name": "BlobPublicAccess", "description": "Indicated whether data in the container may be accessed publicly and the level of access", "type": "string", "enum": [ "container", "blob" ], "x-ms-enum": { "name": "PublicAccessType", "modelAsString": true } }, "x-ms-has-immutability-policy": { "x-ms-client-name": "HasImmutabilityPolicy", "description": "Indicates whether the container has an immutability policy set on it.", "type": "boolean" }, "x-ms-has-legal-hold": { "x-ms-client-name": "HasLegalHold", "description": "Indicates whether the container has a legal hold.", "type": "boolean" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "delete": { "tags": [ "container" ], "operationId": "Container_Delete", "description": "operation marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "202": { "description": "Accepted", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." 
}, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "container" ] } ] }, "/{containerName}?restype=container&comp=metadata": { "put": { "tags": [ "container" ], "operationId": "Container_SetMetadata", "description": "operation sets one or more user-defined name-value pairs for the specified container.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/Metadata" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Success", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. 
Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "container" ] }, { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "metadata" ] } ] }, "/{containerName}?restype=container&comp=acl": { "get": { "tags": [ "container" ], "operationId": "Container_GetAccessPolicy", "description": "gets the permissions for the specified container. 
The permissions indicate whether container data may be accessed publicly.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Success", "headers": { "x-ms-blob-public-access": { "x-ms-client-name": "BlobPublicAccess", "description": "Indicated whether data in the container may be accessed publicly and the level of access", "type": "string", "enum": [ "container", "blob" ], "x-ms-enum": { "name": "PublicAccessType", "modelAsString": true } }, "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } }, "schema": { "$ref": "#/definitions/SignedIdentifiers" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "put": { "tags": [ "container" ], "operationId": "Container_SetAccessPolicy", "description": "sets the permissions for the specified container. The permissions indicate whether blobs in a container may be accessed publicly.", "parameters": [ { "$ref": "#/parameters/ContainerAcl" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/BlobPublicAccess" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Success.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." 
}, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "container" ] }, { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "acl" ] } ] }, "/{containerName}?comp=lease&restype=container&acquire": { "put": { "tags": [ "container" ], "operationId": "Container_AcquireLease", "description": "[Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseDuration" }, { "$ref": "#/parameters/ProposedLeaseIdOptional" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The Acquire operation completed successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." 
}, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-lease-id": { "x-ms-client-name": "LeaseId", "type": "string", "description": "Uniquely identifies a container's lease" }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "lease" ] }, { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "container" ] }, { "name": "x-ms-lease-action", "x-ms-client-name": "action", "in": "header", "required": true, "type": "string", "enum": [ "acquire" ], "x-ms-enum": { "name": "LeaseAction", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Describes what lease action to take." 
} ] }, "/{containerName}?comp=lease&restype=container&release": { "put": { "tags": [ "container" ], "operationId": "Container_ReleaseLease", "description": "[Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdRequired" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The Release operation completed successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "lease" ] }, { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "container" ] }, { "name": "x-ms-lease-action", "x-ms-client-name": "action", "in": "header", "required": true, "type": "string", "enum": [ "release" ], "x-ms-enum": { "name": "LeaseAction", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Describes what lease action to take." } ] }, "/{containerName}?comp=lease&restype=container&renew": { "put": { "tags": [ "container" ], "operationId": "Container_RenewLease", "description": "[Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdRequired" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The Renew operation completed successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. 
Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-lease-id": { "x-ms-client-name": "LeaseId", "type": "string", "description": "Uniquely identifies a container's lease" }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "lease" ] }, { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "container" ] }, { "name": "x-ms-lease-action", "x-ms-client-name": "action", "in": "header", "required": true, "type": "string", "enum": [ "renew" ], "x-ms-enum": { "name": "LeaseAction", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Describes what lease action to take." 
} ] }, "/{containerName}?comp=lease&restype=container&break": { "put": { "tags": [ "container" ], "operationId": "Container_BreakLease", "description": "[Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseBreakPeriod" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "202": { "description": "The Break operation completed successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-lease-time": { "x-ms-client-name": "LeaseTime", "type": "integer", "description": "Approximate time remaining in the lease period, in seconds." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "lease" ] }, { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "container" ] }, { "name": "x-ms-lease-action", "x-ms-client-name": "action", "in": "header", "required": true, "type": "string", "enum": [ "break" ], "x-ms-enum": { "name": "LeaseAction", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Describes what lease action to take." } ] }, "/{containerName}?comp=lease&restype=container&change": { "put": { "tags": [ "container" ], "operationId": "Container_ChangeLease", "description": "[Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdRequired" }, { "$ref": "#/parameters/ProposedLeaseIdRequired" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The Change operation completed successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. 
Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-lease-id": { "x-ms-client-name": "LeaseId", "type": "string", "description": "Uniquely identifies a container's lease" }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "lease" ] }, { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "container" ] }, { "name": "x-ms-lease-action", "x-ms-client-name": "action", "in": "header", "required": true, "type": "string", "enum": [ "change" ], "x-ms-enum": { "name": "LeaseAction", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Describes what lease action to take." 
} ] }, "/{containerName}?restype=container&comp=list&flat": { "get": { "tags": [ "containers" ], "operationId": "Container_ListBlobFlatSegment", "description": "[Update] The List Blobs operation returns a list of the blobs under the specified container", "parameters": [ { "$ref": "#/parameters/Prefix" }, { "$ref": "#/parameters/Marker" }, { "$ref": "#/parameters/MaxResults" }, { "$ref": "#/parameters/ListBlobsInclude" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Success.", "headers": { "Content-Type": { "type": "string", "description": "The media type of the body of the response. For List Blobs this is 'application/xml'" }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } }, "schema": { "$ref": "#/definitions/ListBlobsFlatSegmentResponse" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } }, "x-ms-pageable": { "nextLinkName": "NextMarker" } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "container" ] }, { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "list" ] } ] }, "/{containerName}?restype=container&comp=list&hierarchy": { "get": { "tags": [ "containers" ], "operationId": "Container_ListBlobHierarchySegment", "description": "[Update] The List Blobs operation returns a list of the blobs under the specified container", "parameters": [ { "$ref": "#/parameters/Prefix" }, { "$ref": "#/parameters/Delimiter" }, { "$ref": "#/parameters/Marker" }, { "$ref": "#/parameters/MaxResults" }, { "$ref": "#/parameters/ListBlobsInclude" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Success.", "headers": { "Content-Type": { "type": "string", "description": "The media type of the body of the response. For List Blobs this is 'application/xml'" }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." 
}, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } }, "schema": { "$ref": "#/definitions/ListBlobsHierarchySegmentResponse" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } }, "x-ms-pageable": { "nextLinkName": "NextMarker" } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "container" ] }, { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "list" ] } ] }, "/{containerName}?restype=account&comp=properties": { "get": { "tags": [ "container" ], "operationId": "Container_GetAccountInfo", "description": "Returns the sku name and account kind ", "parameters": [ { "$ref": "#/parameters/ApiVersionParameter" } ], "responses": { "200": { "description": "Success (OK)", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-sku-name": { "x-ms-client-name": "SkuName", "type": "string", "enum": [ "Standard_LRS", "Standard_GRS", "Standard_RAGRS", "Standard_ZRS", "Premium_LRS" ], "x-ms-enum": { "name": "SkuName", "modelAsString": false }, "description": "Identifies the sku name of the account" }, "x-ms-account-kind": { "x-ms-client-name": "AccountKind", "type": "string", "enum": [ "Storage", "BlobStorage", "StorageV2" ], "x-ms-enum": { "name": "AccountKind", "modelAsString": false }, "description": "Identifies the account kind" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "account" ] }, { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "properties" ] } ] }, "/{filesystem}/{path}?resource=directory&Create": { "put": { "tags": [ "directory" ], "operationId": "Directory_Create", "description": "Create a directory. By default, the destination is overwritten and if the destination already exists and has a lease the lease is broken. This operation supports conditional HTTP requests. For more information, see [Specifying Conditional Headers for Blob Service Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). 
To fail if the destination already exists, use a conditional request with If-None-Match: \"*\".", "consumes": [ "application/octet-stream" ], "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/DirectoryProperties" }, { "$ref": "#/parameters/PosixPermissions" }, { "$ref": "#/parameters/PosixUmask" }, { "$ref": "#/parameters/XMsCacheControl" }, { "$ref": "#/parameters/XMsContentType" }, { "$ref": "#/parameters/XMsContentEncoding" }, { "$ref": "#/parameters/XMsContentLanguage" }, { "$ref": "#/parameters/XMsContentDisposition" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The file or directory was created.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "An HTTP entity tag associated with the file or directory." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "The date and time the file or directory was last modified. Write operations on the file or directory update the last modified time." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "A server-generated UUID recorded in the analytics logs for troubleshooting and correlation." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "The version of the REST protocol used to process the request." }, "Content-Length": { "type": "integer", "format": "int64", "description": "The size of the resource in bytes."
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "A UTC date/time value generated by the service that indicates the time at which the response was initiated." } } }, "default": { "description": "Failure", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "A server-generated UUID recorded in the analytics logs for troubleshooting and correlation." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "The version of the REST protocol used to process the request." } }, "schema": { "$ref": "#/definitions/DataLakeStorageError" } } } }, "parameters": [ { "name": "resource", "in": "query", "required": true, "type": "string", "enum": [ "directory" ] } ] }, "/{filesystem}/{path}?DirectoryRename": { "put": { "tags": [ "directory" ], "operationId": "Directory_Rename", "description": "Rename a directory. By default, the destination is overwritten and if the destination already exists and has a lease the lease is broken. This operation supports conditional HTTP requests. For more information, see [Specifying Conditional Headers for Blob Service Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). 
To fail if the destination already exists, use a conditional request with If-None-Match: \"*\".", "consumes": [ "application/octet-stream" ], "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/Continuation" }, { "$ref": "#/parameters/PathRenameMode" }, { "$ref": "#/parameters/FileRenameSource" }, { "$ref": "#/parameters/DirectoryProperties" }, { "$ref": "#/parameters/PosixPermissions" }, { "$ref": "#/parameters/PosixUmask" }, { "$ref": "#/parameters/XMsCacheControl" }, { "$ref": "#/parameters/XMsContentType" }, { "$ref": "#/parameters/XMsContentEncoding" }, { "$ref": "#/parameters/XMsContentLanguage" }, { "$ref": "#/parameters/XMsContentDisposition" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/SourceLeaseId" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/SourceIfModifiedSince" }, { "$ref": "#/parameters/SourceIfUnmodifiedSince" }, { "$ref": "#/parameters/SourceIfMatch" }, { "$ref": "#/parameters/SourceIfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The directory was renamed.", "headers": { "x-ms-continuation": { "x-ms-client-name": "marker", "type": "string", "description": "When renaming a directory, the number of paths that are renamed with each invocation is limited. If the number of paths to be renamed exceeds this limit, a continuation token is returned in this response header. When a continuation token is returned in the response, it must be specified in a subsequent invocation of the rename operation to continue renaming the directory." }, "ETag": { "type": "string", "format": "etag", "description": "An HTTP entity tag associated with the file or directory." 
}, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "The date and time the file or directory was last modified. Write operations on the file or directory update the last modified time." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "A server-generated UUID recorded in the analytics logs for troubleshooting and correlation." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "The version of the REST protocol used to process the request." }, "Content-Length": { "type": "integer", "format": "int64", "description": "The size of the resource in bytes." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "A UTC date/time value generated by the service that indicates the time at which the response was initiated." } } }, "default": { "description": "Failure", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "A server-generated UUID recorded in the analytics logs for troubleshooting and correlation." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "The version of the REST protocol used to process the request."
} }, "schema": { "$ref": "#/definitions/DataLakeStorageError" } } } } }, "/{filesystem}/{path}?DirectoryDelete": { "delete": { "tags": [ "directory" ], "operationId": "Directory_Delete", "description": "Deletes the directory", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/RecursiveDirectoryDelete" }, { "$ref": "#/parameters/Continuation" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The directory was deleted.", "headers": { "x-ms-continuation": { "x-ms-client-name": "marker", "type": "string", "description": "When deleting a directory, the number of paths that are deleted with each invocation is limited. If the number of paths to be deleted exceeds this limit, a continuation token is returned in this response header. When a continuation token is returned in the response, it must be specified in a subsequent invocation of the delete operation to continue deleting the directory." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "A server-generated UUID recorded in the analytics logs for troubleshooting and correlation." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "The version of the REST protocol used to process the request." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "A UTC date/time value generated by the service that indicates the time at which the response was initiated."
} } }, "default": { "description": "Failure", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "A server-generated UUID recorded in the analytics logs for troubleshooting and correlation." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "The version of the REST protocol used to process the request." } }, "schema": { "$ref": "#/definitions/DataLakeStorageError" } } } } }, "/{filesystem}/{path}?action=setAccessControl&directory": { "patch": { "tags": [ "directory" ], "operationId": "Directory_SetAccessControl", "description": "Set the owner, group, permissions, or access control list for a directory.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/Owner" }, { "$ref": "#/parameters/Group" }, { "$ref": "#/parameters/PosixPermissions" }, { "$ref": "#/parameters/PosixAcl" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/ClientRequestId" }, { "$ref": "#/parameters/ApiVersionParameter" } ], "responses": { "200": { "description": "Set directory access control response.", "headers": { "Date": { "type": "string", "format": "date-time-rfc1123", "description": "A UTC date/time value generated by the service that indicates the time at which the response was initiated." }, "ETag": { "type": "string", "format": "etag", "description": "An HTTP entity tag associated with the file or directory." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "The date and time the file or directory was last modified. 
Write operations on the file or directory update the last modified time." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "A server-generated UUID recorded in the analytics logs for troubleshooting and correlation." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "The version of the REST protocol used to process the request." } } }, "default": { "description": "Failure", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "A server-generated UUID recorded in the analytics logs for troubleshooting and correlation." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "The version of the REST protocol used to process the request." 
} }, "schema": { "$ref": "#/definitions/DataLakeStorageError" } } } }, "parameters": [ { "name": "action", "in": "query", "required": true, "type": "string", "enum": [ "setAccessControl" ] } ] }, "/{filesystem}/{path}?action=getAccessControl&directory": { "head": { "tags": [ "directory" ], "operationId": "Directory_GetAccessControl", "description": "Get the owner, group, permissions, or access control list for a directory.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/Upn" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/ClientRequestId" }, { "$ref": "#/parameters/ApiVersionParameter" } ], "responses": { "200": { "description": "Get directory access control response.", "headers": { "Date": { "type": "string", "format": "date-time-rfc1123", "description": "A UTC date/time value generated by the service that indicates the time at which the response was initiated." }, "ETag": { "type": "string", "format": "etag", "description": "An HTTP entity tag associated with the file or directory." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "The data and time the file or directory was last modified. Write operations on the file or directory update the last modified time." }, "x-ms-owner": { "description": "The owner of the file or directory. Included in the response if Hierarchical Namespace is enabled for the account.", "type": "string" }, "x-ms-group": { "description": "The owning group of the file or directory. Included in the response if Hierarchical Namespace is enabled for the account.", "type": "string" }, "x-ms-permissions": { "description": "The POSIX access permissions for the file owner, the file owning group, and others. 
Included in the response if Hierarchical Namespace is enabled for the account.", "type": "string" }, "x-ms-acl": { "description": "The POSIX access control list for the file or directory. Included in the response only if the action is \"getAccessControl\" and Hierarchical Namespace is enabled for the account.", "type": "string" }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "A server-generated UUID recorded in the analytics logs for troubleshooting and correlation." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "The version of the REST protocol used to process the request." } } }, "default": { "description": "Failure", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "A server-generated UUID recorded in the analytics logs for troubleshooting and correlation." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "The version of the REST protocol used to process the request." } }, "schema": { "$ref": "#/definitions/DataLakeStorageError" } } } }, "parameters": [ { "name": "action", "in": "query", "required": true, "type": "string", "enum": [ "getAccessControl" ] } ] }, "/{containerName}/{blob}": { "get": { "tags": [ "blob" ], "operationId": "Blob_Download", "description": "The Download operation reads or downloads a blob from the system, including its metadata and properties. 
You can also call Download to read a snapshot.", "parameters": [ { "$ref": "#/parameters/Snapshot" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/Range" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/GetRangeContentMD5" }, { "$ref": "#/parameters/GetRangeContentCRC64" }, { "$ref": "#/parameters/EncryptionKey" }, { "$ref": "#/parameters/EncryptionKeySha256" }, { "$ref": "#/parameters/EncryptionAlgorithm" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Returns the content of the entire blob.", "headers": { "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-meta": { "type": "string", "x-ms-client-name": "Metadata", "x-ms-header-collection-prefix": "x-ms-meta-" }, "Content-Length": { "type": "integer", "format": "int64", "description": "The number of bytes present in the response body." }, "Content-Type": { "type": "string", "description": "The media type of the body of the response. For Download Blob this is 'application/octet-stream'" }, "Content-Range": { "type": "string", "description": "Indicates the range of bytes returned in the event that the client requested a subset of the blob by setting the 'Range' request header." }, "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." 
}, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." }, "Content-Encoding": { "type": "string", "description": "This header returns the value that was specified for the Content-Encoding request header" }, "Cache-Control": { "type": "string", "description": "This header is returned if it was previously specified for the blob." }, "Content-Disposition": { "type": "string", "description": "This header returns the value that was specified for the 'x-ms-blob-content-disposition' header. The Content-Disposition response header field conveys additional information about how to process the response payload, and also can be used to attach additional metadata. For example, if set to attachment, it indicates that the user-agent should not display the response, but instead show a Save As dialog with a filename other than the blob name specified." }, "Content-Language": { "type": "string", "description": "This header returns the value that was specified for the Content-Language request header." }, "x-ms-blob-sequence-number": { "x-ms-client-name": "BlobSequenceNumber", "type": "integer", "format": "int64", "description": "The current sequence number for a page blob. This header is not returned for block blobs or append blobs" }, "x-ms-blob-type": { "x-ms-client-name": "BlobType", "description": "The blob's type.", "type": "string", "enum": [ "BlockBlob", "PageBlob", "AppendBlob" ], "x-ms-enum": { "name": "BlobType", "modelAsString": false } }, "x-ms-copy-completion-time": { "x-ms-client-name": "CopyCompletionTime", "type": "string", "format": "date-time-rfc1123", "description": "Conclusion time of the last attempted Copy Blob operation where this blob was the destination blob. This value can specify the time of a completed, aborted, or failed copy attempt. 
This header does not appear if a copy is pending, if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List." }, "x-ms-copy-status-description": { "x-ms-client-name": "CopyStatusDescription", "type": "string", "description": "Only appears when x-ms-copy-status is failed or pending. Describes the cause of the last fatal or non-fatal copy operation failure. This header does not appear if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List" }, "x-ms-copy-id": { "x-ms-client-name": "CopyId", "type": "string", "description": "String identifier for this copy operation. Use with Get Blob Properties to check the status of this copy operation, or pass to Abort Copy Blob to abort a pending copy." }, "x-ms-copy-progress": { "x-ms-client-name": "CopyProgress", "type": "string", "description": "Contains the number of bytes copied and the total bytes in the source in the last attempted Copy Blob operation where this blob was the destination blob. Can show between 0 and Content-Length bytes copied. This header does not appear if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List" }, "x-ms-copy-source": { "x-ms-client-name": "CopySource", "type": "string", "description": "URL up to 2 KB in length that specifies the source blob or file used in the last attempted Copy Blob operation where this blob was the destination blob. This header does not appear if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List." 
}, "x-ms-copy-status": { "x-ms-client-name": "CopyStatus", "description": "State of the copy operation identified by x-ms-copy-id.", "type": "string", "enum": [ "pending", "success", "aborted", "failed" ], "x-ms-enum": { "name": "CopyStatusType", "modelAsString": false } }, "x-ms-lease-duration": { "x-ms-client-name": "LeaseDuration", "description": "When a blob is leased, specifies whether the lease is of infinite or fixed duration.", "type": "string", "enum": [ "infinite", "fixed" ], "x-ms-enum": { "name": "LeaseDurationType", "modelAsString": false } }, "x-ms-lease-state": { "x-ms-client-name": "LeaseState", "description": "Lease state of the blob.", "type": "string", "enum": [ "available", "leased", "expired", "breaking", "broken" ], "x-ms-enum": { "name": "LeaseStateType", "modelAsString": false } }, "x-ms-lease-status": { "x-ms-client-name": "LeaseStatus", "description": "The current lease status of the blob.", "type": "string", "enum": [ "locked", "unlocked" ], "x-ms-enum": { "name": "LeaseStatusType", "modelAsString": false } }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Accept-Ranges": { "type": "string", "description": "Indicates that the service supports requests for partial blob content." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-blob-committed-block-count": { "x-ms-client-name": "BlobCommittedBlockCount", "type": "integer", "description": "The number of committed blocks present in the blob. This header is returned only for append blobs." }, "x-ms-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the blob data and application metadata are completely encrypted using the specified algorithm. Otherwise, the value is set to false (when the blob is unencrypted, or if only parts of the blob/application metadata are encrypted)." }, "x-ms-encryption-key-sha256": { "x-ms-client-name": "EncryptionKeySha256", "type": "string", "description": "The SHA-256 hash of the encryption key used to encrypt the blob. This header is only returned when the blob was encrypted with a customer-provided key." }, "x-ms-blob-content-md5": { "x-ms-client-name": "BlobContentMD5", "type": "string", "format": "byte", "description": "If the blob has a MD5 hash, and if request contains range header (Range or x-ms-range), this response header is returned with the value of the whole blob's MD5 value. This value may or may not be equal to the value returned in Content-MD5 header, with the latter calculated from the requested range" } }, "schema": { "type": "object", "format": "file" } }, "206": { "description": "Returns the content of a specified range of the blob.", "headers": { "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." 
}, "x-ms-meta": { "type": "string", "x-ms-client-name": "Metadata", "x-ms-header-collection-prefix": "x-ms-meta-" }, "Content-Length": { "type": "integer", "format": "int64", "description": "The number of bytes present in the response body." }, "Content-Type": { "type": "string", "description": "The media type of the body of the response. For Download Blob this is 'application/octet-stream'" }, "Content-Range": { "type": "string", "description": "Indicates the range of bytes returned in the event that the client requested a subset of the blob by setting the 'Range' request header." }, "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." }, "Content-Encoding": { "type": "string", "description": "This header returns the value that was specified for the Content-Encoding request header" }, "Cache-Control": { "type": "string", "description": "This header is returned if it was previously specified for the blob." }, "Content-Disposition": { "type": "string", "description": "This header returns the value that was specified for the 'x-ms-blob-content-disposition' header. The Content-Disposition response header field conveys additional information about how to process the response payload, and also can be used to attach additional metadata. For example, if set to attachment, it indicates that the user-agent should not display the response, but instead show a Save As dialog with a filename other than the blob name specified." }, "Content-Language": { "type": "string", "description": "This header returns the value that was specified for the Content-Language request header." 
}, "x-ms-blob-sequence-number": { "x-ms-client-name": "BlobSequenceNumber", "type": "integer", "format": "int64", "description": "The current sequence number for a page blob. This header is not returned for block blobs or append blobs" }, "x-ms-blob-type": { "x-ms-client-name": "BlobType", "description": "The blob's type.", "type": "string", "enum": [ "BlockBlob", "PageBlob", "AppendBlob" ], "x-ms-enum": { "name": "BlobType", "modelAsString": false } }, "x-ms-content-crc64": { "x-ms-client-name": "ContentCrc64", "type": "string", "format": "byte", "description": "If the request is to read a specified range and the x-ms-range-get-content-crc64 is set to true, then the request returns a crc64 for the range, as long as the range size is less than or equal to 4 MB. If both x-ms-range-get-content-crc64 & x-ms-range-get-content-md5 is specified in the same request, it will fail with 400(Bad Request)" }, "x-ms-copy-completion-time": { "x-ms-client-name": "CopyCompletionTime", "type": "string", "format": "date-time-rfc1123", "description": "Conclusion time of the last attempted Copy Blob operation where this blob was the destination blob. This value can specify the time of a completed, aborted, or failed copy attempt. This header does not appear if a copy is pending, if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List." }, "x-ms-copy-status-description": { "x-ms-client-name": "CopyStatusDescription", "type": "string", "description": "Only appears when x-ms-copy-status is failed or pending. Describes the cause of the last fatal or non-fatal copy operation failure. 
This header does not appear if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List" }, "x-ms-copy-id": { "x-ms-client-name": "CopyId", "type": "string", "description": "String identifier for this copy operation. Use with Get Blob Properties to check the status of this copy operation, or pass to Abort Copy Blob to abort a pending copy." }, "x-ms-copy-progress": { "x-ms-client-name": "CopyProgress", "type": "string", "description": "Contains the number of bytes copied and the total bytes in the source in the last attempted Copy Blob operation where this blob was the destination blob. Can show between 0 and Content-Length bytes copied. This header does not appear if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List" }, "x-ms-copy-source": { "x-ms-client-name": "CopySource", "type": "string", "description": "URL up to 2 KB in length that specifies the source blob or file used in the last attempted Copy Blob operation where this blob was the destination blob. This header does not appear if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List." 
}, "x-ms-copy-status": { "x-ms-client-name": "CopyStatus", "description": "State of the copy operation identified by x-ms-copy-id.", "type": "string", "enum": [ "pending", "success", "aborted", "failed" ], "x-ms-enum": { "name": "CopyStatusType", "modelAsString": false } }, "x-ms-lease-duration": { "x-ms-client-name": "LeaseDuration", "description": "When a blob is leased, specifies whether the lease is of infinite or fixed duration.", "type": "string", "enum": [ "infinite", "fixed" ], "x-ms-enum": { "name": "LeaseDurationType", "modelAsString": false } }, "x-ms-lease-state": { "x-ms-client-name": "LeaseState", "description": "Lease state of the blob.", "type": "string", "enum": [ "available", "leased", "expired", "breaking", "broken" ], "x-ms-enum": { "name": "LeaseStateType", "modelAsString": false } }, "x-ms-lease-status": { "x-ms-client-name": "LeaseStatus", "description": "The current lease status of the blob.", "type": "string", "enum": [ "locked", "unlocked" ], "x-ms-enum": { "name": "LeaseStatusType", "modelAsString": false } }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Accept-Ranges": { "type": "string", "description": "Indicates that the service supports requests for partial blob content." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-blob-committed-block-count": { "x-ms-client-name": "BlobCommittedBlockCount", "type": "integer", "description": "The number of committed blocks present in the blob. This header is returned only for append blobs." }, "x-ms-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the blob data and application metadata are completely encrypted using the specified algorithm. Otherwise, the value is set to false (when the blob is unencrypted, or if only parts of the blob/application metadata are encrypted)." }, "x-ms-encryption-key-sha256": { "x-ms-client-name": "EncryptionKeySha256", "type": "string", "description": "The SHA-256 hash of the encryption key used to encrypt the blob. This header is only returned when the blob was encrypted with a customer-provided key." }, "x-ms-blob-content-md5": { "x-ms-client-name": "BlobContentMD5", "type": "string", "format": "byte", "description": "If the blob has a MD5 hash, and if request contains range header (Range or x-ms-range), this response header is returned with the value of the whole blob's MD5 value. This value may or may not be equal to the value returned in Content-MD5 header, with the latter calculated from the requested range" } }, "schema": { "type": "object", "format": "file" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "head": { "tags": [ "blob" ], "operationId": "Blob_GetProperties", "description": "The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system properties for the blob. 
It does not return the content of the blob.", "parameters": [ { "$ref": "#/parameters/Snapshot" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/EncryptionKey" }, { "$ref": "#/parameters/EncryptionKeySha256" }, { "$ref": "#/parameters/EncryptionAlgorithm" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Returns the properties of the blob.", "headers": { "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the blob was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-creation-time": { "x-ms-client-name": "CreationTime", "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the blob was created." }, "x-ms-meta": { "type": "string", "x-ms-client-name": "Metadata", "x-ms-header-collection-prefix": "x-ms-meta-" }, "x-ms-blob-type": { "x-ms-client-name": "BlobType", "description": "The blob's type.", "type": "string", "enum": [ "BlockBlob", "PageBlob", "AppendBlob" ], "x-ms-enum": { "name": "BlobType", "modelAsString": false } }, "x-ms-copy-completion-time": { "x-ms-client-name": "CopyCompletionTime", "type": "string", "format": "date-time-rfc1123", "description": "Conclusion time of the last attempted Copy Blob operation where this blob was the destination blob. This value can specify the time of a completed, aborted, or failed copy attempt. 
This header does not appear if a copy is pending, if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List." }, "x-ms-copy-status-description": { "x-ms-client-name": "CopyStatusDescription", "type": "string", "description": "Only appears when x-ms-copy-status is failed or pending. Describes the cause of the last fatal or non-fatal copy operation failure. This header does not appear if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List" }, "x-ms-copy-id": { "x-ms-client-name": "CopyId", "type": "string", "description": "String identifier for this copy operation. Use with Get Blob Properties to check the status of this copy operation, or pass to Abort Copy Blob to abort a pending copy." }, "x-ms-copy-progress": { "x-ms-client-name": "CopyProgress", "type": "string", "description": "Contains the number of bytes copied and the total bytes in the source in the last attempted Copy Blob operation where this blob was the destination blob. Can show between 0 and Content-Length bytes copied. This header does not appear if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List" }, "x-ms-copy-source": { "x-ms-client-name": "CopySource", "type": "string", "description": "URL up to 2 KB in length that specifies the source blob or file used in the last attempted Copy Blob operation where this blob was the destination blob. This header does not appear if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List." 
}, "x-ms-copy-status": { "x-ms-client-name": "CopyStatus", "description": "State of the copy operation identified by x-ms-copy-id.", "type": "string", "enum": [ "pending", "success", "aborted", "failed" ], "x-ms-enum": { "name": "CopyStatusType", "modelAsString": false } }, "x-ms-incremental-copy": { "x-ms-client-name": "IsIncrementalCopy", "type": "boolean", "description": "Included if the blob is incremental copy blob." }, "x-ms-copy-destination-snapshot": { "x-ms-client-name": "DestinationSnapshot", "type": "string", "description": "Included if the blob is incremental copy blob or incremental copy snapshot, if x-ms-copy-status is success. Snapshot time of the last successful incremental copy snapshot for this blob." }, "x-ms-lease-duration": { "x-ms-client-name": "LeaseDuration", "description": "When a blob is leased, specifies whether the lease is of infinite or fixed duration.", "type": "string", "enum": [ "infinite", "fixed" ], "x-ms-enum": { "name": "LeaseDurationType", "modelAsString": false } }, "x-ms-lease-state": { "x-ms-client-name": "LeaseState", "description": "Lease state of the blob.", "type": "string", "enum": [ "available", "leased", "expired", "breaking", "broken" ], "x-ms-enum": { "name": "LeaseStateType", "modelAsString": false } }, "x-ms-lease-status": { "x-ms-client-name": "LeaseStatus", "description": "The current lease status of the blob.", "type": "string", "enum": [ "locked", "unlocked" ], "x-ms-enum": { "name": "LeaseStatusType", "modelAsString": false } }, "Content-Length": { "type": "integer", "format": "int64", "description": "The number of bytes present in the response body." }, "Content-Type": { "type": "string", "description": "The content type specified for the blob. The default content type is 'application/octet-stream'" }, "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. 
If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." }, "Content-Encoding": { "type": "string", "description": "This header returns the value that was specified for the Content-Encoding request header" }, "Content-Disposition": { "type": "string", "description": "This header returns the value that was specified for the 'x-ms-blob-content-disposition' header. The Content-Disposition response header field conveys additional information about how to process the response payload, and also can be used to attach additional metadata. For example, if set to attachment, it indicates that the user-agent should not display the response, but instead show a Save As dialog with a filename other than the blob name specified." }, "Content-Language": { "type": "string", "description": "This header returns the value that was specified for the Content-Language request header." }, "Cache-Control": { "type": "string", "description": "This header is returned if it was previously specified for the blob." }, "x-ms-blob-sequence-number": { "x-ms-client-name": "BlobSequenceNumber", "type": "integer", "format": "int64", "description": "The current sequence number for a page blob. This header is not returned for block blobs or append blobs" }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." 
}, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "Accept-Ranges": { "type": "string", "description": "Indicates that the service supports requests for partial blob content." }, "x-ms-blob-committed-block-count": { "x-ms-client-name": "BlobCommittedBlockCount", "type": "integer", "description": "The number of committed blocks present in the blob. This header is returned only for append blobs." }, "x-ms-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the blob data and application metadata are completely encrypted using the specified algorithm. Otherwise, the value is set to false (when the blob is unencrypted, or if only parts of the blob/application metadata are encrypted)." }, "x-ms-encryption-key-sha256": { "x-ms-client-name": "EncryptionKeySha256", "type": "string", "description": "The SHA-256 hash of the encryption key used to encrypt the metadata. This header is only returned when the metadata was encrypted with a customer-provided key." }, "x-ms-access-tier": { "x-ms-client-name": "AccessTier", "type": "string", "description": "The tier of page blob on a premium storage account or tier of block blob on blob storage LRS accounts. For a list of allowed premium page blob tiers, see https://docs.microsoft.com/en-us/azure/virtual-machines/windows/premium-storage#features. For blob storage LRS accounts, valid values are Hot/Cool/Archive." }, "x-ms-access-tier-inferred": { "x-ms-client-name": "AccessTierInferred", "type": "boolean", "description": "For page blobs on a premium storage account only. 
If the access tier is not explicitly set on the blob, the tier is inferred based on its content length and this header will be returned with true value." }, "x-ms-archive-status": { "x-ms-client-name": "ArchiveStatus", "type": "string", "description": "For blob storage LRS accounts, valid values are rehydrate-pending-to-hot/rehydrate-pending-to-cool. If the blob is being rehydrated and is not complete then this header is returned indicating that rehydrate is pending and also tells the destination tier." }, "x-ms-access-tier-change-time": { "x-ms-client-name": "AccessTierChangeTime", "type": "string", "format": "date-time-rfc1123", "description": "The time the tier was changed on the object. This is only returned if the tier on the block blob was ever set." } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "delete": { "tags": [ "blob" ], "operationId": "Blob_Delete", "description": "If the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently removed from the storage account. If the storage account's soft delete feature is enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service retains the blob or snapshot for the number of days specified by the DeleteRetentionPolicy section of [Storage service properties] (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's data is permanently removed from the storage account. Note that you continue to be charged for the soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and specify the \"include=deleted\" query parameter to discover which blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. 
All other operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code of 404 (ResourceNotFound).", "parameters": [ { "$ref": "#/parameters/Snapshot" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/DeleteSnapshots" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "202": { "description": "The delete request was accepted and the blob will be deleted.", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } } }, "/{filesystem}/{path}?action=setAccessControl&blob": { "patch": { "tags": [ "blob" ], "operationId": "Blob_SetAccessControl", "description": "Set the owner, group, permissions, or access control list for a blob.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/Owner" }, { "$ref": "#/parameters/Group" }, { "$ref": "#/parameters/PosixPermissions" }, { "$ref": "#/parameters/PosixAcl" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/ClientRequestId" }, { "$ref": "#/parameters/ApiVersionParameter" } ], "responses": { "200": { "description": "Set blob access control response.", "headers": { "Date": { "type": "string", "format": "date-time-rfc1123", "description": "A UTC date/time value generated by the service that indicates the time at which the response was initiated." }, "ETag": { "type": "string", "format": "etag", "description": "An HTTP entity tag associated with the file or directory." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "The data and time the file or directory was last modified. Write operations on the file or directory update the last modified time." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "A server-generated UUID recorded in the analytics logs for troubleshooting and correlation." 
}, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "The version of the REST protocol used to process the request." } } }, "default": { "description": "Failure", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "A server-generated UUID recorded in the analytics logs for troubleshooting and correlation." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "The version of the REST protocol used to process the request." } }, "schema": { "$ref": "#/definitions/DataLakeStorageError" } } } }, "parameters": [ { "name": "action", "in": "query", "required": true, "type": "string", "enum": [ "setAccessControl" ] } ] }, "/{filesystem}/{path}?action=getAccessControl&blob": { "head": { "tags": [ "blob" ], "operationId": "Blob_GetAccessControl", "description": "Get the owner, group, permissions, or access control list for a blob.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/Upn" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/ClientRequestId" }, { "$ref": "#/parameters/ApiVersionParameter" } ], "responses": { "200": { "description": "Get blob access control response.", "headers": { "Date": { "type": "string", "format": "date-time-rfc1123", "description": "A UTC date/time value generated by the service that indicates the time at which the response was initiated." }, "ETag": { "type": "string", "format": "etag", "description": "An HTTP entity tag associated with the file or directory." 
}, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "The data and time the file or directory was last modified. Write operations on the file or directory update the last modified time." }, "x-ms-owner": { "description": "The owner of the file or directory. Included in the response if Hierarchical Namespace is enabled for the account.", "type": "string" }, "x-ms-group": { "description": "The owning group of the file or directory. Included in the response if Hierarchical Namespace is enabled for the account.", "type": "string" }, "x-ms-permissions": { "description": "The POSIX access permissions for the file owner, the file owning group, and others. Included in the response if Hierarchical Namespace is enabled for the account.", "type": "string" }, "x-ms-acl": { "description": "The POSIX access control list for the file or directory. Included in the response only if the action is \"getAccessControl\" and Hierarchical Namespace is enabled for the account.", "type": "string" }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "A server-generated UUID recorded in the analytics logs for troubleshooting and correlation." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "The version of the REST protocol used to process the request." } } }, "default": { "description": "Failure", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "A server-generated UUID recorded in the analytics logs for troubleshooting and correlation." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "The version of the REST protocol used to process the request." 
} }, "schema": { "$ref": "#/definitions/DataLakeStorageError" } } } }, "parameters": [ { "name": "action", "in": "query", "required": true, "type": "string", "enum": [ "getAccessControl" ] } ] }, "/{filesystem}/{path}?FileRename": { "put": { "tags": [ "blob" ], "operationId": "Blob_Rename", "description": "Rename a blob/file. By default, the destination is overwritten and if the destination already exists and has a lease the lease is broken. This operation supports conditional HTTP requests. For more information, see [Specifying Conditional Headers for Blob Service Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). To fail if the destination already exists, use a conditional request with If-None-Match: \"*\".", "consumes": [ "application/octet-stream" ], "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/PathRenameMode" }, { "$ref": "#/parameters/FileRenameSource" }, { "$ref": "#/parameters/DirectoryProperties" }, { "$ref": "#/parameters/PosixPermissions" }, { "$ref": "#/parameters/PosixUmask" }, { "$ref": "#/parameters/XMsCacheControl" }, { "$ref": "#/parameters/XMsContentType" }, { "$ref": "#/parameters/XMsContentEncoding" }, { "$ref": "#/parameters/XMsContentLanguage" }, { "$ref": "#/parameters/XMsContentDisposition" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/SourceLeaseId" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/SourceIfModifiedSince" }, { "$ref": "#/parameters/SourceIfUnmodifiedSince" }, { "$ref": "#/parameters/SourceIfMatch" }, { "$ref": "#/parameters/SourceIfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The file was renamed.", "headers": { "ETag": { "type": "string", "format": 
"etag", "description": "An HTTP entity tag associated with the file or directory." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "The data and time the file or directory was last modified. Write operations on the file or directory update the last modified time." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "A server-generated UUID recorded in the analytics logs for troubleshooting and correlation." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "The version of the REST protocol used to process the request." }, "Content-Length": { "type": "integer", "format": "int64", "description": "The size of the resource in bytes." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "A UTC date/time value generated by the service that indicates the time at which the response was initiated." } } }, "default": { "description": "Failure", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "A server-generated UUID recorded in the analytics logs for troubleshooting and correlation." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "The version of the REST protocol used to process the request." 
} }, "schema": { "$ref": "#/definitions/DataLakeStorageError" } } } } }, "/{containerName}/{blob}?PageBlob": { "put": { "tags": [ "blob" ], "operationId": "PageBlob_Create", "description": "The Create operation creates a new page blob.", "consumes": [ "application/octet-stream" ], "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ContentLength" }, { "$ref": "#/parameters/PremiumPageBlobAccessTierOptional" }, { "$ref": "#/parameters/BlobContentType" }, { "$ref": "#/parameters/BlobContentEncoding" }, { "$ref": "#/parameters/BlobContentLanguage" }, { "$ref": "#/parameters/BlobContentMD5" }, { "$ref": "#/parameters/BlobCacheControl" }, { "$ref": "#/parameters/Metadata" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/BlobContentDisposition" }, { "$ref": "#/parameters/EncryptionKey" }, { "$ref": "#/parameters/EncryptionKeySha256" }, { "$ref": "#/parameters/EncryptionAlgorithm" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/BlobContentLengthRequired" }, { "$ref": "#/parameters/BlobSequenceNumber" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The blob was created.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." 
}, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise." }, "x-ms-encryption-key-sha256": { "x-ms-client-name": "EncryptionKeySha256", "type": "string", "description": "The SHA-256 hash of the encryption key used to encrypt the blob. This header is only returned when the blob was encrypted with a customer-provided key." 
} } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "x-ms-blob-type", "x-ms-client-name": "blobType", "in": "header", "required": true, "x-ms-parameter-location": "method", "description": "Specifies the type of blob to create: block blob, page blob, or append blob.", "type": "string", "enum": [ "PageBlob" ], "x-ms-enum": { "name": "BlobType", "modelAsString": false } } ] }, "/{containerName}/{blob}?AppendBlob": { "put": { "tags": [ "blob" ], "operationId": "AppendBlob_Create", "description": "The Create Append Blob operation creates a new append blob.", "consumes": [ "application/octet-stream" ], "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ContentLength" }, { "$ref": "#/parameters/BlobContentType" }, { "$ref": "#/parameters/BlobContentEncoding" }, { "$ref": "#/parameters/BlobContentLanguage" }, { "$ref": "#/parameters/BlobContentMD5" }, { "$ref": "#/parameters/BlobCacheControl" }, { "$ref": "#/parameters/Metadata" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/BlobContentDisposition" }, { "$ref": "#/parameters/EncryptionKey" }, { "$ref": "#/parameters/EncryptionKeySha256" }, { "$ref": "#/parameters/EncryptionAlgorithm" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The blob was created.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." 
}, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise." }, "x-ms-encryption-key-sha256": { "x-ms-client-name": "EncryptionKeySha256", "type": "string", "description": "The SHA-256 hash of the encryption key used to encrypt the blob. This header is only returned when the blob was encrypted with a customer-provided key." 
} } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "x-ms-blob-type", "x-ms-client-name": "blobType", "in": "header", "required": true, "x-ms-parameter-location": "method", "description": "Specifies the type of blob to create: block blob, page blob, or append blob.", "type": "string", "enum": [ "AppendBlob" ], "x-ms-enum": { "name": "BlobType", "modelAsString": false } } ] }, "/{containerName}/{blob}?BlockBlob": { "put": { "tags": [ "blob" ], "operationId": "BlockBlob_Upload", "description": "The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported with Put Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a partial update of the content of a block blob, use the Put Block List operation.", "consumes": [ "application/octet-stream" ], "parameters": [ { "$ref": "#/parameters/Body" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ContentMD5" }, { "$ref": "#/parameters/ContentLength" }, { "$ref": "#/parameters/BlobContentType" }, { "$ref": "#/parameters/BlobContentEncoding" }, { "$ref": "#/parameters/BlobContentLanguage" }, { "$ref": "#/parameters/BlobContentMD5" }, { "$ref": "#/parameters/BlobCacheControl" }, { "$ref": "#/parameters/Metadata" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/BlobContentDisposition" }, { "$ref": "#/parameters/EncryptionKey" }, { "$ref": "#/parameters/EncryptionKeySha256" }, { "$ref": "#/parameters/EncryptionAlgorithm" }, { "$ref": "#/parameters/AccessTierOptional" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": 
"#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The blob was updated.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise." }, "x-ms-encryption-key-sha256": { "x-ms-client-name": "EncryptionKeySha256", "type": "string", "description": "The SHA-256 hash of the encryption key used to encrypt the blob. This header is only returned when the blob was encrypted with a customer-provided key." } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "x-ms-blob-type", "x-ms-client-name": "blobType", "in": "header", "required": true, "x-ms-parameter-location": "method", "description": "Specifies the type of blob to create: block blob, page blob, or append blob.", "type": "string", "enum": [ "BlockBlob" ], "x-ms-enum": { "name": "BlobType", "modelAsString": false } } ] }, "/{containerName}/{blob}?comp=undelete": { "put": { "tags": [ "blob" ], "operationId": "Blob_Undelete", "description": "Undelete a blob that was previously soft deleted", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The blob was undeleted successfully.", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." 
}, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated." } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "undelete" ] } ] }, "/{containerName}/{blob}?comp=properties&SetHTTPHeaders": { "put": { "tags": [ "blob" ], "operationId": "Blob_SetHTTPHeaders", "description": "The Set HTTP Headers operation sets system properties on the blob", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/BlobCacheControl" }, { "$ref": "#/parameters/BlobContentType" }, { "$ref": "#/parameters/BlobContentMD5" }, { "$ref": "#/parameters/BlobContentEncoding" }, { "$ref": "#/parameters/BlobContentLanguage" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/BlobContentDisposition" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The properties were set successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations 
conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-blob-sequence-number": { "x-ms-client-name": "BlobSequenceNumber", "type": "integer", "format": "int64", "description": "The current sequence number for a page blob. This header is not returned for block blobs or append blobs" }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "properties" ] } ] }, "/{containerName}/{blob}?comp=metadata": { "put": { "tags": [ "blob" ], "operationId": "Blob_SetMetadata", "description": "The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more name-value pairs", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/Metadata" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/EncryptionKey" }, { "$ref": "#/parameters/EncryptionKeySha256" }, { "$ref": "#/parameters/EncryptionAlgorithm" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The metadata was set successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." 
}, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise." }, "x-ms-encryption-key-sha256": { "x-ms-client-name": "EncryptionKeySha256", "type": "string", "description": "The SHA-256 hash of the encryption key used to encrypt the metadata. This header is only returned when the metadata was encrypted with a customer-provided key." 
} } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "metadata" ] } ] }, "/{containerName}/{blob}?comp=lease&acquire": { "put": { "tags": [ "blob" ], "operationId": "Blob_AcquireLease", "description": "[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseDuration" }, { "$ref": "#/parameters/ProposedLeaseIdOptional" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The Acquire operation completed successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the blob was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-lease-id": { "x-ms-client-name": "LeaseId", "type": "string", "description": "Uniquely identifies a blobs's lease" }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." 
}, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "lease" ] }, { "name": "x-ms-lease-action", "x-ms-client-name": "action", "in": "header", "required": true, "type": "string", "enum": [ "acquire" ], "x-ms-enum": { "name": "LeaseAction", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Describes what lease action to take." 
} ] }, "/{containerName}/{blob}?comp=lease&release": { "put": { "tags": [ "blob" ], "operationId": "Blob_ReleaseLease", "description": "[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdRequired" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The Release operation completed successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the blob was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "lease" ] }, { "name": "x-ms-lease-action", "x-ms-client-name": "action", "in": "header", "required": true, "type": "string", "enum": [ "release" ], "x-ms-enum": { "name": "LeaseAction", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Describes what lease action to take." } ] }, "/{containerName}/{blob}?comp=lease&renew": { "put": { "tags": [ "blob" ], "operationId": "Blob_RenewLease", "description": "[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdRequired" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The Renew operation completed successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the blob was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." 
}, "x-ms-lease-id": { "x-ms-client-name": "LeaseId", "type": "string", "description": "Uniquely identifies a blobs's lease" }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "lease" ] }, { "name": "x-ms-lease-action", "x-ms-client-name": "action", "in": "header", "required": true, "type": "string", "enum": [ "renew" ], "x-ms-enum": { "name": "LeaseAction", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Describes what lease action to take." 
} ] }, "/{containerName}/{blob}?comp=lease&change": { "put": { "tags": [ "blob" ], "operationId": "Blob_ChangeLease", "description": "[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdRequired" }, { "$ref": "#/parameters/ProposedLeaseIdRequired" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The Change operation completed successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the blob was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-lease-id": { "x-ms-client-name": "LeaseId", "type": "string", "description": "Uniquely identifies a blobs's lease" }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. 
This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "lease" ] }, { "name": "x-ms-lease-action", "x-ms-client-name": "action", "in": "header", "required": true, "type": "string", "enum": [ "change" ], "x-ms-enum": { "name": "LeaseAction", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Describes what lease action to take." } ] }, "/{containerName}/{blob}?comp=lease&break": { "put": { "tags": [ "blob" ], "operationId": "Blob_BreakLease", "description": "[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseBreakPeriod" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "202": { "description": "The Break operation completed successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the blob was last modified. 
Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-lease-time": { "x-ms-client-name": "LeaseTime", "type": "integer", "description": "Approximate time remaining in the lease period, in seconds." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "lease" ] }, { "name": "x-ms-lease-action", "x-ms-client-name": "action", "in": "header", "required": true, "type": "string", "enum": [ "break" ], "x-ms-enum": { "name": "LeaseAction", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Describes what lease action to take." 
} ] }, "/{containerName}/{blob}?comp=snapshot": { "put": { "tags": [ "blob" ], "operationId": "Blob_CreateSnapshot", "description": "The Create Snapshot operation creates a read-only snapshot of a blob", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/Metadata" }, { "$ref": "#/parameters/EncryptionKey" }, { "$ref": "#/parameters/EncryptionKeySha256" }, { "$ref": "#/parameters/EncryptionAlgorithm" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The snaptshot was taken successfully.", "headers": { "x-ms-snapshot": { "x-ms-client-name": "Snapshot", "type": "string", "description": "Uniquely identifies the snapshot and indicates the snapshot version. It may be used in subsequent requests to access the snapshot" }, "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." 
}, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "True if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise. For a snapshot request, this header is set to true when metadata was provided in the request and encrypted with a customer-provided key." } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "snapshot" ] } ] }, "/{containerName}/{blob}?comp=copy": { "put": { "tags": [ "blob" ], "operationId": "Blob_StartCopyFromURL", "description": "The Start Copy From URL operation copies a blob or an internet resource to a new blob.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/Metadata" }, { "$ref": "#/parameters/AccessTierOptional" }, { "$ref": "#/parameters/RehydratePriority" }, { "$ref": "#/parameters/SourceIfModifiedSince" }, { "$ref": "#/parameters/SourceIfUnmodifiedSince" }, { "$ref": "#/parameters/SourceIfMatch" }, { "$ref": "#/parameters/SourceIfNoneMatch" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/CopySource" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": 
"#/parameters/ClientRequestId" } ], "responses": { "202": { "description": "The copy blob has been accepted with the specified copy status.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-copy-id": { "x-ms-client-name": "CopyId", "type": "string", "description": "String identifier for this copy operation. Use with Get Blob Properties to check the status of this copy operation, or pass to Abort Copy Blob to abort a pending copy." 
}, "x-ms-copy-status": { "x-ms-client-name": "CopyStatus", "description": "State of the copy operation identified by x-ms-copy-id.", "type": "string", "enum": [ "pending", "success", "aborted", "failed" ], "x-ms-enum": { "name": "CopyStatusType", "modelAsString": false } } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [] }, "/{containerName}/{blob}?comp=copy&sync": { "put": { "tags": [ "blob" ], "operationId": "Blob_CopyFromURL", "description": "The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response until the copy is complete.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/Metadata" }, { "$ref": "#/parameters/AccessTierOptional" }, { "$ref": "#/parameters/SourceIfModifiedSince" }, { "$ref": "#/parameters/SourceIfUnmodifiedSince" }, { "$ref": "#/parameters/SourceIfMatch" }, { "$ref": "#/parameters/SourceIfNoneMatch" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/CopySource" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" }, { "$ref": "#/parameters/SourceContentMD5" } ], "responses": { "202": { "description": "The copy has completed.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. 
Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-copy-id": { "x-ms-client-name": "CopyId", "type": "string", "description": "String identifier for this copy operation." }, "x-ms-copy-status": { "x-ms-client-name": "CopyStatus", "description": "State of the copy operation identified by x-ms-copy-id.", "type": "string", "enum": [ "success" ], "x-ms-enum": { "name": "SyncCopyStatusType", "modelAsString": false } }, "Content-MD5": { "type": "string", "format": "byte", "description": "This response header is returned so that the client can check for the integrity of the copied content. This header is only returned if the source content MD5 was specified." }, "x-ms-content-crc64": { "type": "string", "format": "byte", "description": "This response header is returned so that the client can check for the integrity of the copied content." 
} } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "x-ms-requires-sync", "in": "header", "required": true, "type": "string", "enum": [ "true" ] } ] }, "/{containerName}/{blob}?comp=copy©id={CopyId}": { "put": { "tags": [ "blob" ], "operationId": "Blob_AbortCopyFromURL", "description": "The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination blob with zero length and full metadata.", "parameters": [ { "$ref": "#/parameters/CopyId" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "204": { "description": "The delete request was accepted and the blob will be deleted.", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "copy" ] }, { "name": "x-ms-copy-action", "x-ms-client-name": "copyActionAbortConstant", "in": "header", "required": true, "type": "string", "enum": [ "abort" ], "x-ms-parameter-location": "method" } ] }, "/{containerName}/{blob}?comp=tier": { "put": { "tags": [ "blobs" ], "operationId": "Blob_SetTier", "description": "The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage account and on a block blob in a blob storage account (locally redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does not update the blob's ETag.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/AccessTierRequired" }, { "$ref": "#/parameters/RehydratePriority" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" }, { "$ref": "#/parameters/LeaseIdOptional" } ], "responses": { "200": { "description": "The new tier will take effect immediately.", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." 
}, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and newer." } } }, "202": { "description": "The transition to the new tier is pending.", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and newer." } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "tier" ] } ] }, "/{containerName}/{blob}?restype=account&comp=properties": { "get": { "tags": [ "blob" ], "operationId": "Blob_GetAccountInfo", "description": "Returns the sku name and account kind ", "parameters": [ { "$ref": "#/parameters/ApiVersionParameter" } ], "responses": { "200": { "description": "Success (OK)", "headers": { "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." 
}, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-sku-name": { "x-ms-client-name": "SkuName", "type": "string", "enum": [ "Standard_LRS", "Standard_GRS", "Standard_RAGRS", "Standard_ZRS", "Premium_LRS" ], "x-ms-enum": { "name": "SkuName", "modelAsString": false }, "description": "Identifies the sku name of the account" }, "x-ms-account-kind": { "x-ms-client-name": "AccountKind", "type": "string", "enum": [ "Storage", "BlobStorage", "StorageV2" ], "x-ms-enum": { "name": "AccountKind", "modelAsString": false }, "description": "Identifies the account kind" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "restype", "in": "query", "required": true, "type": "string", "enum": [ "account" ] }, { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "properties" ] } ] }, "/{containerName}/{blob}?comp=block": { "put": { "tags": [ "blockblob" ], "operationId": "BlockBlob_StageBlock", "description": "The Stage Block operation creates a new block to be committed as part of a blob", "consumes": [ "application/octet-stream" ], "parameters": [ { "$ref": "#/parameters/BlockId" }, { "$ref": "#/parameters/ContentLength" }, { "$ref": "#/parameters/ContentMD5" }, { "$ref": "#/parameters/ContentCrc64" }, { "$ref": 
"#/parameters/Body" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/EncryptionKey" }, { "$ref": "#/parameters/EncryptionKeySha256" }, { "$ref": "#/parameters/EncryptionAlgorithm" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The block was created.", "headers": { "Content-MD5": { "type": "string", "format": "byte", "description": "This header is returned so that the client can check for message content integrity. The value of this header is computed by the Blob service; it is not necessarily the same value specified in the request headers." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-content-crc64": { "type": "string", "format": "byte", "description": "This header is returned so that the client can check for message content integrity. The value of this header is computed by the Blob service; it is not necessarily the same value specified in the request headers." 
}, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise." }, "x-ms-encryption-key-sha256": { "x-ms-client-name": "EncryptionKeySha256", "type": "string", "description": "The SHA-256 hash of the encryption key used to encrypt the block. This header is only returned when the block was encrypted with a customer-provided key." } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "block" ] } ] }, "/{containerName}/{blob}?comp=block&fromURL": { "put": { "tags": [ "blockblob" ], "operationId": "BlockBlob_StageBlockFromURL", "description": "The Stage Block operation creates a new block to be committed as part of a blob where the contents are read from a URL.", "parameters": [ { "$ref": "#/parameters/BlockId" }, { "$ref": "#/parameters/ContentLength" }, { "$ref": "#/parameters/SourceUrl" }, { "$ref": "#/parameters/SourceRange" }, { "$ref": "#/parameters/SourceContentMD5" }, { "$ref": "#/parameters/SourceContentCRC64" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/EncryptionKey" }, { "$ref": "#/parameters/EncryptionKeySha256" }, { "$ref": "#/parameters/EncryptionAlgorithm" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/SourceIfModifiedSince" }, { "$ref": "#/parameters/SourceIfUnmodifiedSince" }, { "$ref": "#/parameters/SourceIfMatch" }, { "$ref": "#/parameters/SourceIfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The block was created.", "headers": { "Content-MD5": { "type": "string", "format": "byte", 
"description": "This header is returned so that the client can check for message content integrity. The value of this header is computed by the Blob service; it is not necessarily the same value specified in the request headers." }, "x-ms-content-crc64": { "type": "string", "format": "byte", "description": "This header is returned so that the client can check for message content integrity. The value of this header is computed by the Blob service; it is not necessarily the same value specified in the request headers." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise." }, "x-ms-encryption-key-sha256": { "x-ms-client-name": "EncryptionKeySha256", "type": "string", "description": "The SHA-256 hash of the encryption key used to encrypt the block. This header is only returned when the block was encrypted with a customer-provided key." 
} } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "block" ] } ] }, "/{containerName}/{blob}?comp=blocklist": { "put": { "tags": [ "blockblob" ], "operationId": "BlockBlob_CommitBlockList", "description": "The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. In order to be written as part of a blob, a block must have been successfully written to the server in a prior Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that have changed, then committing the new and existing blocks together. You can do this by specifying whether to commit a block from the committed block list or from the uncommitted block list, or to commit the most recently uploaded version of the block, whichever list it may belong to.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/BlobCacheControl" }, { "$ref": "#/parameters/BlobContentType" }, { "$ref": "#/parameters/BlobContentEncoding" }, { "$ref": "#/parameters/BlobContentLanguage" }, { "$ref": "#/parameters/BlobContentMD5" }, { "$ref": "#/parameters/ContentMD5" }, { "$ref": "#/parameters/ContentCrc64" }, { "$ref": "#/parameters/Metadata" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/BlobContentDisposition" }, { "$ref": "#/parameters/EncryptionKey" }, { "$ref": "#/parameters/EncryptionKeySha256" }, { "$ref": "#/parameters/EncryptionAlgorithm" }, { "$ref": "#/parameters/AccessTierOptional" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "name": "blocks", "in": "body", "required": true, "schema": { "$ref": "#/definitions/BlockLookupList" } }, 
{ "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The block list was recorded.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "Content-MD5": { "type": "string", "format": "byte", "description": "This header is returned so that the client can check for message content integrity. This header refers to the content of the request, meaning, in this case, the list of blocks, and not the content of the blob itself." }, "x-ms-content-crc64": { "type": "string", "format": "byte", "description": "This header is returned so that the client can check for message content integrity. This header refers to the content of the request, meaning, in this case, the list of blocks, and not the content of the blob itself." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise." }, "x-ms-encryption-key-sha256": { "x-ms-client-name": "EncryptionKeySha256", "type": "string", "description": "The SHA-256 hash of the encryption key used to encrypt the blob. This header is only returned when the blob was encrypted with a customer-provided key." } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "get": { "tags": [ "blockblob" ], "operationId": "BlockBlob_GetBlockList", "description": "The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob", "parameters": [ { "$ref": "#/parameters/Snapshot" }, { "$ref": "#/parameters/BlockListType" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The page range was written.", "headers": { "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." 
}, "Content-Type": { "type": "string", "description": "The media type of the body of the response. For Get Block List this is 'application/xml'" }, "x-ms-blob-content-length": { "x-ms-client-name": "BlobContentLength", "type": "integer", "format": "int64", "description": "The size of the blob in bytes." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } }, "schema": { "$ref": "#/definitions/BlockList" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "blocklist" ] } ] }, "/{containerName}/{blob}?comp=page&update": { "put": { "tags": [ "pageblob" ], "operationId": "PageBlob_UploadPages", "description": "The Upload Pages operation writes a range of pages to a page blob", "consumes": [ "application/octet-stream" ], "parameters": [ { "$ref": "#/parameters/Body" }, { "$ref": "#/parameters/ContentLength" }, { "$ref": "#/parameters/ContentMD5" }, { "$ref": "#/parameters/ContentCrc64" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/Range" }, { 
"$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/EncryptionKey" }, { "$ref": "#/parameters/EncryptionKeySha256" }, { "$ref": "#/parameters/EncryptionAlgorithm" }, { "$ref": "#/parameters/IfSequenceNumberLessThanOrEqualTo" }, { "$ref": "#/parameters/IfSequenceNumberLessThan" }, { "$ref": "#/parameters/IfSequenceNumberEqualTo" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The page range was written.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." }, "x-ms-content-crc64": { "type": "string", "format": "byte", "description": "This header is returned so that the client can check for message content integrity. The value of this header is computed by the Blob service; it is not necessarily the same value specified in the request headers." }, "x-ms-blob-sequence-number": { "x-ms-client-name": "BlobSequenceNumber", "type": "integer", "format": "int64", "description": "The current sequence number for the page blob." 
}, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise." }, "x-ms-encryption-key-sha256": { "x-ms-client-name": "EncryptionKeySha256", "type": "string", "description": "The SHA-256 hash of the encryption key used to encrypt the pages. This header is only returned when the pages were encrypted with a customer-provided key." } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "page" ] }, { "name": "x-ms-page-write", "x-ms-client-name": "pageWrite", "in": "header", "required": true, "x-ms-parameter-location": "method", "description": "Required. You may specify one of the following options:\n - Update: Writes the bytes specified by the request body into the specified range. 
The Range and Content-Length headers must match to perform the update.\n - Clear: Clears the specified range and releases the space used in storage for that range. To clear a range, set the Content-Length header to zero, and the Range header to a value that indicates the range to clear, up to maximum blob size.", "type": "string", "enum": [ "update" ], "x-ms-enum": { "name": "PageWriteType", "modelAsString": false } } ] }, "/{containerName}/{blob}?comp=page&clear": { "put": { "tags": [ "pageblob" ], "operationId": "PageBlob_ClearPages", "description": "The Clear Pages operation clears a set of pages from a page blob", "consumes": [ "application/octet-stream" ], "parameters": [ { "$ref": "#/parameters/ContentLength" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/Range" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/EncryptionKey" }, { "$ref": "#/parameters/EncryptionKeySha256" }, { "$ref": "#/parameters/EncryptionAlgorithm" }, { "$ref": "#/parameters/IfSequenceNumberLessThanOrEqualTo" }, { "$ref": "#/parameters/IfSequenceNumberLessThan" }, { "$ref": "#/parameters/IfSequenceNumberEqualTo" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The page range was cleared.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. 
Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." }, "x-ms-content-crc64": { "type": "string", "format": "byte", "description": "This header is returned so that the client can check for message content integrity. The value of this header is computed by the Blob service; it is not necessarily the same value specified in the request headers." }, "x-ms-blob-sequence-number": { "x-ms-client-name": "BlobSequenceNumber", "type": "integer", "format": "int64", "description": "The current sequence number for the page blob." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "page" ] }, { "name": "x-ms-page-write", "x-ms-client-name": "pageWrite", "in": "header", "required": true, "x-ms-parameter-location": "method", "description": "Required. You may specify one of the following options:\n - Update: Writes the bytes specified by the request body into the specified range. The Range and Content-Length headers must match to perform the update.\n - Clear: Clears the specified range and releases the space used in storage for that range. To clear a range, set the Content-Length header to zero, and the Range header to a value that indicates the range to clear, up to maximum blob size.", "type": "string", "enum": [ "clear" ], "x-ms-enum": { "name": "PageWriteType", "modelAsString": false } } ] }, "/{containerName}/{blob}?comp=page&update&fromUrl": { "put": { "tags": [ "pageblob" ], "operationId": "PageBlob_UploadPagesFromURL", "description": "The Upload Pages operation writes a range of pages to a page blob where the contents are read from a URL", "consumes": [ "application/octet-stream" ], "parameters": [ { "$ref": "#/parameters/SourceUrl" }, { "$ref": "#/parameters/SourceRangeRequiredPutPageFromUrl" }, { "$ref": "#/parameters/SourceContentMD5" }, { "$ref": "#/parameters/SourceContentCRC64" }, { "$ref": "#/parameters/ContentLength" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/RangeRequiredPutPageFromUrl" }, { "$ref": "#/parameters/EncryptionKey" }, { "$ref": "#/parameters/EncryptionKeySha256" }, { "$ref": "#/parameters/EncryptionAlgorithm" }, { "$ref": 
"#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/IfSequenceNumberLessThanOrEqualTo" }, { "$ref": "#/parameters/IfSequenceNumberLessThan" }, { "$ref": "#/parameters/IfSequenceNumberEqualTo" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/SourceIfModifiedSince" }, { "$ref": "#/parameters/SourceIfUnmodifiedSince" }, { "$ref": "#/parameters/SourceIfMatch" }, { "$ref": "#/parameters/SourceIfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The page range was written.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." }, "x-ms-content-crc64": { "type": "string", "format": "byte", "description": "This header is returned so that the client can check for message content integrity. The value of this header is computed by the Blob service; it is not necessarily the same value specified in the request headers." }, "x-ms-blob-sequence-number": { "x-ms-client-name": "BlobSequenceNumber", "type": "integer", "format": "int64", "description": "The current sequence number for the page blob." 
}, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise." }, "x-ms-encryption-key-sha256": { "x-ms-client-name": "EncryptionKeySha256", "type": "string", "description": "The SHA-256 hash of the encryption key used to encrypt the blob. This header is only returned when the blob was encrypted with a customer-provided key." } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "page" ] }, { "name": "x-ms-page-write", "x-ms-client-name": "pageWrite", "in": "header", "required": true, "x-ms-parameter-location": "method", "description": "Required. You may specify one of the following options:\n - Update: Writes the bytes specified by the request body into the specified range. The Range and Content-Length headers must match to perform the update.\n - Clear: Clears the specified range and releases the space used in storage for that range. 
To clear a range, set the Content-Length header to zero, and the Range header to a value that indicates the range to clear, up to maximum blob size.", "type": "string", "enum": [ "update" ], "x-ms-enum": { "name": "PageWriteType", "modelAsString": false } } ] }, "/{containerName}/{blob}?comp=pagelist": { "get": { "tags": [ "pageblob" ], "operationId": "PageBlob_GetPageRanges", "description": "The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a page blob", "parameters": [ { "$ref": "#/parameters/Snapshot" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/Range" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Information on the page blob was found.", "headers": { "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "x-ms-blob-content-length": { "x-ms-client-name": "BlobContentLength", "type": "integer", "format": "int64", "description": "The size of the blob in bytes." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." 
}, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } }, "schema": { "$ref": "#/definitions/PageList" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "pagelist" ] } ] }, "/{containerName}/{blob}?comp=pagelist&diff": { "get": { "tags": [ "pageblob" ], "operationId": "PageBlob_GetPageRangesDiff", "description": "The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were changed between target blob and previous snapshot.", "parameters": [ { "$ref": "#/parameters/Snapshot" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/PrevSnapshot" }, { "$ref": "#/parameters/Range" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "Information on the page blob was found.", "headers": { "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. 
Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "x-ms-blob-content-length": { "x-ms-client-name": "BlobContentLength", "type": "integer", "format": "int64", "description": "The size of the blob in bytes." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } }, "schema": { "$ref": "#/definitions/PageList" } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "pagelist" ] } ] }, "/{containerName}/{blob}?comp=properties&Resize": { "put": { "tags": [ "pageblob" ], "operationId": "PageBlob_Resize", "description": "Resize the Blob", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/EncryptionKey" }, { "$ref": "#/parameters/EncryptionKeySha256" }, { "$ref": "#/parameters/EncryptionAlgorithm" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/BlobContentLengthRequired" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The Blob was resized successfully", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." 
}, "x-ms-blob-sequence-number": { "x-ms-client-name": "BlobSequenceNumber", "type": "integer", "format": "int64", "description": "The current sequence number for a page blob. This header is not returned for block blobs or append blobs" }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "properties" ] } ] }, "/{containerName}/{blob}?comp=properties&UpdateSequenceNumber": { "put": { "tags": [ "pageblob" ], "operationId": "PageBlob_UpdateSequenceNumber", "description": "Update the sequence number of the blob", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/SequenceNumberAction" }, { "$ref": "#/parameters/BlobSequenceNumber" }, { "$ref": "#/parameters/ApiVersionParameter" 
}, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "200": { "description": "The sequence numbers were updated successfully.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "x-ms-blob-sequence-number": { "x-ms-client-name": "BlobSequenceNumber", "type": "integer", "format": "int64", "description": "The current sequence number for a page blob. This header is not returned for block blobs or append blobs" }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." 
}, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "properties" ] } ] }, "/{containerName}/{blob}?comp=incrementalcopy": { "put": { "tags": [ "pageblob" ], "operationId": "PageBlob_CopyIncremental", "description": "The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. The snapshot is copied such that only the differential changes between the previously copied snapshot are transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual. This API is supported since REST version 2016-05-31.", "parameters": [ { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/CopySource" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "202": { "description": "The blob was copied.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." 
}, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-copy-id": { "x-ms-client-name": "CopyId", "type": "string", "description": "String identifier for this copy operation. Use with Get Blob Properties to check the status of this copy operation, or pass to Abort Copy Blob to abort a pending copy." }, "x-ms-copy-status": { "x-ms-client-name": "CopyStatus", "description": "State of the copy operation identified by x-ms-copy-id.", "type": "string", "enum": [ "pending", "success", "aborted", "failed" ], "x-ms-enum": { "name": "CopyStatusType", "modelAsString": false } } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "incrementalcopy" ] } ] }, "/{containerName}/{blob}?comp=appendblock": { "put": { "tags": [ "appendblob" ], "consumes": [ "application/octet-stream" ], "operationId": "AppendBlob_AppendBlock", "description": "The Append Block operation commits a new block of data to the end of an existing append blob. 
The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later.", "parameters": [ { "$ref": "#/parameters/Body" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ContentLength" }, { "$ref": "#/parameters/ContentMD5" }, { "$ref": "#/parameters/ContentCrc64" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/BlobConditionMaxSize" }, { "$ref": "#/parameters/BlobConditionAppendPos" }, { "$ref": "#/parameters/EncryptionKey" }, { "$ref": "#/parameters/EncryptionKeySha256" }, { "$ref": "#/parameters/EncryptionAlgorithm" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The block was created.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." }, "x-ms-content-crc64": { "type": "string", "format": "byte", "description": "This header is returned so that the client can check for message content integrity. 
The value of this header is computed by the Blob service; it is not necessarily the same value specified in the request headers." }, "x-ms-client-request-id": { "x-ms-client-name": "ClientRequestId", "type": "string", "description": "If a client request id header is sent in the request, this header will be present in the response with the same value." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-blob-append-offset": { "x-ms-client-name": "BlobAppendOffset", "type": "string", "description": "This response header is returned only for append operations. It returns the offset at which the block was committed, in bytes." }, "x-ms-blob-committed-block-count": { "x-ms-client-name": "BlobCommittedBlockCount", "type": "integer", "description": "The number of committed blocks present in the blob. This header is returned only for append blobs." }, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise." }, "x-ms-encryption-key-sha256": { "x-ms-client-name": "EncryptionKeySha256", "type": "string", "description": "The SHA-256 hash of the encryption key used to encrypt the block. This header is only returned when the block was encrypted with a customer-provided key." 
} } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "appendblock" ] } ] }, "/{containerName}/{blob}?comp=appendblock&fromUrl": { "put": { "tags": [ "appendblob" ], "operationId": "AppendBlob_AppendBlockFromUrl", "description": "The Append Block operation commits a new block of data to the end of an existing append blob where the contents are read from a source url. The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later.", "parameters": [ { "$ref": "#/parameters/SourceUrl" }, { "$ref": "#/parameters/SourceRange" }, { "$ref": "#/parameters/SourceContentMD5" }, { "$ref": "#/parameters/SourceContentCRC64" }, { "$ref": "#/parameters/Timeout" }, { "$ref": "#/parameters/ContentLength" }, { "$ref": "#/parameters/ContentMD5" }, { "$ref": "#/parameters/EncryptionKey" }, { "$ref": "#/parameters/EncryptionKeySha256" }, { "$ref": "#/parameters/EncryptionAlgorithm" }, { "$ref": "#/parameters/LeaseIdOptional" }, { "$ref": "#/parameters/BlobConditionMaxSize" }, { "$ref": "#/parameters/BlobConditionAppendPos" }, { "$ref": "#/parameters/IfModifiedSince" }, { "$ref": "#/parameters/IfUnmodifiedSince" }, { "$ref": "#/parameters/IfMatch" }, { "$ref": "#/parameters/IfNoneMatch" }, { "$ref": "#/parameters/SourceIfModifiedSince" }, { "$ref": "#/parameters/SourceIfUnmodifiedSince" }, { "$ref": "#/parameters/SourceIfMatch" }, { "$ref": "#/parameters/SourceIfNoneMatch" }, { "$ref": "#/parameters/ApiVersionParameter" }, { "$ref": "#/parameters/ClientRequestId" } ], "responses": { "201": { "description": "The block was created.", "headers": { "ETag": { "type": "string", "format": "etag", "description": "The ETag contains a value that you can 
use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes." }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123", "description": "Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob." }, "Content-MD5": { "type": "string", "format": "byte", "description": "If the blob has an MD5 hash and this operation is to read the full blob, this response header is returned so that the client can check for message content integrity." }, "x-ms-content-crc64": { "type": "string", "format": "byte", "description": "This header is returned so that the client can check for message content integrity. The value of this header is computed by the Blob service; it is not necessarily the same value specified in the request headers." }, "x-ms-request-id": { "x-ms-client-name": "RequestId", "type": "string", "description": "This header uniquely identifies the request that was made and can be used for troubleshooting the request." }, "x-ms-version": { "x-ms-client-name": "Version", "type": "string", "description": "Indicates the version of the Blob service used to execute the request. This header is returned for requests made against version 2009-09-19 and above." }, "Date": { "type": "string", "format": "date-time-rfc1123", "description": "UTC date/time value generated by the service that indicates the time at which the response was initiated" }, "x-ms-blob-append-offset": { "x-ms-client-name": "BlobAppendOffset", "type": "string", "description": "This response header is returned only for append operations. It returns the offset at which the block was committed, in bytes." }, "x-ms-blob-committed-block-count": { "x-ms-client-name": "BlobCommittedBlockCount", "type": "integer", "description": "The number of committed blocks present in the blob. 
This header is returned only for append blobs." }, "x-ms-encryption-key-sha256": { "x-ms-client-name": "EncryptionKeySha256", "type": "string", "description": "The SHA-256 hash of the encryption key used to encrypt the block. This header is only returned when the block was encrypted with a customer-provided key." }, "x-ms-request-server-encrypted": { "x-ms-client-name": "IsServerEncrypted", "type": "boolean", "description": "The value of this header is set to true if the contents of the request are successfully encrypted using the specified algorithm, and false otherwise." } } }, "default": { "description": "Failure", "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }, "schema": { "$ref": "#/definitions/StorageError" } } } }, "parameters": [ { "name": "comp", "in": "query", "required": true, "type": "string", "enum": [ "appendblock" ] } ] } }, "definitions": { "KeyInfo": { "type": "object", "required": [ "Start", "Expiry" ], "description": "Key information", "properties": { "Start": { "description": "The date-time the key is active in ISO 8601 UTC time", "type": "string" }, "Expiry": { "description": "The date-time the key expires in ISO 8601 UTC time", "type": "string" } } }, "UserDelegationKey": { "type": "object", "required": [ "SignedOid", "SignedTid", "SignedStart", "SignedExpiry", "SignedService", "SignedVersion", "Value" ], "description": "A user delegation key", "properties": { "SignedOid": { "description": "The Azure Active Directory object ID in GUID format.", "type": "string" }, "SignedTid": { "description": "The Azure Active Directory tenant ID in GUID format", "type": "string" }, "SignedStart": { "description": "The date-time the key is active", "type": "string", "format": "date-time" }, "SignedExpiry": { "description": "The date-time the key expires", "type": "string", "format": "date-time" }, "SignedService": { "description": "Abbreviation of the Azure Storage service that accepts the key", "type": "string" }, 
"SignedVersion": { "description": "The service version that created the key", "type": "string" }, "Value": { "description": "The key as a base64 string", "type": "string" } } }, "PublicAccessType": { "type": "string", "enum": [ "container", "blob" ], "x-ms-enum": { "name": "PublicAccessType", "modelAsString": true } }, "CopyStatus": { "type": "string", "enum": [ "pending", "success", "aborted", "failed" ], "x-ms-enum": { "name": "CopyStatusType", "modelAsString": false } }, "LeaseDuration": { "type": "string", "enum": [ "infinite", "fixed" ], "x-ms-enum": { "name": "LeaseDurationType", "modelAsString": false } }, "LeaseState": { "type": "string", "enum": [ "available", "leased", "expired", "breaking", "broken" ], "x-ms-enum": { "name": "LeaseStateType", "modelAsString": false } }, "LeaseStatus": { "type": "string", "enum": [ "locked", "unlocked" ], "x-ms-enum": { "name": "LeaseStatusType", "modelAsString": false } }, "StorageError": { "type": "object", "properties": { "Message": { "type": "string" } } }, "DataLakeStorageError": { "type": "object", "properties": { "error": { "description": "The service error response object.", "properties": { "Code": { "description": "The service error code.", "type": "string" }, "Message": { "description": "The service error message.", "type": "string" } } } } }, "AccessPolicy": { "type": "object", "required": [ "Start", "Expiry", "Permission" ], "description": "An Access policy", "properties": { "Start": { "description": "the date-time the policy is active", "type": "string", "format": "date-time" }, "Expiry": { "description": "the date-time the policy expires", "type": "string", "format": "date-time" }, "Permission": { "description": "the permissions for the acl policy", "type": "string" } } }, "AccessTier": { "type": "string", "enum": [ "P4", "P6", "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive" ], "x-ms-enum": { "name": "AccessTier", "modelAsString": true } }, "ArchiveStatus": { "type": 
"string", "enum": [ "rehydrate-pending-to-hot", "rehydrate-pending-to-cool" ], "x-ms-enum": { "name": "ArchiveStatus", "modelAsString": true } }, "BlobItem": { "xml": { "name": "Blob" }, "description": "An Azure Storage blob", "type": "object", "required": [ "Name", "Deleted", "Snapshot", "Properties" ], "properties": { "Name": { "type": "string" }, "Deleted": { "type": "boolean" }, "Snapshot": { "type": "string" }, "Properties": { "$ref": "#/definitions/BlobProperties" }, "Metadata": { "$ref": "#/definitions/BlobMetadata" } } }, "BlobProperties": { "xml": { "name": "Properties" }, "description": "Properties of a blob", "type": "object", "required": [ "Etag", "Last-Modified" ], "properties": { "Creation-Time": { "type": "string", "format": "date-time-rfc1123" }, "Last-Modified": { "type": "string", "format": "date-time-rfc1123" }, "Etag": { "type": "string", "format": "etag" }, "Content-Length": { "type": "integer", "format": "int64", "description": "Size in bytes" }, "Content-Type": { "type": "string" }, "Content-Encoding": { "type": "string" }, "Content-Language": { "type": "string" }, "Content-MD5": { "type": "string", "format": "byte" }, "Content-Disposition": { "type": "string" }, "Cache-Control": { "type": "string" }, "x-ms-blob-sequence-number": { "x-ms-client-name": "blobSequenceNumber", "type": "integer", "format": "int64" }, "BlobType": { "type": "string", "enum": [ "BlockBlob", "PageBlob", "AppendBlob" ], "x-ms-enum": { "name": "BlobType", "modelAsString": false } }, "LeaseStatus": { "$ref": "#/definitions/LeaseStatus" }, "LeaseState": { "$ref": "#/definitions/LeaseState" }, "LeaseDuration": { "$ref": "#/definitions/LeaseDuration" }, "CopyId": { "type": "string" }, "CopyStatus": { "$ref": "#/definitions/CopyStatus" }, "CopySource": { "type": "string" }, "CopyProgress": { "type": "string" }, "CopyCompletionTime": { "type": "string", "format": "date-time-rfc1123" }, "CopyStatusDescription": { "type": "string" }, "ServerEncrypted": { "type": "boolean" }, 
"IncrementalCopy": { "type": "boolean" }, "DestinationSnapshot": { "type": "string" }, "DeletedTime": { "type": "string", "format": "date-time-rfc1123" }, "RemainingRetentionDays": { "type": "integer" }, "AccessTier": { "$ref": "#/definitions/AccessTier" }, "AccessTierInferred": { "type": "boolean" }, "ArchiveStatus": { "$ref": "#/definitions/ArchiveStatus" }, "CustomerProvidedKeySha256": { "type": "string" }, "AccessTierChangeTime": { "type": "string", "format": "date-time-rfc1123" } } }, "ListBlobsFlatSegmentResponse": { "xml": { "name": "EnumerationResults" }, "description": "An enumeration of blobs", "type": "object", "required": [ "ServiceEndpoint", "ContainerName", "Segment" ], "properties": { "ServiceEndpoint": { "type": "string", "xml": { "attribute": true } }, "ContainerName": { "type": "string", "xml": { "attribute": true } }, "Prefix": { "type": "string" }, "Marker": { "type": "string" }, "MaxResults": { "type": "integer" }, "Segment": { "$ref": "#/definitions/BlobFlatListSegment" }, "NextMarker": { "type": "string" } } }, "ListBlobsHierarchySegmentResponse": { "xml": { "name": "EnumerationResults" }, "description": "An enumeration of blobs", "type": "object", "required": [ "ServiceEndpoint", "ContainerName", "Segment" ], "properties": { "ServiceEndpoint": { "type": "string", "xml": { "attribute": true } }, "ContainerName": { "type": "string", "xml": { "attribute": true } }, "Prefix": { "type": "string" }, "Marker": { "type": "string" }, "MaxResults": { "type": "integer" }, "Delimiter": { "type": "string" }, "Segment": { "$ref": "#/definitions/BlobHierarchyListSegment" }, "NextMarker": { "type": "string" } } }, "BlobFlatListSegment": { "xml": { "name": "Blobs" }, "required": [ "BlobItems" ], "type": "object", "properties": { "BlobItems": { "type": "array", "items": { "$ref": "#/definitions/BlobItem" } } } }, "BlobHierarchyListSegment": { "xml": { "name": "Blobs" }, "type": "object", "required": [ "BlobItems" ], "properties": { "BlobPrefixes": { "type": 
"array", "items": { "$ref": "#/definitions/BlobPrefix" } }, "BlobItems": { "type": "array", "items": { "$ref": "#/definitions/BlobItem" } } } }, "BlobPrefix": { "type": "object", "required": [ "Name" ], "properties": { "Name": { "type": "string" } } }, "Block": { "type": "object", "required": [ "Name", "Size" ], "description": "Represents a single block in a block blob. It describes the block's ID and size.", "properties": { "Name": { "description": "The base64 encoded block ID.", "type": "string" }, "Size": { "description": "The block size in bytes.", "type": "integer" } } }, "BlockList": { "type": "object", "properties": { "CommittedBlocks": { "xml": { "wrapped": true }, "type": "array", "items": { "$ref": "#/definitions/Block" } }, "UncommittedBlocks": { "xml": { "wrapped": true }, "type": "array", "items": { "$ref": "#/definitions/Block" } } } }, "BlockLookupList": { "type": "object", "properties": { "Committed": { "type": "array", "items": { "type": "string", "xml": { "name": "Committed" } } }, "Uncommitted": { "type": "array", "items": { "type": "string", "xml": { "name": "Uncommitted" } } }, "Latest": { "type": "array", "items": { "type": "string", "xml": { "name": "Latest" } } } }, "xml": { "name": "BlockList" } }, "ContainerItem": { "xml": { "name": "Container" }, "type": "object", "required": [ "Name", "Properties" ], "description": "An Azure Storage container", "properties": { "Name": { "type": "string" }, "Properties": { "$ref": "#/definitions/ContainerProperties" }, "Metadata": { "$ref": "#/definitions/ContainerMetadata" } } }, "ContainerProperties": { "type": "object", "required": [ "Last-Modified", "Etag" ], "description": "Properties of a container", "properties": { "Last-Modified": { "type": "string", "format": "date-time-rfc1123" }, "Etag": { "type": "string", "format": "etag" }, "LeaseStatus": { "$ref": "#/definitions/LeaseStatus" }, "LeaseState": { "$ref": "#/definitions/LeaseState" }, "LeaseDuration": { "$ref": "#/definitions/LeaseDuration" }, 
"PublicAccess": { "$ref": "#/definitions/PublicAccessType" }, "HasImmutabilityPolicy": { "type": "boolean" }, "HasLegalHold": { "type": "boolean" } } }, "ListContainersSegmentResponse": { "xml": { "name": "EnumerationResults" }, "description": "An enumeration of containers", "type": "object", "required": [ "ServiceEndpoint", "ContainerItems" ], "properties": { "ServiceEndpoint": { "type": "string", "xml": { "attribute": true } }, "Prefix": { "type": "string" }, "Marker": { "type": "string" }, "MaxResults": { "type": "integer" }, "ContainerItems": { "xml": { "wrapped": true, "name": "Containers" }, "type": "array", "items": { "$ref": "#/definitions/ContainerItem" } }, "NextMarker": { "type": "string" } } }, "CorsRule": { "description": "CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain", "type": "object", "required": [ "AllowedOrigins", "AllowedMethods", "AllowedHeaders", "ExposedHeaders", "MaxAgeInSeconds" ], "properties": { "AllowedOrigins": { "description": "The origin domains that are permitted to make a request against the storage service via CORS. The origin domain is the domain from which the request originates. Note that the origin must be an exact case-sensitive match with the origin that the user age sends to the service. You can also use the wildcard character '*' to allow all origin domains to make requests via CORS.", "type": "string" }, "AllowedMethods": { "description": "The methods (HTTP request verbs) that the origin domain may use for a CORS request. 
(comma separated)", "type": "string" }, "AllowedHeaders": { "description": "the request headers that the origin domain may specify on the CORS request.", "type": "string" }, "ExposedHeaders": { "description": "The response headers that may be sent in the response to the CORS request and exposed by the browser to the request issuer", "type": "string" }, "MaxAgeInSeconds": { "description": "The maximum amount time that a browser should cache the preflight OPTIONS request.", "type": "integer", "minimum": 0 } } }, "ErrorCode": { "description": "Error codes returned by the service", "type": "string", "enum": [ "AccountAlreadyExists", "AccountBeingCreated", "AccountIsDisabled", "AuthenticationFailed", "AuthorizationFailure", "ConditionHeadersNotSupported", "ConditionNotMet", "EmptyMetadataKey", "InsufficientAccountPermissions", "InternalError", "InvalidAuthenticationInfo", "InvalidHeaderValue", "InvalidHttpVerb", "InvalidInput", "InvalidMd5", "InvalidMetadata", "InvalidQueryParameterValue", "InvalidRange", "InvalidResourceName", "InvalidUri", "InvalidXmlDocument", "InvalidXmlNodeValue", "Md5Mismatch", "MetadataTooLarge", "MissingContentLengthHeader", "MissingRequiredQueryParameter", "MissingRequiredHeader", "MissingRequiredXmlNode", "MultipleConditionHeadersNotSupported", "OperationTimedOut", "OutOfRangeInput", "OutOfRangeQueryParameterValue", "RequestBodyTooLarge", "ResourceTypeMismatch", "RequestUrlFailedToParse", "ResourceAlreadyExists", "ResourceNotFound", "ServerBusy", "UnsupportedHeader", "UnsupportedXmlNode", "UnsupportedQueryParameter", "UnsupportedHttpVerb", "AppendPositionConditionNotMet", "BlobAlreadyExists", "BlobNotFound", "BlobOverwritten", "BlobTierInadequateForContentLength", "BlockCountExceedsLimit", "BlockListTooLong", "CannotChangeToLowerTier", "CannotVerifyCopySource", "ContainerAlreadyExists", "ContainerBeingDeleted", "ContainerDisabled", "ContainerNotFound", "ContentLengthLargerThanTierLimit", "CopyAcrossAccountsNotSupported", "CopyIdMismatch", 
"FeatureVersionMismatch", "IncrementalCopyBlobMismatch", "IncrementalCopyOfEralierVersionSnapshotNotAllowed", "IncrementalCopySourceMustBeSnapshot", "InfiniteLeaseDurationRequired", "InvalidBlobOrBlock", "InvalidBlobTier", "InvalidBlobType", "InvalidBlockId", "InvalidBlockList", "InvalidOperation", "InvalidPageRange", "InvalidSourceBlobType", "InvalidSourceBlobUrl", "InvalidVersionForPageBlobOperation", "LeaseAlreadyPresent", "LeaseAlreadyBroken", "LeaseIdMismatchWithBlobOperation", "LeaseIdMismatchWithContainerOperation", "LeaseIdMismatchWithLeaseOperation", "LeaseIdMissing", "LeaseIsBreakingAndCannotBeAcquired", "LeaseIsBreakingAndCannotBeChanged", "LeaseIsBrokenAndCannotBeRenewed", "LeaseLost", "LeaseNotPresentWithBlobOperation", "LeaseNotPresentWithContainerOperation", "LeaseNotPresentWithLeaseOperation", "MaxBlobSizeConditionNotMet", "NoPendingCopyOperation", "OperationNotAllowedOnIncrementalCopyBlob", "PendingCopyOperation", "PreviousSnapshotCannotBeNewer", "PreviousSnapshotNotFound", "PreviousSnapshotOperationNotSupported", "SequenceNumberConditionNotMet", "SequenceNumberIncrementTooLarge", "SnapshotCountExceeded", "SnaphotOperationRateExceeded", "SnapshotsPresent", "SourceConditionNotMet", "SystemInUse", "TargetConditionNotMet", "UnauthorizedBlobOverwrite", "BlobBeingRehydrated", "BlobArchived", "BlobNotArchived", "AuthorizationSourceIPMismatch", "AuthorizationProtocolMismatch", "AuthorizationPermissionMismatch", "AuthorizationServiceMismatch", "AuthorizationResourceTypeMismatch" ], "x-ms-enum": { "name": "StorageErrorCode", "modelAsString": true } }, "GeoReplication": { "description": "Geo-Replication information for the Secondary Storage Service", "type": "object", "required": [ "Status", "LastSyncTime" ], "properties": { "Status": { "description": "The status of the secondary location", "type": "string", "enum": [ "live", "bootstrap", "unavailable" ], "x-ms-enum": { "name": "GeoReplicationStatusType", "modelAsString": true } }, "LastSyncTime": { 
"description": "A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available for read operations at the secondary. Primary writes after this point in time may or may not be available for reads.", "type": "string", "format": "date-time-rfc1123" } } }, "Logging": { "description": "Azure Analytics Logging settings.", "type": "object", "required": [ "Version", "Delete", "Read", "Write", "RetentionPolicy" ], "properties": { "Version": { "description": "The version of Storage Analytics to configure.", "type": "string" }, "Delete": { "description": "Indicates whether all delete requests should be logged.", "type": "boolean" }, "Read": { "description": "Indicates whether all read requests should be logged.", "type": "boolean" }, "Write": { "description": "Indicates whether all write requests should be logged.", "type": "boolean" }, "RetentionPolicy": { "$ref": "#/definitions/RetentionPolicy" } } }, "ContainerMetadata": { "type": "object", "xml": { "name": "Metadata" }, "additionalProperties": { "type": "string" } }, "BlobMetadata": { "type": "object", "xml": { "name": "Metadata" }, "properties": { "Encrypted": { "type": "string", "xml": { "attribute": true } } }, "additionalProperties": { "type": "string" } }, "Metrics": { "description": "a summary of request statistics grouped by API in hour or minute aggregates for blobs", "required": [ "Enabled" ], "properties": { "Version": { "description": "The version of Storage Analytics to configure.", "type": "string" }, "Enabled": { "description": "Indicates whether metrics are enabled for the Blob service.", "type": "boolean" }, "IncludeAPIs": { "description": "Indicates whether metrics should generate summary statistics for called API operations.", "type": "boolean" }, "RetentionPolicy": { "$ref": "#/definitions/RetentionPolicy" } } }, "PageList": { "description": "the list of pages", "type": "object", "properties": { "PageRange": { "type": "array", "items": { "$ref": 
"#/definitions/PageRange" } }, "ClearRange": { "type": "array", "items": { "$ref": "#/definitions/ClearRange" } } } }, "PageRange": { "type": "object", "required": [ "Start", "End" ], "properties": { "Start": { "type": "integer", "format": "int64", "xml": { "name": "Start" } }, "End": { "type": "integer", "format": "int64", "xml": { "name": "End" } } }, "xml": { "name": "PageRange" } }, "ClearRange": { "type": "object", "required": [ "Start", "End" ], "properties": { "Start": { "type": "integer", "format": "int64", "xml": { "name": "Start" } }, "End": { "type": "integer", "format": "int64", "xml": { "name": "End" } } }, "xml": { "name": "ClearRange" } }, "RetentionPolicy": { "description": "the retention policy which determines how long the associated data should persist", "type": "object", "required": [ "Enabled" ], "properties": { "Enabled": { "description": "Indicates whether a retention policy is enabled for the storage service", "type": "boolean" }, "Days": { "description": "Indicates the number of days that metrics or logging or soft-deleted data should be retained. 
All data older than this value will be deleted", "type": "integer", "minimum": 1 } } }, "SignedIdentifier": { "xml": { "name": "SignedIdentifier" }, "description": "signed identifier", "type": "object", "required": [ "Id", "AccessPolicy" ], "properties": { "Id": { "type": "string", "description": "a unique id" }, "AccessPolicy": { "$ref": "#/definitions/AccessPolicy" } } }, "SignedIdentifiers": { "description": "a collection of signed identifiers", "type": "array", "items": { "$ref": "#/definitions/SignedIdentifier" }, "xml": { "wrapped": true, "name": "SignedIdentifiers" } }, "StaticWebsite": { "description": "The properties that enable an account to host a static website", "type": "object", "required": [ "Enabled" ], "properties": { "Enabled": { "description": "Indicates whether this account is hosting a static website", "type": "boolean" }, "IndexDocument": { "description": "The default name of the index page under each directory", "type": "string" }, "ErrorDocument404Path": { "description": "The absolute path of the custom 404 page", "type": "string" } } }, "StorageServiceProperties": { "description": "Storage Service Properties.", "type": "object", "properties": { "Logging": { "$ref": "#/definitions/Logging" }, "HourMetrics": { "$ref": "#/definitions/Metrics" }, "MinuteMetrics": { "$ref": "#/definitions/Metrics" }, "Cors": { "description": "The set of CORS rules.", "type": "array", "items": { "$ref": "#/definitions/CorsRule" }, "xml": { "wrapped": true } }, "DefaultServiceVersion": { "description": "The default version to use for requests to the Blob service if an incoming request's version is not specified. 
Possible values include version 2008-10-27 and all more recent versions", "type": "string" }, "DeleteRetentionPolicy": { "$ref": "#/definitions/RetentionPolicy" }, "StaticWebsite": { "$ref": "#/definitions/StaticWebsite" } } }, "StorageServiceStats": { "description": "Stats for the storage service.", "type": "object", "properties": { "GeoReplication": { "$ref": "#/definitions/GeoReplication" } } } }, "parameters": { "Url": { "name": "url", "description": "The URL of the service account, container, or blob that is the targe of the desired operation.", "required": true, "type": "string", "in": "path", "x-ms-skip-url-encoding": true }, "ApiVersionParameter": { "name": "x-ms-version", "x-ms-client-name": "version", "in": "header", "required": true, "type": "string", "description": "Specifies the version of the operation to use for this request.", "enum": [ "2019-02-02" ] }, "Blob": { "name": "blob", "in": "path", "required": true, "type": "string", "pattern": "^[a-zA-Z0-9]+(?:/[a-zA-Z0-9]+)*(?:\\.[a-zA-Z0-9]+){0,1}$", "minLength": 1, "maxLength": 1024, "x-ms-parameter-location": "method", "description": "The blob name." }, "Filesystem": { "name": "filesystem", "in": "path", "required": true, "type": "string", "x-ms-parameter-location": "method", "description": "The filesystem name." }, "Path": { "name": "path", "in": "path", "required": true, "type": "string", "x-ms-parameter-location": "method", "description": "The namespace path to a file or directory." }, "BlobCacheControl": { "name": "x-ms-blob-cache-control", "x-ms-client-name": "blobCacheControl", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "blob-HTTP-headers" }, "description": "Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request." 
}, "BlobConditionAppendPos": { "name": "x-ms-blob-condition-appendpos", "x-ms-client-name": "appendPosition", "in": "header", "required": false, "type": "integer", "format": "int64", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "append-position-access-conditions" }, "description": "Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to this number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed)." }, "BlobConditionMaxSize": { "name": "x-ms-blob-condition-maxsize", "x-ms-client-name": "maxSize", "in": "header", "required": false, "type": "integer", "format": "int64", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "append-position-access-conditions" }, "description": "Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed)." 
}, "BlobPublicAccess": { "name": "x-ms-blob-public-access", "x-ms-client-name": "access", "in": "header", "required": false, "x-ms-parameter-location": "method", "description": "Specifies whether data in the container may be accessed publicly and the level of access", "type": "string", "enum": [ "container", "blob" ], "x-ms-enum": { "name": "PublicAccessType", "modelAsString": true } }, "AccessTierRequired": { "name": "x-ms-access-tier", "x-ms-client-name": "tier", "in": "header", "required": true, "type": "string", "enum": [ "P4", "P6", "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive" ], "x-ms-enum": { "name": "AccessTier", "modelAsString": true }, "x-ms-parameter-location": "method", "description": "Indicates the tier to be set on the blob." }, "AccessTierOptional": { "name": "x-ms-access-tier", "x-ms-client-name": "tier", "in": "header", "required": false, "type": "string", "enum": [ "P4", "P6", "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive" ], "x-ms-enum": { "name": "AccessTier", "modelAsString": true }, "x-ms-parameter-location": "method", "description": "Optional. Indicates the tier to be set on the blob." }, "PremiumPageBlobAccessTierOptional": { "name": "x-ms-access-tier", "x-ms-client-name": "tier", "in": "header", "required": false, "type": "string", "enum": [ "P4", "P6", "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80" ], "x-ms-enum": { "name": "PremiumPageBlobAccessTier", "modelAsString": true }, "x-ms-parameter-location": "method", "description": "Optional. Indicates the tier to be set on the page blob." 
}, "RehydratePriority": { "name": "x-ms-rehydrate-priority", "x-ms-client-name": "rehydratePriority", "in": "header", "required": false, "type": "string", "enum": [ "High", "Standard" ], "x-ms-enum": { "name": "RehydratePriority", "modelAsString": true }, "x-ms-parameter-location": "method", "description": "Optional: Indicates the priority with which to rehydrate an archived blob." }, "BlobContentDisposition": { "name": "x-ms-blob-content-disposition", "x-ms-client-name": "blobContentDisposition", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "blob-HTTP-headers" }, "description": "Optional. Sets the blob's Content-Disposition header." }, "BlobContentEncoding": { "name": "x-ms-blob-content-encoding", "x-ms-client-name": "blobContentEncoding", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "blob-HTTP-headers" }, "description": "Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read request." }, "BlobContentLanguage": { "name": "x-ms-blob-content-language", "x-ms-client-name": "blobContentLanguage", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "blob-HTTP-headers" }, "description": "Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read request." }, "BlobContentLengthOptional": { "name": "x-ms-blob-content-length", "x-ms-client-name": "blobContentLength", "in": "header", "required": false, "type": "integer", "format": "int64", "x-ms-parameter-location": "method", "description": "This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary." 
}, "BlobContentLengthRequired": { "name": "x-ms-blob-content-length", "x-ms-client-name": "blobContentLength", "in": "header", "required": true, "type": "integer", "format": "int64", "x-ms-parameter-location": "method", "description": "This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary." }, "BlobContentMD5": { "name": "x-ms-blob-content-md5", "x-ms-client-name": "blobContentMD5", "in": "header", "required": false, "type": "string", "format": "byte", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "blob-HTTP-headers" }, "description": "Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks were validated when each was uploaded." }, "BlobContentType": { "name": "x-ms-blob-content-type", "x-ms-client-name": "blobContentType", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "blob-HTTP-headers" }, "description": "Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request." }, "BlobSequenceNumber": { "name": "x-ms-blob-sequence-number", "x-ms-client-name": "blobSequenceNumber", "in": "header", "required": false, "type": "integer", "format": "int64", "default": 0, "x-ms-parameter-location": "method", "description": "Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of the sequence number must be between 0 and 2^63 - 1." }, "BlockId": { "name": "blockid", "x-ms-client-name": "blockId", "in": "query", "type": "string", "required": true, "x-ms-parameter-location": "method", "description": "A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal to 64 bytes in size. 
For a given blob, the length of the value specified for the blockid parameter must be the same size for each block." }, "BlockListType": { "name": "blocklisttype", "x-ms-client-name": "listType", "in": "query", "required": true, "default": "committed", "x-ms-parameter-location": "method", "description": "Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together.", "type": "string", "enum": [ "committed", "uncommitted", "all" ], "x-ms-enum": { "name": "BlockListType", "modelAsString": false } }, "Body": { "name": "body", "in": "body", "required": true, "schema": { "type": "object", "format": "file" }, "x-ms-parameter-location": "method", "description": "Initial data" }, "Continuation": { "name": "continuation", "x-ms-client-name": "marker", "in": "query", "required": false, "type": "string", "x-ms-parameter-location": "method", "description": "When renaming a directory, the number of paths that are renamed with each invocation is limited. If the number of paths to be renamed exceeds this limit, a continuation token is returned in this response header. When a continuation token is returned in the response, it must be specified in a subsequent invocation of the rename operation to continue renaming the directory." }, "ContainerAcl": { "name": "containerAcl", "in": "body", "schema": { "$ref": "#/definitions/SignedIdentifiers" }, "x-ms-parameter-location": "method", "description": "the acls for the container" }, "CopyId": { "name": "copyid", "x-ms-client-name": "copyId", "in": "query", "required": true, "type": "string", "x-ms-parameter-location": "method", "description": "The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation." 
}, "ClientRequestId": { "name": "x-ms-client-request-id", "x-ms-client-name": "requestId", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "description": "Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled." }, "ContainerName": { "name": "containerName", "in": "path", "required": true, "type": "string", "x-ms-parameter-location": "method", "description": "The container name." }, "ContentCrc64": { "name": "x-ms-content-crc64", "x-ms-client-name": "transactionalContentCrc64", "in": "header", "required": false, "type": "string", "format": "byte", "x-ms-parameter-location": "method", "description": "Specify the transactional crc64 for the body, to be validated by the service." }, "ContentLength": { "name": "Content-Length", "in": "header", "required": true, "type": "integer", "format": "int64", "x-ms-parameter-location": "method", "description": "The length of the request." }, "ContentMD5": { "name": "Content-MD5", "x-ms-client-name": "transactionalContentMD5", "in": "header", "required": false, "type": "string", "format": "byte", "x-ms-parameter-location": "method", "description": "Specify the transactional md5 for the body, to be validated by the service." }, "CopySource": { "name": "x-ms-copy-source", "x-ms-client-name": "copySource", "in": "header", "required": true, "type": "string", "format": "url", "x-ms-parameter-location": "method", "description": "Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob must either be public or must be authenticated via a shared access signature." }, "DeleteSnapshots": { "name": "x-ms-delete-snapshots", "x-ms-client-name": "deleteSnapshots", "description": "Required if the blob has associated snapshots. 
Specify one of the following two options: include: Delete the base blob and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself", "x-ms-parameter-location": "method", "in": "header", "required": false, "type": "string", "enum": [ "include", "only" ], "x-ms-enum": { "name": "DeleteSnapshotsOptionType", "modelAsString": false } }, "Delimiter": { "name": "delimiter", "description": "When the request includes this parameter, the operation returns a BlobPrefix element in the response body that acts as a placeholder for all blobs whose names begin with the same substring up to the appearance of the delimiter character. The delimiter may be a single character or a string.", "type": "string", "x-ms-parameter-location": "method", "in": "query", "required": true }, "DirectoryProperties": { "name": "x-ms-properties", "description": "Optional. User-defined properties to be stored with the file or directory, in the format of a comma-separated list of name and value pairs \"n1=v1, n2=v2, ...\", where each value is base64 encoded.", "x-ms-client-name": "directoryProperties", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method" }, "EncryptionKey": { "name": "x-ms-encryption-key", "x-ms-client-name": "encryptionKey", "type": "string", "in": "header", "required": false, "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "cpk-info" }, "description": "Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption is performed with the root account encryption key. For more information, see Encryption at Rest for Azure Storage Services." 
}, "EncryptionKeySha256": { "name": "x-ms-encryption-key-sha256", "x-ms-client-name": "encryptionKeySha256", "type": "string", "in": "header", "required": false, "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "cpk-info" }, "description": "The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided." }, "EncryptionAlgorithm": { "name": "x-ms-encryption-algorithm", "x-ms-client-name": "encryptionAlgorithm", "type": "string", "in": "header", "required": false, "enum": [ "AES256" ], "x-ms-enum": { "name": "EncryptionAlgorithmType", "modelAsString": false }, "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "cpk-info" }, "description": "The algorithm used to produce the encryption key hash. Currently, the only accepted value is \"AES256\". Must be provided if the x-ms-encryption-key header is provided." }, "FileRenameSource": { "name": "x-ms-rename-source", "x-ms-client-name": "renameSource", "in": "header", "required": true, "type": "string", "x-ms-parameter-location": "method", "description": "The file or directory to be renamed. The value must have the following format: \"/{filesysystem}/{path}\". If \"x-ms-properties\" is specified, the properties will overwrite the existing properties; otherwise, the existing properties will be preserved." }, "GetRangeContentMD5": { "name": "x-ms-range-get-content-md5", "x-ms-client-name": "rangeGetContentMD5", "in": "header", "required": false, "type": "boolean", "x-ms-parameter-location": "method", "description": "When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size." 
}, "GetRangeContentCRC64": { "name": "x-ms-range-get-content-crc64", "x-ms-client-name": "rangeGetContentCRC64", "in": "header", "required": false, "type": "boolean", "x-ms-parameter-location": "method", "description": "When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the range is less than or equal to 4 MB in size." }, "IfMatch": { "name": "If-Match", "x-ms-client-name": "ifMatch", "in": "header", "required": false, "type": "string", "format": "etag", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "modified-access-conditions" }, "description": "Specify an ETag value to operate only on blobs with a matching value." }, "IfModifiedSince": { "name": "If-Modified-Since", "x-ms-client-name": "ifModifiedSince", "in": "header", "required": false, "type": "string", "format": "date-time-rfc1123", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "modified-access-conditions" }, "description": "Specify this header value to operate only on a blob if it has been modified since the specified date/time." }, "IfNoneMatch": { "name": "If-None-Match", "x-ms-client-name": "ifNoneMatch", "in": "header", "required": false, "type": "string", "format": "etag", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "modified-access-conditions" }, "description": "Specify an ETag value to operate only on blobs without a matching value." }, "IfUnmodifiedSince": { "name": "If-Unmodified-Since", "x-ms-client-name": "ifUnmodifiedSince", "in": "header", "required": false, "type": "string", "format": "date-time-rfc1123", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "modified-access-conditions" }, "description": "Specify this header value to operate only on a blob if it has not been modified since the specified date/time." 
}, "IfSequenceNumberEqualTo": { "name": "x-ms-if-sequence-number-eq", "x-ms-client-name": "ifSequenceNumberEqualTo", "in": "header", "required": false, "type": "integer", "format": "int64", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "sequence-number-access-conditions" }, "description": "Specify this header value to operate only on a blob if it has the specified sequence number." }, "IfSequenceNumberLessThan": { "name": "x-ms-if-sequence-number-lt", "x-ms-client-name": "ifSequenceNumberLessThan", "in": "header", "required": false, "type": "integer", "format": "int64", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "sequence-number-access-conditions" }, "description": "Specify this header value to operate only on a blob if it has a sequence number less than the specified." }, "IfSequenceNumberLessThanOrEqualTo": { "name": "x-ms-if-sequence-number-le", "x-ms-client-name": "ifSequenceNumberLessThanOrEqualTo", "in": "header", "required": false, "type": "integer", "format": "int64", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "sequence-number-access-conditions" }, "description": "Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified." }, "KeyInfo": { "name": "KeyInfo", "in": "body", "x-ms-parameter-location": "method", "required": true, "schema": { "$ref": "#/definitions/KeyInfo" } }, "ListBlobsInclude": { "name": "include", "in": "query", "required": false, "type": "array", "collectionFormat": "csv", "items": { "type": "string", "enum": [ "copy", "deleted", "metadata", "snapshots", "uncommittedblobs" ], "x-ms-enum": { "name": "ListBlobsIncludeItem", "modelAsString": false } }, "x-ms-parameter-location": "method", "description": "Include this parameter to specify one or more datasets to include in the response." 
}, "ListContainersInclude": { "name": "include", "in": "query", "required": false, "type": "string", "enum": [ "metadata" ], "x-ms-enum": { "name": "ListContainersIncludeType", "modelAsString": false }, "x-ms-parameter-location": "method", "description": "Include this parameter to specify that the container's metadata be returned as part of the response body." }, "LeaseBreakPeriod": { "name": "x-ms-lease-break-period", "x-ms-client-name": "breakPeriod", "in": "header", "required": false, "type": "integer", "x-ms-parameter-location": "method", "description": "For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has expired, but the lease may be held for longer than the break period. If this header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease breaks immediately." }, "LeaseDuration": { "name": "x-ms-lease-duration", "x-ms-client-name": "duration", "in": "header", "required": false, "type": "integer", "x-ms-parameter-location": "method", "description": "Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration cannot be changed using renew or change." }, "LeaseIdOptional": { "name": "x-ms-lease-id", "x-ms-client-name": "leaseId", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "lease-access-conditions" }, "description": "If specified, the operation only succeeds if the resource's lease is active and matches this ID." 
}, "LeaseIdRequired": { "name": "x-ms-lease-id", "x-ms-client-name": "leaseId", "in": "header", "required": true, "type": "string", "x-ms-parameter-location": "method", "description": "Specifies the current lease ID on the resource." }, "Owner": { "name": "x-ms-owner", "x-ms-client-name": "owner", "in": "header", "required": false, "type": "string", "description": "Optional. The owner of the blob or directory.", "x-ms-parameter-location": "method" }, "Group": { "name": "x-ms-group", "x-ms-client-name": "group", "in": "header", "required": false, "type": "string", "description": "Optional. The owning group of the blob or directory.", "x-ms-parameter-location": "method" }, "Upn": { "name": "upn", "x-ms-client-name": "upn", "in": "query", "description": "Optional. Valid only when Hierarchical Namespace is enabled for the account. If \"true\", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If \"false\", the values will be returned as Azure Active Directory Object IDs. The default value is false.", "required": false, "type": "boolean", "x-ms-parameter-location": "method" }, "Marker": { "name": "marker", "in": "query", "required": false, "type": "string", "description": "A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker value within the response body if the listing operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request the next page of list items. 
The marker value is opaque to the client.", "x-ms-parameter-location": "method" }, "MaxResults": { "name": "maxresults", "in": "query", "required": false, "type": "integer", "minimum": 1, "x-ms-parameter-location": "method", "description": "Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the default of 5000." }, "Metadata": { "name": "x-ms-meta", "x-ms-client-name": "metadata", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "description": "Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information.", "x-ms-header-collection-prefix": "x-ms-meta-" }, "MultipartContentType": { "name": "Content-Type", "x-ms-client-name": "multipartContentType", "in": "header", "required": true, "type": "string", "x-ms-parameter-location": "method", "description": "Required. The value of this header must be multipart/mixed with a batch boundary. 
Example header value: multipart/mixed; boundary=batch_" }, "PathRenameMode": { "name": "mode", "x-ms-client-name": "pathRenameMode", "description": "Determines the behavior of the rename operation", "in": "query", "required": false, "type": "string", "enum": [ "legacy", "posix" ], "x-ms-enum": { "name": "PathRenameMode", "modelAsString": false } }, "PosixPermissions": { "name": "x-ms-permissions", "description": "Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access permissions for the file owner, the file owning group, and others. Each class may be granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.", "x-ms-client-name": "posixPermissions", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method" }, "PosixAcl": { "name": "x-ms-acl", "description": "Sets POSIX access control rights on files and directories. The value is a comma-separated list of access control entries. Each access control entry (ACE) consists of a scope, a type, a user or group identifier, and permissions in the format \"[scope:][type]:[id]:[permissions]\".", "x-ms-client-name": "posixAcl", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method" }, "PosixUmask": { "name": "x-ms-umask", "x-ms-client-name": "posixUmask", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "description": "Only valid if Hierarchical Namespace is enabled for the account. This umask restricts permission settings for file and directory, and will only be applied when default Acl does not exist in parent directory. If the umask bit has set, it means that the corresponding permission will be disabled. Otherwise the corresponding permission will be determined by the permission. A 4-digit octal notation (e.g. 0022) is supported here. 
If no umask was specified, a default umask - 0027 will be used." }, "Prefix": { "name": "prefix", "in": "query", "required": false, "type": "string", "description": "Filters the results to return only containers whose name begins with the specified prefix.", "x-ms-parameter-location": "method" }, "PrevSnapshot": { "name": "prevsnapshot", "in": "query", "required": false, "type": "string", "x-ms-parameter-location": "method", "description": "Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response will contain only pages that were changed between target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots are currently supported only for blobs created on or after January 1, 2016." }, "ProposedLeaseIdOptional": { "name": "x-ms-proposed-lease-id", "x-ms-client-name": "proposedLeaseId", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "description": "Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats." }, "ProposedLeaseIdRequired": { "name": "x-ms-proposed-lease-id", "x-ms-client-name": "proposedLeaseId", "in": "header", "required": true, "type": "string", "x-ms-parameter-location": "method", "description": "Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats." 
}, "Range": { "name": "x-ms-range", "x-ms-client-name": "range", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "description": "Return only the bytes of the blob in the specified range." }, "RangeRequiredPutPageFromUrl": { "name": "x-ms-range", "x-ms-client-name": "range", "in": "header", "required": true, "type": "string", "x-ms-parameter-location": "method", "description": "The range of bytes to which the source range would be written. The range should be 512 aligned and range-end is required." }, "RecursiveDirectoryDelete": { "name": "recursive", "x-ms-client-name": "recursiveDirectoryDelete", "in": "query", "required": true, "type": "boolean", "x-ms-parameter-location": "method", "description": "If \"true\", all paths beneath the directory will be deleted. If \"false\" and the directory is non-empty, an error occurs." }, "SequenceNumberAction": { "name": "x-ms-sequence-number-action", "x-ms-client-name": "sequenceNumberAction", "in": "header", "required": true, "x-ms-parameter-location": "method", "description": "Required if the x-ms-blob-sequence-number header is set for the request. This property applies to page blobs only. This property indicates how the service should modify the blob's sequence number", "type": "string", "enum": [ "max", "update", "increment" ], "x-ms-enum": { "name": "SequenceNumberActionType", "modelAsString": false } }, "Snapshot": { "name": "snapshot", "in": "query", "required": false, "type": "string", "x-ms-parameter-location": "method", "description": "The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see Creating a Snapshot of a Blob." 
}, "SourceContentMD5": { "name": "x-ms-source-content-md5", "x-ms-client-name": "sourceContentMD5", "in": "header", "required": false, "type": "string", "format": "byte", "x-ms-parameter-location": "method", "description": "Specify the md5 calculated for the range of bytes that must be read from the copy source." }, "SourceContentCRC64": { "name": "x-ms-source-content-crc64", "x-ms-client-name": "sourceContentcrc64", "in": "header", "required": false, "type": "string", "format": "byte", "x-ms-parameter-location": "method", "description": "Specify the crc64 calculated for the range of bytes that must be read from the copy source." }, "SourceRange": { "name": "x-ms-source-range", "x-ms-client-name": "sourceRange", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "description": "Bytes of source data in the specified range." }, "SourceRangeRequiredPutPageFromUrl": { "name": "x-ms-source-range", "x-ms-client-name": "sourceRange", "in": "header", "required": true, "type": "string", "x-ms-parameter-location": "method", "description": "Bytes of source data in the specified range. The length of this range should match the ContentLength header and x-ms-range/Range destination range header." }, "SourceIfMatch": { "name": "x-ms-source-if-match", "x-ms-client-name": "sourceIfMatch", "in": "header", "required": false, "type": "string", "format": "etag", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "source-modified-access-conditions" }, "description": "Specify an ETag value to operate only on blobs with a matching value." 
}, "SourceIfModifiedSince": { "name": "x-ms-source-if-modified-since", "x-ms-client-name": "sourceIfModifiedSince", "in": "header", "required": false, "type": "string", "format": "date-time-rfc1123", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "source-modified-access-conditions" }, "description": "Specify this header value to operate only on a blob if it has been modified since the specified date/time." }, "SourceIfNoneMatch": { "name": "x-ms-source-if-none-match", "x-ms-client-name": "sourceIfNoneMatch", "in": "header", "required": false, "type": "string", "format": "etag", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "source-modified-access-conditions" }, "description": "Specify an ETag value to operate only on blobs without a matching value." }, "SourceIfUnmodifiedSince": { "name": "x-ms-source-if-unmodified-since", "x-ms-client-name": "sourceIfUnmodifiedSince", "in": "header", "required": false, "type": "string", "format": "date-time-rfc1123", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "source-modified-access-conditions" }, "description": "Specify this header value to operate only on a blob if it has not been modified since the specified date/time." }, "SourceLeaseId": { "name": "x-ms-source-lease-id", "x-ms-client-name": "sourceLeaseId", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "description": "A lease ID for the source path. If specified, the source path must have an active lease and the leaase ID must match." }, "SourceUrl": { "name": "x-ms-copy-source", "x-ms-client-name": "sourceUrl", "in": "header", "required": true, "type": "string", "format": "url", "x-ms-parameter-location": "method", "description": "Specify a URL to the copy source." 
}, "StorageServiceProperties": { "name": "StorageServiceProperties", "in": "body", "required": true, "schema": { "$ref": "#/definitions/StorageServiceProperties" }, "x-ms-parameter-location": "method", "description": "The StorageService properties." }, "Timeout": { "name": "timeout", "in": "query", "required": false, "type": "integer", "minimum": 0, "x-ms-parameter-location": "method", "description": "The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations." }, "XMsCacheControl": { "name": "x-ms-cache-control", "x-ms-client-name": "cacheControl", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "directory-http-headers" }, "description": "Cache control for given resource" }, "XMsContentType": { "name": "x-ms-content-type", "x-ms-client-name": "contentType", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "directory-http-headers" }, "description": "Content type for given resource" }, "XMsContentEncoding": { "name": "x-ms-content-encoding", "x-ms-client-name": "contentEncoding", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "directory-http-headers" }, "description": "Content encoding for given resource" }, "XMsContentLanguage": { "name": "x-ms-content-language", "x-ms-client-name": "contentLanguage", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "directory-http-headers" }, "description": "Content language for given resource" }, "XMsContentDisposition": { "name": "x-ms-content-disposition", "x-ms-client-name": "contentDisposition", "in": "header", "required": false, "type": "string", "x-ms-parameter-location": "method", "x-ms-parameter-grouping": { "name": "directory-http-headers" }, 
"description": "Content disposition for given resource" } } }