Update azure backend storage sdk (#24669)

* update vendored azure sdk

* vendor giovanni storage sdk

* Add giovanni clients

* go mod vendor

* Swap to new storage sdk

* workable tests

* update .go-version to 1.14.2

* Tests working minus SAS

* Add SAS Token support

* Update vendor

* Passing tests

* Add date randomizer

* Capitalize RG

* Remove random bits

* Update client var name

Co-authored-by: kt <kt@katbyte.me>
Matthew Frahry committed 2020-05-20 08:29:02 -07:00 (via GitHub)
parent eead4c49fe
commit 481b03c34a
141 changed files with 7294 additions and 8476 deletions
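The diffs that follow replace the legacy github.com/Azure/azure-sdk-for-go/storage blob client with the giovanni blob and container clients, which authenticate through autorest authorizers rather than the old basic client. A minimal sketch of the new construction pattern, assuming the storage account name, access key, and target azure.Environment are already known (newBlobsClient is an illustrative helper, not part of the diff):

// Minimal sketch of the pattern used below: build an authorizer, then attach it
// to a giovanni client created for the target cloud environment.
package azure

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
	"github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs"
)

func newBlobsClient(accountName, accessKey string, env azure.Environment) (*blobs.Client, error) {
	storageAuth, err := autorest.NewSharedKeyAuthorizer(accountName, accessKey, autorest.SharedKey)
	if err != nil {
		return nil, fmt.Errorf("Error building Authorizer: %+v", err)
	}

	client := blobs.NewWithEnvironment(env)
	client.Client.Authorizer = storageAuth
	return &client, nil
}

The SAS-token path in the backend is analogous, substituting autorest.NewSASTokenAuthorizer(sasToken) for the shared-key authorizer.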


@ -1 +1 @@
1.14
1.14.2


@ -4,14 +4,14 @@ import (
"context"
"fmt"
"log"
"net/url"
"os"
"strings"
"time"
"github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs"
"github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/containers"
"github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/resources"
armStorage "github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/storage/mgmt/storage"
"github.com/Azure/azure-sdk-for-go/storage"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/hashicorp/go-azure-helpers/authentication"
@ -23,6 +23,8 @@ type ArmClient struct {
// These Clients are only initialized if an Access Key isn't provided
groupsClient *resources.GroupsClient
storageAccountsClient *armStorage.AccountsClient
containersClient *containers.Client
blobsClient *blobs.Client
accessKey string
environment azure.Environment
@ -106,49 +108,81 @@ func buildArmEnvironment(config BackendConfig) (*azure.Environment, error) {
return authentication.DetermineEnvironment(config.Environment)
}
func (c ArmClient) getBlobClient(ctx context.Context) (*storage.BlobStorageClient, error) {
if c.accessKey != "" {
log.Printf("[DEBUG] Building the Blob Client from an Access Token")
storageClient, err := storage.NewBasicClientOnSovereignCloud(c.storageAccountName, c.accessKey, c.environment)
if err != nil {
return nil, fmt.Errorf("Error creating storage client for storage account %q: %s", c.storageAccountName, err)
}
client := storageClient.GetBlobService()
return &client, nil
}
func (c ArmClient) getBlobClient(ctx context.Context) (*blobs.Client, error) {
if c.sasToken != "" {
log.Printf("[DEBUG] Building the Blob Client from a SAS Token")
token := strings.TrimPrefix(c.sasToken, "?")
uri, err := url.ParseQuery(token)
storageAuth, err := autorest.NewSASTokenAuthorizer(c.sasToken)
if err != nil {
return nil, fmt.Errorf("Error parsing SAS Token: %+v", err)
return nil, fmt.Errorf("Error building Authorizer: %+v", err)
}
storageClient := storage.NewAccountSASClient(c.storageAccountName, uri, c.environment)
client := storageClient.GetBlobService()
return &client, nil
blobsClient := blobs.NewWithEnvironment(c.environment)
c.configureClient(&blobsClient.Client, storageAuth)
return &blobsClient, nil
}
log.Printf("[DEBUG] Building the Blob Client from an Access Token (using user credentials)")
keys, err := c.storageAccountsClient.ListKeys(ctx, c.resourceGroupName, c.storageAccountName)
accessKey := c.accessKey
if accessKey == "" {
log.Printf("[DEBUG] Building the Blob Client from an Access Token (using user credentials)")
keys, err := c.storageAccountsClient.ListKeys(ctx, c.resourceGroupName, c.storageAccountName)
if err != nil {
return nil, fmt.Errorf("Error retrieving keys for Storage Account %q: %s", c.storageAccountName, err)
}
if keys.Keys == nil {
return nil, fmt.Errorf("Nil key returned for storage account %q", c.storageAccountName)
}
accessKeys := *keys.Keys
accessKey = *accessKeys[0].Value
}
storageAuth, err := autorest.NewSharedKeyAuthorizer(c.storageAccountName, accessKey, autorest.SharedKey)
if err != nil {
return nil, fmt.Errorf("Error retrieving keys for Storage Account %q: %s", c.storageAccountName, err)
return nil, fmt.Errorf("Error building Authorizer: %+v", err)
}
if keys.Keys == nil {
return nil, fmt.Errorf("Nil key returned for storage account %q", c.storageAccountName)
blobsClient := blobs.NewWithEnvironment(c.environment)
c.configureClient(&blobsClient.Client, storageAuth)
return &blobsClient, nil
}
func (c ArmClient) getContainersClient(ctx context.Context) (*containers.Client, error) {
if c.sasToken != "" {
log.Printf("[DEBUG] Building the Container Client from a SAS Token")
storageAuth, err := autorest.NewSASTokenAuthorizer(c.sasToken)
if err != nil {
return nil, fmt.Errorf("Error building Authorizer: %+v", err)
}
containersClient := containers.NewWithEnvironment(c.environment)
c.configureClient(&containersClient.Client, storageAuth)
return &containersClient, nil
}
accessKey := c.accessKey
if accessKey == "" {
log.Printf("[DEBUG] Building the Container Client from an Access Token (using user credentials)")
keys, err := c.storageAccountsClient.ListKeys(ctx, c.resourceGroupName, c.storageAccountName)
if err != nil {
return nil, fmt.Errorf("Error retrieving keys for Storage Account %q: %s", c.storageAccountName, err)
}
if keys.Keys == nil {
return nil, fmt.Errorf("Nil key returned for storage account %q", c.storageAccountName)
}
accessKeys := *keys.Keys
accessKey = *accessKeys[0].Value
}
accessKeys := *keys.Keys
accessKey := accessKeys[0].Value
storageClient, err := storage.NewBasicClientOnSovereignCloud(c.storageAccountName, *accessKey, c.environment)
storageAuth, err := autorest.NewSharedKeyAuthorizer(c.storageAccountName, accessKey, autorest.SharedKey)
if err != nil {
return nil, fmt.Errorf("Error creating storage client for storage account %q: %s", c.storageAccountName, err)
return nil, fmt.Errorf("Error building Authorizer: %+v", err)
}
client := storageClient.GetBlobService()
return &client, nil
containersClient := containers.NewWithEnvironment(c.environment)
c.configureClient(&containersClient.Client, storageAuth)
return &containersClient, nil
}
func (c *ArmClient) configureClient(client *autorest.Client, auth autorest.Authorizer) {


@ -149,6 +149,7 @@ type Backend struct {
armClient *ArmClient
containerName string
keyName string
accountName string
}
type BackendConfig struct {
@ -177,6 +178,7 @@ func (b *Backend) configure(ctx context.Context) error {
// Grab the resource data
data := schema.FromContextBackendConfig(ctx)
b.containerName = data.Get("container_name").(string)
b.accountName = data.Get("storage_account_name").(string)
b.keyName = data.Get("key").(string)
// support for previously deprecated fields


@ -6,11 +6,12 @@ import (
"sort"
"strings"
"github.com/Azure/azure-sdk-for-go/storage"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/state/remote"
"github.com/hashicorp/terraform/states"
"github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs"
"github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/containers"
)
const (
@ -21,23 +22,22 @@ const (
func (b *Backend) Workspaces() ([]string, error) {
prefix := b.keyName + keyEnvPrefix
params := storage.ListBlobsParameters{
Prefix: prefix,
params := containers.ListBlobsInput{
Prefix: &prefix,
}
ctx := context.TODO()
client, err := b.armClient.getBlobClient(ctx)
client, err := b.armClient.getContainersClient(ctx)
if err != nil {
return nil, err
}
container := client.GetContainerReference(b.containerName)
resp, err := container.ListBlobs(params)
resp, err := client.ListBlobs(ctx, b.armClient.storageAccountName, b.containerName, params)
if err != nil {
return nil, err
}
envs := map[string]struct{}{}
for _, obj := range resp.Blobs {
for _, obj := range resp.Blobs.Blobs {
key := obj.Name
if strings.HasPrefix(key, prefix) {
name := strings.TrimPrefix(key, prefix)
@ -69,11 +69,13 @@ func (b *Backend) DeleteWorkspace(name string) error {
return err
}
containerReference := client.GetContainerReference(b.containerName)
blobReference := containerReference.GetBlobReference(b.path(name))
options := &storage.DeleteBlobOptions{}
if resp, err := client.Delete(ctx, b.armClient.storageAccountName, b.containerName, b.path(name), blobs.DeleteInput{}); err != nil {
if resp.Response.StatusCode != 404 {
return err
}
}
return blobReference.Delete(options)
return nil
}
func (b *Backend) StateMgr(name string) (state.State, error) {
@ -84,9 +86,10 @@ func (b *Backend) StateMgr(name string) (state.State, error) {
}
client := &RemoteClient{
blobClient: *blobClient,
containerName: b.containerName,
keyName: b.path(name),
giovanniBlobClient: *blobClient,
containerName: b.containerName,
keyName: b.path(name),
accountName: b.accountName,
}
stateMgr := &remote.State{Client: client}
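Workspaces() above now lists state blobs through the giovanni containers client: the prefix is passed by pointer in containers.ListBlobsInput and the results come back under resp.Blobs.Blobs. A condensed sketch of that call, assuming the client is already authorized as in the client changes above (listWorkspaceBlobs is an illustrative helper; imports follow the diff):

func listWorkspaceBlobs(ctx context.Context, client *containers.Client, accountName, containerName, prefix string) ([]string, error) {
	resp, err := client.ListBlobs(ctx, accountName, containerName, containers.ListBlobsInput{
		Prefix: &prefix,
	})
	if err != nil {
		return nil, err
	}

	names := make([]string, 0, len(resp.Blobs.Blobs))
	for _, blob := range resp.Blobs.Blobs {
		// each name still carries the key prefix; the backend trims it to recover the workspace name
		names = append(names, blob.Name)
	}
	return names, nil
}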


@ -1,19 +1,15 @@
package azure
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"log"
"github.com/Azure/azure-sdk-for-go/storage"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/state/remote"
"github.com/hashicorp/terraform/states"
"github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs"
)
const (
@ -23,40 +19,30 @@ const (
)
type RemoteClient struct {
blobClient storage.BlobStorageClient
containerName string
keyName string
leaseID string
giovanniBlobClient blobs.Client
accountName string
containerName string
keyName string
leaseID string
}
func (c *RemoteClient) Get() (*remote.Payload, error) {
containerReference := c.blobClient.GetContainerReference(c.containerName)
blobReference := containerReference.GetBlobReference(c.keyName)
options := &storage.GetBlobOptions{}
options := blobs.GetInput{}
if c.leaseID != "" {
options.LeaseID = c.leaseID
options.LeaseID = &c.leaseID
}
blob, err := blobReference.Get(options)
ctx := context.TODO()
blob, err := c.giovanniBlobClient.Get(ctx, c.accountName, c.containerName, c.keyName, options)
if err != nil {
if storErr, ok := err.(storage.AzureStorageServiceError); ok {
if storErr.Code == "BlobNotFound" {
return nil, nil
}
if blob.Response.StatusCode == 404 {
return nil, nil
}
return nil, err
}
defer blob.Close()
buf := bytes.NewBuffer(nil)
if _, err := io.Copy(buf, blob); err != nil {
return nil, fmt.Errorf("Failed to read remote state: %s", err)
}
payload := &remote.Payload{
Data: buf.Bytes(),
Data: blob.Contents,
}
// If there was no data, then return nil
@ -68,54 +54,50 @@ func (c *RemoteClient) Get() (*remote.Payload, error) {
}
func (c *RemoteClient) Put(data []byte) error {
getOptions := &storage.GetBlobMetadataOptions{}
setOptions := &storage.SetBlobPropertiesOptions{}
putOptions := &storage.PutBlobOptions{}
containerReference := c.blobClient.GetContainerReference(c.containerName)
blobReference := containerReference.GetBlobReference(c.keyName)
blobReference.Properties.ContentType = "application/json"
blobReference.Properties.ContentLength = int64(len(data))
getOptions := blobs.GetPropertiesInput{}
setOptions := blobs.SetPropertiesInput{}
putOptions := blobs.PutBlockBlobInput{}
options := blobs.GetInput{}
if c.leaseID != "" {
getOptions.LeaseID = c.leaseID
setOptions.LeaseID = c.leaseID
putOptions.LeaseID = c.leaseID
options.LeaseID = &c.leaseID
getOptions.LeaseID = &c.leaseID
setOptions.LeaseID = &c.leaseID
putOptions.LeaseID = &c.leaseID
}
exists, err := blobReference.Exists()
ctx := context.TODO()
blob, err := c.giovanniBlobClient.GetProperties(ctx, c.accountName, c.containerName, c.keyName, getOptions)
if err != nil {
return err
}
if exists {
err = blobReference.GetMetadata(getOptions)
if err != nil {
if blob.StatusCode != 404 {
return err
}
}
reader := bytes.NewReader(data)
contentType := "application/json"
putOptions.Content = &data
putOptions.ContentType = &contentType
putOptions.MetaData = blob.MetaData
_, err = c.giovanniBlobClient.PutBlockBlob(ctx, c.accountName, c.containerName, c.keyName, putOptions)
err = blobReference.CreateBlockBlobFromReader(reader, putOptions)
if err != nil {
return err
}
return blobReference.SetProperties(setOptions)
return err
}
func (c *RemoteClient) Delete() error {
containerReference := c.blobClient.GetContainerReference(c.containerName)
blobReference := containerReference.GetBlobReference(c.keyName)
options := &storage.DeleteBlobOptions{}
options := blobs.DeleteInput{}
if c.leaseID != "" {
options.LeaseID = c.leaseID
options.LeaseID = &c.leaseID
}
return blobReference.Delete(options)
ctx := context.TODO()
resp, err := c.giovanniBlobClient.Delete(ctx, c.accountName, c.containerName, c.keyName, options)
if err != nil {
if resp.Response.StatusCode != 404 {
return err
}
}
return nil
}
func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {
@ -143,41 +125,44 @@ func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {
}
}
containerReference := c.blobClient.GetContainerReference(c.containerName)
blobReference := containerReference.GetBlobReference(c.keyName)
leaseID, err := blobReference.AcquireLease(-1, info.ID, &storage.LeaseOptions{})
leaseOptions := blobs.AcquireLeaseInput{
ProposedLeaseID: &info.ID,
LeaseDuration: -1,
}
ctx := context.TODO()
// obtain properties to see if the blob lease is already in use. If the blob doesn't exist, create it
properties, err := c.giovanniBlobClient.GetProperties(ctx, c.accountName, c.containerName, c.keyName, blobs.GetPropertiesInput{})
if err != nil {
if storErr, ok := err.(storage.AzureStorageServiceError); ok && storErr.Code != "BlobNotFound" {
// error if we had issues getting the blob
if properties.Response.StatusCode != 404 {
return "", getLockInfoErr(err)
}
// if we don't find the blob, we need to build it
// failed to lock as there was no state blob, write empty state
stateMgr := &remote.State{Client: c}
// ensure state is actually empty
if err := stateMgr.RefreshState(); err != nil {
return "", fmt.Errorf("Failed to refresh state before writing empty state for locking: %s", err)
contentType := "application/json"
putGOptions := blobs.PutBlockBlobInput{
ContentType: &contentType,
}
log.Print("[DEBUG] Could not lock as state blob did not exist, creating with empty state")
if v := stateMgr.State(); v == nil {
if err := stateMgr.WriteState(states.NewState()); err != nil {
return "", fmt.Errorf("Failed to write empty state for locking: %s", err)
}
if err := stateMgr.PersistState(); err != nil {
return "", fmt.Errorf("Failed to persist empty state for locking: %s", err)
}
}
leaseID, err = blobReference.AcquireLease(-1, info.ID, &storage.LeaseOptions{})
_, err = c.giovanniBlobClient.PutBlockBlob(ctx, c.accountName, c.containerName, c.keyName, putGOptions)
if err != nil {
return "", getLockInfoErr(err)
}
}
info.ID = leaseID
c.leaseID = leaseID
// if the blob is already locked then error
if properties.LeaseStatus == blobs.Locked {
return "", getLockInfoErr(fmt.Errorf("state blob is already locked"))
}
leaseID, err := c.giovanniBlobClient.AcquireLease(ctx, c.accountName, c.containerName, c.keyName, leaseOptions)
if err != nil {
return "", getLockInfoErr(err)
}
info.ID = leaseID.LeaseID
c.leaseID = leaseID.LeaseID
if err := c.writeLockInfo(info); err != nil {
return "", err
@ -187,14 +172,18 @@ func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {
}
func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) {
containerReference := c.blobClient.GetContainerReference(c.containerName)
blobReference := containerReference.GetBlobReference(c.keyName)
err := blobReference.GetMetadata(&storage.GetBlobMetadataOptions{})
options := blobs.GetPropertiesInput{}
if c.leaseID != "" {
options.LeaseID = &c.leaseID
}
ctx := context.TODO()
blob, err := c.giovanniBlobClient.GetProperties(ctx, c.accountName, c.containerName, c.keyName, options)
if err != nil {
return nil, err
}
raw := blobReference.Metadata[lockInfoMetaKey]
raw := blob.MetaData[lockInfoMetaKey]
if raw == "" {
return nil, fmt.Errorf("blob metadata %q was empty", lockInfoMetaKey)
}
@ -215,26 +204,29 @@ func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) {
// writes info to blob meta data, deletes metadata entry if info is nil
func (c *RemoteClient) writeLockInfo(info *state.LockInfo) error {
containerReference := c.blobClient.GetContainerReference(c.containerName)
blobReference := containerReference.GetBlobReference(c.keyName)
err := blobReference.GetMetadata(&storage.GetBlobMetadataOptions{
LeaseID: c.leaseID,
})
ctx := context.TODO()
blob, err := c.giovanniBlobClient.GetProperties(ctx, c.accountName, c.containerName, c.keyName, blobs.GetPropertiesInput{LeaseID: &c.leaseID})
if err != nil {
return err
}
if err != nil {
return err
}
if info == nil {
delete(blobReference.Metadata, lockInfoMetaKey)
delete(blob.MetaData, lockInfoMetaKey)
} else {
value := base64.StdEncoding.EncodeToString(info.Marshal())
blobReference.Metadata[lockInfoMetaKey] = value
blob.MetaData[lockInfoMetaKey] = value
}
opts := &storage.SetBlobMetadataOptions{
LeaseID: c.leaseID,
opts := blobs.SetMetaDataInput{
LeaseID: &c.leaseID,
MetaData: blob.MetaData,
}
return blobReference.SetMetadata(opts)
_, err = c.giovanniBlobClient.SetMetaData(ctx, c.accountName, c.containerName, c.keyName, opts)
return err
}
func (c *RemoteClient) Unlock(id string) error {
@ -258,9 +250,8 @@ func (c *RemoteClient) Unlock(id string) error {
return lockErr
}
containerReference := c.blobClient.GetContainerReference(c.containerName)
blobReference := containerReference.GetBlobReference(c.keyName)
err = blobReference.ReleaseLease(id, &storage.LeaseOptions{})
ctx := context.TODO()
_, err = c.giovanniBlobClient.ReleaseLease(ctx, c.accountName, c.containerName, c.keyName, id)
if err != nil {
lockErr.Err = err
return lockErr
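The locking changes above swap the old blob-reference lease methods for giovanni's lease operations: the blob's properties are checked first (creating an empty state blob on a 404, erroring if the lease is already Locked), an infinite lease is acquired with a proposed lease ID, and the lock info is written into the blob's metadata under that lease. A hedged sketch of the acquire/write/release sequence, assuming the state blob already exists (lockAndUnlock is an illustrative helper; imports follow the diff):

func lockAndUnlock(ctx context.Context, client *blobs.Client, accountName, containerName, keyName, proposedLeaseID string, lockMetaData map[string]string) error {
	lease, err := client.AcquireLease(ctx, accountName, containerName, keyName, blobs.AcquireLeaseInput{
		ProposedLeaseID: &proposedLeaseID,
		LeaseDuration:   -1, // the backend asks for an infinite lease
	})
	if err != nil {
		return err
	}

	// lock info lives in blob metadata and is written under the active lease
	if _, err := client.SetMetaData(ctx, accountName, containerName, keyName, blobs.SetMetaDataInput{
		LeaseID:  &lease.LeaseID,
		MetaData: lockMetaData,
	}); err != nil {
		return err
	}

	_, err = client.ReleaseLease(ctx, accountName, containerName, keyName, lease.LeaseID)
	return err
}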


@ -5,10 +5,10 @@ import (
"os"
"testing"
"github.com/Azure/azure-sdk-for-go/storage"
"github.com/hashicorp/terraform/backend"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/state/remote"
"github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs"
)
func TestRemoteClient_impl(t *testing.T) {
@ -264,21 +264,22 @@ func TestPutMaintainsMetaData(t *testing.T) {
if err != nil {
t.Fatalf("Error building Blob Client: %+v", err)
}
containerReference := client.GetContainerReference(res.storageContainerName)
blobReference := containerReference.GetBlobReference(res.storageKeyName)
err = blobReference.CreateBlockBlob(&storage.PutBlobOptions{})
_, err = client.PutBlockBlob(ctx, res.storageAccountName, res.storageContainerName, res.storageKeyName, blobs.PutBlockBlobInput{})
if err != nil {
t.Fatalf("Error Creating Block Blob: %+v", err)
}
err = blobReference.GetMetadata(&storage.GetBlobMetadataOptions{})
blobReference, err := client.GetProperties(ctx, res.storageAccountName, res.storageContainerName, res.storageKeyName, blobs.GetPropertiesInput{})
if err != nil {
t.Fatalf("Error loading MetaData: %+v", err)
}
blobReference.Metadata[headerName] = expectedValue
err = blobReference.SetMetadata(&storage.SetBlobMetadataOptions{})
blobReference.MetaData[headerName] = expectedValue
opts := blobs.SetMetaDataInput{
MetaData: blobReference.MetaData,
}
_, err = client.SetMetaData(ctx, res.storageAccountName, res.storageContainerName, res.storageKeyName, opts)
if err != nil {
t.Fatalf("Error setting MetaData: %+v", err)
}
@ -287,8 +288,9 @@ func TestPutMaintainsMetaData(t *testing.T) {
remoteClient := RemoteClient{
keyName: res.storageKeyName,
containerName: res.storageContainerName,
accountName: res.storageAccountName,
blobClient: *client,
giovanniBlobClient: *client,
}
bytes := []byte(acctest.RandString(20))
@ -298,12 +300,12 @@ func TestPutMaintainsMetaData(t *testing.T) {
}
// Verify it still exists
err = blobReference.GetMetadata(&storage.GetBlobMetadataOptions{})
blobReference, err = client.GetProperties(ctx, res.storageAccountName, res.storageContainerName, res.storageKeyName, blobs.GetPropertiesInput{})
if err != nil {
t.Fatalf("Error loading MetaData: %+v", err)
}
if blobReference.Metadata[headerName] != expectedValue {
t.Fatalf("%q was not set to %q in the MetaData: %+v", headerName, expectedValue, blobReference.Metadata)
if blobReference.MetaData[headerName] != expectedValue {
t.Fatalf("%q was not set to %q in the MetaData: %+v", headerName, expectedValue, blobReference.MetaData)
}
}


@ -11,8 +11,9 @@ import (
"github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/resources"
armStorage "github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/storage/mgmt/storage"
"github.com/Azure/azure-sdk-for-go/storage"
"github.com/Azure/go-autorest/autorest"
sasStorage "github.com/hashicorp/go-azure-helpers/storage"
"github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/containers"
)
const (
@ -128,7 +129,7 @@ type resourceNames struct {
func testResourceNames(rString string, keyName string) resourceNames {
return resourceNames{
resourceGroup: fmt.Sprintf("acctestrg-backend-%s", rString),
resourceGroup: fmt.Sprintf("acctestRG-backend-%s-%s", strings.Replace(time.Now().Local().Format("060102150405.00"), ".", "", 1), rString),
location: os.Getenv("ARM_LOCATION"),
storageAccountName: fmt.Sprintf("acctestsa%s", rString),
storageContainerName: "acctestcont",
@ -170,15 +171,16 @@ func (c *ArmClient) buildTestResources(ctx context.Context, names *resourceNames
accessKey := *keys[0].Value
names.storageAccountAccessKey = accessKey
storageClient, err := storage.NewBasicClientOnSovereignCloud(names.storageAccountName, accessKey, c.environment)
storageAuth, err := autorest.NewSharedKeyAuthorizer(names.storageAccountName, accessKey, autorest.SharedKey)
if err != nil {
return fmt.Errorf("failed to list storage account keys %s:", err)
return fmt.Errorf("Error building Authorizer: %+v", err)
}
containersClient := containers.NewWithEnvironment(c.environment)
containersClient.Client.Authorizer = storageAuth
log.Printf("Creating Container %q in Storage Account %q (Resource Group %q)", names.storageContainerName, names.storageAccountName, names.resourceGroup)
blobService := storageClient.GetBlobService()
container := blobService.GetContainerReference(names.storageContainerName)
err = container.Create(&storage.CreateContainerOptions{})
_, err = containersClient.Create(ctx, names.storageAccountName, names.storageContainerName, containers.CreateInput{})
if err != nil {
return fmt.Errorf("failed to create storage container: %s", err)
}
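The acceptance-test resource group name above now embeds a local timestamp ahead of the random suffix (the "Capitalize RG" and "Add date randomizer" commits), presumably to keep names unique across repeated runs. In isolation the formatting is (fragment only; rString is the random string already used by the tests):

// Go reference layout 060102150405.00 renders year/month/day/hour/minute/second
// plus hundredths of a second; the dot is stripped before use in the name.
stamp := strings.Replace(time.Now().Local().Format("060102150405.00"), ".", "", 1)
name := fmt.Sprintf("acctestRG-backend-%s-%s", stamp, rString)
// e.g. "acctestRG-backend-20052008290212-abc123" for a run on 2020-05-20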

go.mod

@ -2,8 +2,9 @@ module github.com/hashicorp/terraform
require (
cloud.google.com/go v0.45.1
github.com/Azure/azure-sdk-for-go v36.2.0+incompatible
github.com/Azure/go-autorest/autorest v0.9.2
github.com/Azure/azure-sdk-for-go v40.3.0+incompatible
github.com/Azure/go-autorest/autorest v0.10.0
github.com/Unknwon/com v0.0.0-20151008135407-28b053d5a292 // indirect
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af // indirect
github.com/agext/levenshtein v1.2.2
github.com/agl/ed25519 v0.0.0-20150830182803-278e1ec8e8a6 // indirect
@ -116,6 +117,7 @@ require (
github.com/tencentcloud/tencentcloud-sdk-go v3.0.82+incompatible
github.com/tencentyun/cos-go-sdk-v5 v0.0.0-20190808065407-f07404cefc8c
github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6 // indirect
github.com/tombuildsstuff/giovanni v0.10.1
github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5 // indirect
github.com/vmihailenco/msgpack v4.0.1+incompatible // indirect
github.com/xanzy/ssh-agent v0.2.1
@ -139,3 +141,9 @@ require (
)
go 1.14
replace github.com/Azure/go-autorest => github.com/tombuildsstuff/go-autorest v14.0.1-0.20200416184303-d4e299a3c04a+incompatible
replace github.com/Azure/go-autorest/autorest => github.com/tombuildsstuff/go-autorest/autorest v0.10.1-0.20200416184303-d4e299a3c04a
replace github.com/Azure/go-autorest/autorest/azure/auth => github.com/tombuildsstuff/go-autorest/autorest/azure/auth v0.4.3-0.20200416184303-d4e299a3c04a
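The replace directives appended above point the upstream go-autorest module paths at the tombuildsstuff forks pinned to specific pseudo-versions. Source files keep importing the canonical paths; the substitution happens when the module graph is resolved, so nothing in the backend code references the forks directly:

// Source files continue to use the canonical import path; the replace directive
// in go.mod swaps in the forked module when dependencies are resolved.
import "github.com/Azure/go-autorest/autorest"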

go.sum

@ -7,24 +7,28 @@ cloud.google.com/go v0.45.1 h1:lRi0CHyU+ytlvylOlFKKq0af6JncuyoRh1J+QJBqQx0=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
github.com/Azure/azure-sdk-for-go v21.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v32.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v36.2.0+incompatible h1:09cv2WoH0g6jl6m2iT+R9qcIPZKhXEL0sbmLhxP895s=
github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4=
github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/azure-sdk-for-go v40.3.0+incompatible h1:NthZg3psrLxvQLN6rVm07pZ9mv2wvGNaBNGQ3fnPvLE=
github.com/Azure/azure-sdk-for-go v40.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503 h1:Hxqlh1uAA8aGpa1dFhDNhll7U/rkWtG8ZItFvRMr7l0=
github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.8.2 h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0=
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/azure/cli v0.2.0 h1:pSwNMF0qotgehbQNllUWwJ4V3vnrLKOzHrwDLEZK904=
github.com/Azure/go-autorest/autorest/azure/cli v0.2.0/go.mod h1:WWTbGPvkAg3I4ms2j2s+Zr5xCGwGqTQh+6M2ZqOczkE=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 h1:5PAqnv+CSTwW9mlZWZAizmzrazFWEgZykEZXpr2hDtY=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE=
github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8=
@ -42,6 +46,7 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022 h1:y8Gs8CzNfDF5AZvjr+5UyGQvQEBL7pwo+v+wX6q9JI8=
github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4=
github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM=
github.com/Unknwon/com v0.0.0-20151008135407-28b053d5a292/go.mod h1:KYCjqMOeHpNuTOiFQU6WEcTG7poCJrUs0YgyHNtn1no=
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14=
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw=
github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
@ -123,6 +128,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dimchansky/utfbom v1.0.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/dnaeon/go-vcr v0.0.0-20180920040454-5637cf3d8a31 h1:Dzuw9GtbmllUqEcoHfScT9YpKFUssSiZ5PgZkIGf/YQ=
@ -200,6 +206,7 @@ github.com/hashicorp/consul v0.0.0-20171026175957-610f3c86a089 h1:1eDpXAxTh0iPv+
github.com/hashicorp/consul v0.0.0-20171026175957-610f3c86a089/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-azure-helpers v0.4.1/go.mod h1:lu62V//auUow6k0IykxLK2DCNW8qTmpm8KqhYVWattA=
github.com/hashicorp/go-azure-helpers v0.10.0 h1:KhjDnQhCqEMKlt4yH00MCevJQPJ6LkHFdSveXINO6vE=
github.com/hashicorp/go-azure-helpers v0.10.0/go.mod h1:YuAtHxm2v74s+IjQwUG88dHBJPd5jL+cXr5BGVzSKhE=
github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU=
@ -425,6 +432,18 @@ github.com/tencentyun/cos-go-sdk-v5 v0.0.0-20190808065407-f07404cefc8c h1:iRD1Cq
github.com/tencentyun/cos-go-sdk-v5 v0.0.0-20190808065407-f07404cefc8c/go.mod h1:wk2XFUg6egk4tSDNZtXeKfe2G6690UVyt163PuUxBZk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6 h1:lYIiVDtZnyTWlNwiAxLj0bbpTcx1BWCFhXjfsvmPdNc=
github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tombuildsstuff/giovanni v0.10.0 h1:XqZBPVD2hETa30FFdMz/zVfnidMnUrIMMmKIH7hWnWA=
github.com/tombuildsstuff/giovanni v0.10.0/go.mod h1:WwPhFP2+WnhJzvPYDnsyBab2wOIksMX6xm+Tg+jVvKw=
github.com/tombuildsstuff/giovanni v0.10.1 h1:AWfooo7kvVgfpjdp+NF/WDM5Pbkt4KMSv/h57t3DNRU=
github.com/tombuildsstuff/giovanni v0.10.1/go.mod h1:WwPhFP2+WnhJzvPYDnsyBab2wOIksMX6xm+Tg+jVvKw=
github.com/tombuildsstuff/go-autorest v14.0.1-0.20200317095413-f2d2d0252c3c+incompatible h1:H+ytGDYKp8E7Mb0GBK3LMaJd9O2lUo2Cmb4sTMHM3N8=
github.com/tombuildsstuff/go-autorest v14.0.1-0.20200317095413-f2d2d0252c3c+incompatible/go.mod h1:OVwh0+NZeL2RTqclVEX+p20Qys7Ihpd52PD0eqFDXtY=
github.com/tombuildsstuff/go-autorest v14.0.1-0.20200416184303-d4e299a3c04a+incompatible h1:9645FYqYopS+TFknygW7EC9PCbIC5T4WvWUpktyE2JA=
github.com/tombuildsstuff/go-autorest v14.0.1-0.20200416184303-d4e299a3c04a+incompatible/go.mod h1:OVwh0+NZeL2RTqclVEX+p20Qys7Ihpd52PD0eqFDXtY=
github.com/tombuildsstuff/go-autorest/autorest v0.10.1-0.20200317095413-f2d2d0252c3c h1:9KiivLYWWeGGT2jzfB5T5Tj/gsSWf1KgbDwD9H7rrWM=
github.com/tombuildsstuff/go-autorest/autorest v0.10.1-0.20200317095413-f2d2d0252c3c/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/tombuildsstuff/go-autorest/autorest v0.10.1-0.20200416184303-d4e299a3c04a h1:F/4zKpn8ra3rhPMBzrVc7LYL1GB1ucl/va4I+4ubUWg=
github.com/tombuildsstuff/go-autorest/autorest v0.10.1-0.20200416184303-d4e299a3c04a/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5 h1:cMjKdf4PxEBN9K5HaD9UMW8gkTbM0kMzkTa9SJe0WNQ=
github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok=
@ -459,6 +478,7 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/
go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@ -466,8 +486,8 @@ golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e h1:egKlR8l7Nu9vHGWbcUV8lqR4987UfUbBd7GbhqGzNYU=
golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 h1:cg5LA/zNPRzIXIWSCxQW10Rvpy94aQh3LT/ShoCpkHw=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -494,15 +514,12 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191009170851-d66e71096ffb h1:TR699M2v0qoKTOHxeLgp6zPqaQNs74f01a/ob9W0qko=
golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -547,9 +564,7 @@ golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0 h1:Dh6fw+p6FyRl5x/FvNswO1ji0lIGzm3KP8Y9VkS9PTE=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e h1:aZzprAO9/8oim3qStq3wc1Xuxx4QmAGriC4VU4ojemQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371 h1:Cjq6sG3gnKDchzWy7ouGQklhxMtWvh4AhSNJ0qGIeo4=
golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@ -561,15 +576,11 @@ google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEn
google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@ -584,9 +595,7 @@ google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk=
@ -594,7 +603,6 @@ gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=


@ -187,7 +187,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 Microsoft Corporation
Copyright 2020 Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@ -1,6 +1,6 @@
// +build go1.9
// Copyright 2019 Microsoft Corporation
// Copyright 2020 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -73,6 +73,7 @@ type ErrorAdditionalInfo = original.ErrorAdditionalInfo
type ErrorResponse = original.ErrorResponse
type ExportTemplateRequest = original.ExportTemplateRequest
type GenericResource = original.GenericResource
type GenericResourceExpanded = original.GenericResourceExpanded
type GenericResourceFilter = original.GenericResourceFilter
type Group = original.Group
type GroupExportResult = original.GroupExportResult


@ -1,6 +1,6 @@
// +build go1.9
// Copyright 2019 Microsoft Corporation
// Copyright 2020 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.


@ -37,7 +37,8 @@ func NewApplicationsClient(tenantID string) ApplicationsClient {
return NewApplicationsClientWithBaseURI(DefaultBaseURI, tenantID)
}
// NewApplicationsClientWithBaseURI creates an instance of the ApplicationsClient client.
// NewApplicationsClientWithBaseURI creates an instance of the ApplicationsClient client using a custom endpoint. Use
// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewApplicationsClientWithBaseURI(baseURI string, tenantID string) ApplicationsClient {
return ApplicationsClient{NewWithBaseURI(baseURI, tenantID)}
}
@ -110,8 +111,7 @@ func (client ApplicationsClient) AddOwnerPreparer(ctx context.Context, applicati
// AddOwnerSender sends the AddOwner request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationsClient) AddOwnerSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// AddOwnerResponder handles the response to the AddOwner request. The method always
@ -191,8 +191,7 @@ func (client ApplicationsClient) CreatePreparer(ctx context.Context, parameters
// CreateSender sends the Create request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationsClient) CreateSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// CreateResponder handles the response to the Create request. The method always
@ -266,8 +265,7 @@ func (client ApplicationsClient) DeletePreparer(ctx context.Context, application
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationsClient) DeleteSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// DeleteResponder handles the response to the Delete request. The method always
@ -340,8 +338,7 @@ func (client ApplicationsClient) GetPreparer(ctx context.Context, applicationObj
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationsClient) GetSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// GetResponder handles the response to the Get request. The method always
@ -415,8 +412,7 @@ func (client ApplicationsClient) GetServicePrincipalsIDByAppIDPreparer(ctx conte
// GetServicePrincipalsIDByAppIDSender sends the GetServicePrincipalsIDByAppID request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationsClient) GetServicePrincipalsIDByAppIDSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// GetServicePrincipalsIDByAppIDResponder handles the response to the GetServicePrincipalsIDByAppID request. The method always
@ -498,8 +494,7 @@ func (client ApplicationsClient) ListPreparer(ctx context.Context, filter string
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationsClient) ListSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListResponder handles the response to the List request. The method always
@ -589,8 +584,7 @@ func (client ApplicationsClient) ListKeyCredentialsPreparer(ctx context.Context,
// ListKeyCredentialsSender sends the ListKeyCredentials request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationsClient) ListKeyCredentialsSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListKeyCredentialsResponder handles the response to the ListKeyCredentials request. The method always
@ -664,8 +658,7 @@ func (client ApplicationsClient) ListNextPreparer(ctx context.Context, nextLink
// ListNextSender sends the ListNext request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationsClient) ListNextSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListNextResponder handles the response to the ListNext request. The method always
@ -740,8 +733,7 @@ func (client ApplicationsClient) ListOwnersPreparer(ctx context.Context, applica
// ListOwnersSender sends the ListOwners request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationsClient) ListOwnersSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListOwnersResponder handles the response to the ListOwners request. The method always
@ -852,8 +844,7 @@ func (client ApplicationsClient) ListPasswordCredentialsPreparer(ctx context.Con
// ListPasswordCredentialsSender sends the ListPasswordCredentials request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationsClient) ListPasswordCredentialsSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListPasswordCredentialsResponder handles the response to the ListPasswordCredentials request. The method always
@ -930,8 +921,7 @@ func (client ApplicationsClient) PatchPreparer(ctx context.Context, applicationO
// PatchSender sends the Patch request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationsClient) PatchSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// PatchResponder handles the response to the Patch request. The method always
@ -1006,8 +996,7 @@ func (client ApplicationsClient) RemoveOwnerPreparer(ctx context.Context, applic
// RemoveOwnerSender sends the RemoveOwner request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationsClient) RemoveOwnerSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// RemoveOwnerResponder handles the response to the RemoveOwner request. The method always
@ -1083,8 +1072,7 @@ func (client ApplicationsClient) UpdateKeyCredentialsPreparer(ctx context.Contex
// UpdateKeyCredentialsSender sends the UpdateKeyCredentials request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationsClient) UpdateKeyCredentialsSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// UpdateKeyCredentialsResponder handles the response to the UpdateKeyCredentials request. The method always
@ -1160,8 +1148,7 @@ func (client ApplicationsClient) UpdatePasswordCredentialsPreparer(ctx context.C
// UpdatePasswordCredentialsSender sends the UpdatePasswordCredentials request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationsClient) UpdatePasswordCredentialsSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// UpdatePasswordCredentialsResponder handles the response to the UpdatePasswordCredentials request. The method always


@ -41,7 +41,8 @@ func New(tenantID string) BaseClient {
return NewWithBaseURI(DefaultBaseURI, tenantID)
}
// NewWithBaseURI creates an instance of the BaseClient client.
// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint. Use this when interacting with
// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewWithBaseURI(baseURI string, tenantID string) BaseClient {
return BaseClient{
Client: autorest.NewClientWithUserAgent(UserAgent()),


@ -36,7 +36,9 @@ func NewDeletedApplicationsClient(tenantID string) DeletedApplicationsClient {
return NewDeletedApplicationsClientWithBaseURI(DefaultBaseURI, tenantID)
}
// NewDeletedApplicationsClientWithBaseURI creates an instance of the DeletedApplicationsClient client.
// NewDeletedApplicationsClientWithBaseURI creates an instance of the DeletedApplicationsClient client using a custom
// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure
// stack).
func NewDeletedApplicationsClientWithBaseURI(baseURI string, tenantID string) DeletedApplicationsClient {
return DeletedApplicationsClient{NewWithBaseURI(baseURI, tenantID)}
}
@ -99,8 +101,7 @@ func (client DeletedApplicationsClient) HardDeletePreparer(ctx context.Context,
// HardDeleteSender sends the HardDelete request. The method will close the
// http.Response Body if it receives an error.
func (client DeletedApplicationsClient) HardDeleteSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// HardDeleteResponder handles the response to the HardDelete request. The method always
@ -181,8 +182,7 @@ func (client DeletedApplicationsClient) ListPreparer(ctx context.Context, filter
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client DeletedApplicationsClient) ListSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListResponder handles the response to the List request. The method always
@ -272,8 +272,7 @@ func (client DeletedApplicationsClient) ListNextPreparer(ctx context.Context, ne
// ListNextSender sends the ListNext request. The method will close the
// http.Response Body if it receives an error.
func (client DeletedApplicationsClient) ListNextSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListNextResponder handles the response to the ListNext request. The method always
@ -347,8 +346,7 @@ func (client DeletedApplicationsClient) RestorePreparer(ctx context.Context, obj
// RestoreSender sends the Restore request. The method will close the
// http.Response Body if it receives an error.
func (client DeletedApplicationsClient) RestoreSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// RestoreResponder handles the response to the Restore request. The method always


@ -35,7 +35,8 @@ func NewDomainsClient(tenantID string) DomainsClient {
return NewDomainsClientWithBaseURI(DefaultBaseURI, tenantID)
}
// NewDomainsClientWithBaseURI creates an instance of the DomainsClient client.
// NewDomainsClientWithBaseURI creates an instance of the DomainsClient client using a custom endpoint. Use this when
// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewDomainsClientWithBaseURI(baseURI string, tenantID string) DomainsClient {
return DomainsClient{NewWithBaseURI(baseURI, tenantID)}
}
@ -98,8 +99,7 @@ func (client DomainsClient) GetPreparer(ctx context.Context, domainName string)
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client DomainsClient) GetSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// GetResponder handles the response to the Get request. The method always
@ -175,8 +175,7 @@ func (client DomainsClient) ListPreparer(ctx context.Context, filter string) (*h
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client DomainsClient) ListSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListResponder handles the response to the List request. The method always

View File

@ -37,7 +37,8 @@ func NewGroupsClient(tenantID string) GroupsClient {
return NewGroupsClientWithBaseURI(DefaultBaseURI, tenantID)
}
// NewGroupsClientWithBaseURI creates an instance of the GroupsClient client.
// NewGroupsClientWithBaseURI creates an instance of the GroupsClient client using a custom endpoint. Use this when
// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewGroupsClientWithBaseURI(baseURI string, tenantID string) GroupsClient {
return GroupsClient{NewWithBaseURI(baseURI, tenantID)}
}
@ -110,8 +111,7 @@ func (client GroupsClient) AddMemberPreparer(ctx context.Context, groupObjectID
// AddMemberSender sends the AddMember request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) AddMemberSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// AddMemberResponder handles the response to the AddMember request. The method always
@ -194,8 +194,7 @@ func (client GroupsClient) AddOwnerPreparer(ctx context.Context, objectID string
// AddOwnerSender sends the AddOwner request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) AddOwnerSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// AddOwnerResponder handles the response to the AddOwner request. The method always
@ -278,8 +277,7 @@ func (client GroupsClient) CreatePreparer(ctx context.Context, parameters GroupC
// CreateSender sends the Create request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) CreateSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// CreateResponder handles the response to the Create request. The method always
@ -353,8 +351,7 @@ func (client GroupsClient) DeletePreparer(ctx context.Context, objectID string)
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) DeleteSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// DeleteResponder handles the response to the Delete request. The method always
@ -427,8 +424,7 @@ func (client GroupsClient) GetPreparer(ctx context.Context, objectID string) (*h
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) GetSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// GetResponder handles the response to the Get request. The method always
@ -508,8 +504,7 @@ func (client GroupsClient) GetGroupMembersPreparer(ctx context.Context, objectID
// GetGroupMembersSender sends the GetGroupMembers request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) GetGroupMembersSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// GetGroupMembersResponder handles the response to the GetGroupMembers request. The method always
@ -599,8 +594,7 @@ func (client GroupsClient) GetGroupMembersNextPreparer(ctx context.Context, next
// GetGroupMembersNextSender sends the GetGroupMembersNext request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) GetGroupMembersNextSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// GetGroupMembersNextResponder handles the response to the GetGroupMembersNext request. The method always
@ -683,8 +677,7 @@ func (client GroupsClient) GetMemberGroupsPreparer(ctx context.Context, objectID
// GetMemberGroupsSender sends the GetMemberGroups request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) GetMemberGroupsSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// GetMemberGroupsResponder handles the response to the GetMemberGroups request. The method always
@ -767,8 +760,7 @@ func (client GroupsClient) IsMemberOfPreparer(ctx context.Context, parameters Ch
// IsMemberOfSender sends the IsMemberOf request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) IsMemberOfSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// IsMemberOfResponder handles the response to the IsMemberOf request. The method always
@ -850,8 +842,7 @@ func (client GroupsClient) ListPreparer(ctx context.Context, filter string) (*ht
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) ListSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListResponder handles the response to the List request. The method always
@ -941,8 +932,7 @@ func (client GroupsClient) ListNextPreparer(ctx context.Context, nextLink string
// ListNextSender sends the ListNext request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) ListNextSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListNextResponder handles the response to the ListNext request. The method always
@ -1017,8 +1007,7 @@ func (client GroupsClient) ListOwnersPreparer(ctx context.Context, objectID stri
// ListOwnersSender sends the ListOwners request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) ListOwnersSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListOwnersResponder handles the response to the ListOwners request. The method always
@ -1131,8 +1120,7 @@ func (client GroupsClient) RemoveMemberPreparer(ctx context.Context, groupObject
// RemoveMemberSender sends the RemoveMember request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) RemoveMemberSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// RemoveMemberResponder handles the response to the RemoveMember request. The method always
@ -1207,8 +1195,7 @@ func (client GroupsClient) RemoveOwnerPreparer(ctx context.Context, objectID str
// RemoveOwnerSender sends the RemoveOwner request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) RemoveOwnerSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// RemoveOwnerResponder handles the response to the RemoveOwner request. The method always

View File

@ -45,6 +45,23 @@ func PossibleConsentTypeValues() []ConsentType {
return []ConsentType{AllPrincipals, Principal}
}
// GroupMembershipClaimTypes enumerates the values for group membership claim types.
type GroupMembershipClaimTypes string
const (
// All ...
All GroupMembershipClaimTypes = "All"
// None ...
None GroupMembershipClaimTypes = "None"
// SecurityGroup ...
SecurityGroup GroupMembershipClaimTypes = "SecurityGroup"
)
// PossibleGroupMembershipClaimTypesValues returns an array of possible values for the GroupMembershipClaimTypes const type.
func PossibleGroupMembershipClaimTypesValues() []GroupMembershipClaimTypes {
return []GroupMembershipClaimTypes{All, None, SecurityGroup}
}
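
With GroupMembershipClaims moving from interface{} to this string-backed enum (and the marshaller below switching from a nil check to an empty-string check), callers set the field with one of the named constants. A hedged sketch, using only fields that appear in this diff:

package example

import (
	"github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac" // assumed import path
	"github.com/Azure/go-autorest/autorest/to"
)

func exampleCreateParams() graphrbac.ApplicationCreateParameters {
	return graphrbac.ApplicationCreateParameters{
		Homepage:                to.StringPtr("https://example.invalid"),
		AvailableToOtherTenants: to.BoolPtr(false),
		// Previously any value was accepted via interface{}; the field now takes
		// the GroupMembershipClaimTypes constants defined above.
		GroupMembershipClaims: graphrbac.SecurityGroup,
	}
}
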
// ObjectType enumerates the values for object type.
type ObjectType string
@ -338,8 +355,8 @@ type Application struct {
DisplayName *string `json:"displayName,omitempty"`
// ErrorURL - A URL provided by the author of the application to report errors when using the application.
ErrorURL *string `json:"errorUrl,omitempty"`
// GroupMembershipClaims - Configures the groups claim issued in a user or OAuth 2.0 access token that the app expects.
GroupMembershipClaims interface{} `json:"groupMembershipClaims,omitempty"`
// GroupMembershipClaims - Configures the groups claim issued in a user or OAuth 2.0 access token that the app expects. Possible values include: 'None', 'SecurityGroup', 'All'
GroupMembershipClaims GroupMembershipClaimTypes `json:"groupMembershipClaims,omitempty"`
// Homepage - The home page of the application.
Homepage *string `json:"homepage,omitempty"`
// IdentifierUris - A collection of URIs for the application.
@ -424,7 +441,7 @@ func (a Application) MarshalJSON() ([]byte, error) {
if a.ErrorURL != nil {
objectMap["errorUrl"] = a.ErrorURL
}
if a.GroupMembershipClaims != nil {
if a.GroupMembershipClaims != "" {
objectMap["groupMembershipClaims"] = a.GroupMembershipClaims
}
if a.Homepage != nil {
@ -624,7 +641,7 @@ func (a *Application) UnmarshalJSON(body []byte) error {
}
case "groupMembershipClaims":
if v != nil {
var groupMembershipClaims interface{}
var groupMembershipClaims GroupMembershipClaimTypes
err = json.Unmarshal(*v, &groupMembershipClaims)
if err != nil {
return err
@ -890,8 +907,8 @@ type ApplicationBase struct {
AvailableToOtherTenants *bool `json:"availableToOtherTenants,omitempty"`
// ErrorURL - A URL provided by the author of the application to report errors when using the application.
ErrorURL *string `json:"errorUrl,omitempty"`
// GroupMembershipClaims - Configures the groups claim issued in a user or OAuth 2.0 access token that the app expects.
GroupMembershipClaims interface{} `json:"groupMembershipClaims,omitempty"`
// GroupMembershipClaims - Configures the groups claim issued in a user or OAuth 2.0 access token that the app expects. Possible values include: 'None', 'SecurityGroup', 'All'
GroupMembershipClaims GroupMembershipClaimTypes `json:"groupMembershipClaims,omitempty"`
// Homepage - The home page of the application.
Homepage *string `json:"homepage,omitempty"`
// InformationalUrls - URLs with more information about the application.
@ -955,8 +972,8 @@ type ApplicationCreateParameters struct {
AvailableToOtherTenants *bool `json:"availableToOtherTenants,omitempty"`
// ErrorURL - A URL provided by the author of the application to report errors when using the application.
ErrorURL *string `json:"errorUrl,omitempty"`
// GroupMembershipClaims - Configures the groups claim issued in a user or OAuth 2.0 access token that the app expects.
GroupMembershipClaims interface{} `json:"groupMembershipClaims,omitempty"`
// GroupMembershipClaims - Configures the groups claim issued in a user or OAuth 2.0 access token that the app expects. Possible values include: 'None', 'SecurityGroup', 'All'
GroupMembershipClaims GroupMembershipClaimTypes `json:"groupMembershipClaims,omitempty"`
// Homepage - The home page of the application.
Homepage *string `json:"homepage,omitempty"`
// InformationalUrls - URLs with more information about the application.
@ -1154,8 +1171,8 @@ type ApplicationUpdateParameters struct {
AvailableToOtherTenants *bool `json:"availableToOtherTenants,omitempty"`
// ErrorURL - A URL provided by the author of the application to report errors when using the application.
ErrorURL *string `json:"errorUrl,omitempty"`
// GroupMembershipClaims - Configures the groups claim issued in a user or OAuth 2.0 access token that the app expects.
GroupMembershipClaims interface{} `json:"groupMembershipClaims,omitempty"`
// GroupMembershipClaims - Configures the groups claim issued in a user or OAuth 2.0 access token that the app expects. Possible values include: 'None', 'SecurityGroup', 'All'
GroupMembershipClaims GroupMembershipClaimTypes `json:"groupMembershipClaims,omitempty"`
// Homepage - The home page of the application.
Homepage *string `json:"homepage,omitempty"`
// InformationalUrls - URLs with more information about the application.

View File

@ -36,7 +36,9 @@ func NewOAuth2PermissionGrantClient(tenantID string) OAuth2PermissionGrantClient
return NewOAuth2PermissionGrantClientWithBaseURI(DefaultBaseURI, tenantID)
}
// NewOAuth2PermissionGrantClientWithBaseURI creates an instance of the OAuth2PermissionGrantClient client.
// NewOAuth2PermissionGrantClientWithBaseURI creates an instance of the OAuth2PermissionGrantClient client using a
// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds,
// Azure stack).
func NewOAuth2PermissionGrantClientWithBaseURI(baseURI string, tenantID string) OAuth2PermissionGrantClient {
return OAuth2PermissionGrantClient{NewWithBaseURI(baseURI, tenantID)}
}
@ -103,8 +105,7 @@ func (client OAuth2PermissionGrantClient) CreatePreparer(ctx context.Context, bo
// CreateSender sends the Create request. The method will close the
// http.Response Body if it receives an error.
func (client OAuth2PermissionGrantClient) CreateSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// CreateResponder handles the response to the Create request. The method always
@ -178,8 +179,7 @@ func (client OAuth2PermissionGrantClient) DeletePreparer(ctx context.Context, ob
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client OAuth2PermissionGrantClient) DeleteSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// DeleteResponder handles the response to the Delete request. The method always
@ -260,8 +260,7 @@ func (client OAuth2PermissionGrantClient) ListPreparer(ctx context.Context, filt
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client OAuth2PermissionGrantClient) ListSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListResponder handles the response to the List request. The method always
@ -351,8 +350,7 @@ func (client OAuth2PermissionGrantClient) ListNextPreparer(ctx context.Context,
// ListNextSender sends the ListNext request. The method will close the
// http.Response Body if it receives an error.
func (client OAuth2PermissionGrantClient) ListNextSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListNextResponder handles the response to the ListNext request. The method always

View File

@ -36,7 +36,8 @@ func NewObjectsClient(tenantID string) ObjectsClient {
return NewObjectsClientWithBaseURI(DefaultBaseURI, tenantID)
}
// NewObjectsClientWithBaseURI creates an instance of the ObjectsClient client.
// NewObjectsClientWithBaseURI creates an instance of the ObjectsClient client using a custom endpoint. Use this when
// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewObjectsClientWithBaseURI(baseURI string, tenantID string) ObjectsClient {
return ObjectsClient{NewWithBaseURI(baseURI, tenantID)}
}
@ -107,8 +108,7 @@ func (client ObjectsClient) GetObjectsByObjectIdsPreparer(ctx context.Context, p
// GetObjectsByObjectIdsSender sends the GetObjectsByObjectIds request. The method will close the
// http.Response Body if it receives an error.
func (client ObjectsClient) GetObjectsByObjectIdsSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// GetObjectsByObjectIdsResponder handles the response to the GetObjectsByObjectIds request. The method always
@ -198,8 +198,7 @@ func (client ObjectsClient) GetObjectsByObjectIdsNextPreparer(ctx context.Contex
// GetObjectsByObjectIdsNextSender sends the GetObjectsByObjectIdsNext request. The method will close the
// http.Response Body if it receives an error.
func (client ObjectsClient) GetObjectsByObjectIdsNextSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// GetObjectsByObjectIdsNextResponder handles the response to the GetObjectsByObjectIdsNext request. The method always

View File

@ -37,7 +37,9 @@ func NewServicePrincipalsClient(tenantID string) ServicePrincipalsClient {
return NewServicePrincipalsClientWithBaseURI(DefaultBaseURI, tenantID)
}
// NewServicePrincipalsClientWithBaseURI creates an instance of the ServicePrincipalsClient client.
// NewServicePrincipalsClientWithBaseURI creates an instance of the ServicePrincipalsClient client using a custom
// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure
// stack).
func NewServicePrincipalsClientWithBaseURI(baseURI string, tenantID string) ServicePrincipalsClient {
return ServicePrincipalsClient{NewWithBaseURI(baseURI, tenantID)}
}
@ -107,8 +109,7 @@ func (client ServicePrincipalsClient) CreatePreparer(ctx context.Context, parame
// CreateSender sends the Create request. The method will close the
// http.Response Body if it receives an error.
func (client ServicePrincipalsClient) CreateSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// CreateResponder handles the response to the Create request. The method always
@ -182,8 +183,7 @@ func (client ServicePrincipalsClient) DeletePreparer(ctx context.Context, object
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client ServicePrincipalsClient) DeleteSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// DeleteResponder handles the response to the Delete request. The method always
@ -256,8 +256,7 @@ func (client ServicePrincipalsClient) GetPreparer(ctx context.Context, objectID
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client ServicePrincipalsClient) GetSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// GetResponder handles the response to the Get request. The method always
@ -339,8 +338,7 @@ func (client ServicePrincipalsClient) ListPreparer(ctx context.Context, filter s
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client ServicePrincipalsClient) ListSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListResponder handles the response to the List request. The method always
@ -430,8 +428,7 @@ func (client ServicePrincipalsClient) ListKeyCredentialsPreparer(ctx context.Con
// ListKeyCredentialsSender sends the ListKeyCredentials request. The method will close the
// http.Response Body if it receives an error.
func (client ServicePrincipalsClient) ListKeyCredentialsSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListKeyCredentialsResponder handles the response to the ListKeyCredentials request. The method always
@ -505,8 +502,7 @@ func (client ServicePrincipalsClient) ListNextPreparer(ctx context.Context, next
// ListNextSender sends the ListNext request. The method will close the
// http.Response Body if it receives an error.
func (client ServicePrincipalsClient) ListNextSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListNextResponder handles the response to the ListNext request. The method always
@ -581,8 +577,7 @@ func (client ServicePrincipalsClient) ListOwnersPreparer(ctx context.Context, ob
// ListOwnersSender sends the ListOwners request. The method will close the
// http.Response Body if it receives an error.
func (client ServicePrincipalsClient) ListOwnersSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListOwnersResponder handles the response to the ListOwners request. The method always
@ -693,8 +688,7 @@ func (client ServicePrincipalsClient) ListPasswordCredentialsPreparer(ctx contex
// ListPasswordCredentialsSender sends the ListPasswordCredentials request. The method will close the
// http.Response Body if it receives an error.
func (client ServicePrincipalsClient) ListPasswordCredentialsSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListPasswordCredentialsResponder handles the response to the ListPasswordCredentials request. The method always
@ -771,8 +765,7 @@ func (client ServicePrincipalsClient) UpdatePreparer(ctx context.Context, object
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client ServicePrincipalsClient) UpdateSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// UpdateResponder handles the response to the Update request. The method always
@ -848,8 +841,7 @@ func (client ServicePrincipalsClient) UpdateKeyCredentialsPreparer(ctx context.C
// UpdateKeyCredentialsSender sends the UpdateKeyCredentials request. The method will close the
// http.Response Body if it receives an error.
func (client ServicePrincipalsClient) UpdateKeyCredentialsSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// UpdateKeyCredentialsResponder handles the response to the UpdateKeyCredentials request. The method always
@ -925,8 +917,7 @@ func (client ServicePrincipalsClient) UpdatePasswordCredentialsPreparer(ctx cont
// UpdatePasswordCredentialsSender sends the UpdatePasswordCredentials request. The method will close the
// http.Response Body if it receives an error.
func (client ServicePrincipalsClient) UpdatePasswordCredentialsSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// UpdatePasswordCredentialsResponder handles the response to the UpdatePasswordCredentials request. The method always

View File

@ -36,7 +36,8 @@ func NewSignedInUserClient(tenantID string) SignedInUserClient {
return NewSignedInUserClientWithBaseURI(DefaultBaseURI, tenantID)
}
// NewSignedInUserClientWithBaseURI creates an instance of the SignedInUserClient client.
// NewSignedInUserClientWithBaseURI creates an instance of the SignedInUserClient client using a custom endpoint. Use
// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewSignedInUserClientWithBaseURI(baseURI string, tenantID string) SignedInUserClient {
return SignedInUserClient{NewWithBaseURI(baseURI, tenantID)}
}
@ -96,8 +97,7 @@ func (client SignedInUserClient) GetPreparer(ctx context.Context) (*http.Request
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client SignedInUserClient) GetSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// GetResponder handles the response to the Get request. The method always
@ -174,8 +174,7 @@ func (client SignedInUserClient) ListOwnedObjectsPreparer(ctx context.Context) (
// ListOwnedObjectsSender sends the ListOwnedObjects request. The method will close the
// http.Response Body if it receives an error.
func (client SignedInUserClient) ListOwnedObjectsSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListOwnedObjectsResponder handles the response to the ListOwnedObjects request. The method always
@ -265,8 +264,7 @@ func (client SignedInUserClient) ListOwnedObjectsNextPreparer(ctx context.Contex
// ListOwnedObjectsNextSender sends the ListOwnedObjectsNext request. The method will close the
// http.Response Body if it receives an error.
func (client SignedInUserClient) ListOwnedObjectsNextSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListOwnedObjectsNextResponder handles the response to the ListOwnedObjectsNext request. The method always

View File

@ -37,7 +37,8 @@ func NewUsersClient(tenantID string) UsersClient {
return NewUsersClientWithBaseURI(DefaultBaseURI, tenantID)
}
// NewUsersClientWithBaseURI creates an instance of the UsersClient client.
// NewUsersClientWithBaseURI creates an instance of the UsersClient client using a custom endpoint. Use this when
// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewUsersClientWithBaseURI(baseURI string, tenantID string) UsersClient {
return UsersClient{NewWithBaseURI(baseURI, tenantID)}
}
@ -112,8 +113,7 @@ func (client UsersClient) CreatePreparer(ctx context.Context, parameters UserCre
// CreateSender sends the Create request. The method will close the
// http.Response Body if it receives an error.
func (client UsersClient) CreateSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// CreateResponder handles the response to the Create request. The method always
@ -187,8 +187,7 @@ func (client UsersClient) DeletePreparer(ctx context.Context, upnOrObjectID stri
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client UsersClient) DeleteSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// DeleteResponder handles the response to the Delete request. The method always
@ -261,8 +260,7 @@ func (client UsersClient) GetPreparer(ctx context.Context, upnOrObjectID string)
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client UsersClient) GetSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// GetResponder handles the response to the Get request. The method always
@ -345,8 +343,7 @@ func (client UsersClient) GetMemberGroupsPreparer(ctx context.Context, objectID
// GetMemberGroupsSender sends the GetMemberGroups request. The method will close the
// http.Response Body if it receives an error.
func (client UsersClient) GetMemberGroupsSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// GetMemberGroupsResponder handles the response to the GetMemberGroups request. The method always
@ -428,8 +425,7 @@ func (client UsersClient) ListPreparer(ctx context.Context, filter string) (*htt
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client UsersClient) ListSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListResponder handles the response to the List request. The method always
@ -519,8 +515,7 @@ func (client UsersClient) ListNextPreparer(ctx context.Context, nextLink string)
// ListNextSender sends the ListNext request. The method will close the
// http.Response Body if it receives an error.
func (client UsersClient) ListNextSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// ListNextResponder handles the response to the ListNext request. The method always
@ -597,8 +592,7 @@ func (client UsersClient) UpdatePreparer(ctx context.Context, upnOrObjectID stri
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client UsersClient) UpdateSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// UpdateResponder handles the response to the Update request. The method always

View File

@ -41,7 +41,8 @@ func New(subscriptionID string) BaseClient {
return NewWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewWithBaseURI creates an instance of the BaseClient client.
// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint. Use this when interacting with
// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
return BaseClient{
Client: autorest.NewClientWithUserAgent(UserAgent()),

View File

@ -36,7 +36,9 @@ func NewDeploymentOperationsClient(subscriptionID string) DeploymentOperationsCl
return NewDeploymentOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewDeploymentOperationsClientWithBaseURI creates an instance of the DeploymentOperationsClient client.
// NewDeploymentOperationsClientWithBaseURI creates an instance of the DeploymentOperationsClient client using a custom
// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure
// stack).
func NewDeploymentOperationsClientWithBaseURI(baseURI string, subscriptionID string) DeploymentOperationsClient {
return DeploymentOperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
@ -111,8 +113,7 @@ func (client DeploymentOperationsClient) GetPreparer(ctx context.Context, resour
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client DeploymentOperationsClient) GetSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
@ -201,8 +202,7 @@ func (client DeploymentOperationsClient) ListPreparer(ctx context.Context, resou
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client DeploymentOperationsClient) ListSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always

View File

@ -36,7 +36,8 @@ func NewDeploymentsClient(subscriptionID string) DeploymentsClient {
return NewDeploymentsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewDeploymentsClientWithBaseURI creates an instance of the DeploymentsClient client.
// NewDeploymentsClientWithBaseURI creates an instance of the DeploymentsClient client using a custom endpoint. Use
// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewDeploymentsClientWithBaseURI(baseURI string, subscriptionID string) DeploymentsClient {
return DeploymentsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
@ -96,8 +97,7 @@ func (client DeploymentsClient) CalculateTemplateHashPreparer(ctx context.Contex
// CalculateTemplateHashSender sends the CalculateTemplateHash request. The method will close the
// http.Response Body if it receives an error.
func (client DeploymentsClient) CalculateTemplateHashSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// CalculateTemplateHashResponder handles the response to the CalculateTemplateHash request. The method always
@ -181,8 +181,7 @@ func (client DeploymentsClient) CancelPreparer(ctx context.Context, resourceGrou
// CancelSender sends the Cancel request. The method will close the
// http.Response Body if it receives an error.
func (client DeploymentsClient) CancelSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// CancelResponder handles the response to the Cancel request. The method always
@ -265,8 +264,7 @@ func (client DeploymentsClient) CheckExistencePreparer(ctx context.Context, reso
// CheckExistenceSender sends the CheckExistence request. The method will close the
// http.Response Body if it receives an error.
func (client DeploymentsClient) CheckExistenceSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// CheckExistenceResponder handles the response to the CheckExistence request. The method always
@ -353,9 +351,8 @@ func (client DeploymentsClient) CreateOrUpdatePreparer(ctx context.Context, reso
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client DeploymentsClient) CreateOrUpdateSender(req *http.Request) (future DeploymentsCreateOrUpdateFuture, err error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
var resp *http.Response
resp, err = autorest.SendWithSender(client, req, sd...)
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
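
The deployment Senders now return the long-running-operation future built from client.Send. A hedged sketch of how a caller typically drives that future; the import path, Deployment/DeploymentProperties field names and WaitForCompletionRef are assumed from the generated SDK of this vintage rather than shown in this hunk:

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/resources" // assumed import path
)

func deployAndWait(ctx context.Context, client resources.DeploymentsClient, rg string, template map[string]interface{}) error {
	// CreateOrUpdate returns a future backed by the Sender above.
	future, err := client.CreateOrUpdate(ctx, rg, "example-deployment", resources.Deployment{
		Properties: &resources.DeploymentProperties{
			Mode:     resources.Incremental,
			Template: template,
		},
	})
	if err != nil {
		return err
	}
	// Block until the long-running operation finishes (or ctx is cancelled).
	return future.WaitForCompletionRef(ctx, client.Client)
}
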
@ -438,9 +435,8 @@ func (client DeploymentsClient) DeletePreparer(ctx context.Context, resourceGrou
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client DeploymentsClient) DeleteSender(req *http.Request) (future DeploymentsDeleteFuture, err error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
var resp *http.Response
resp, err = autorest.SendWithSender(client, req, sd...)
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
@ -528,8 +524,7 @@ func (client DeploymentsClient) ExportTemplatePreparer(ctx context.Context, reso
// ExportTemplateSender sends the ExportTemplate request. The method will close the
// http.Response Body if it receives an error.
func (client DeploymentsClient) ExportTemplateSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ExportTemplateResponder handles the response to the ExportTemplate request. The method always
@ -613,8 +608,7 @@ func (client DeploymentsClient) GetPreparer(ctx context.Context, resourceGroupNa
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client DeploymentsClient) GetSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
@ -705,8 +699,7 @@ func (client DeploymentsClient) ListPreparer(ctx context.Context, resourceGroupN
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client DeploymentsClient) ListSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
@ -837,8 +830,7 @@ func (client DeploymentsClient) ValidatePreparer(ctx context.Context, resourceGr
// ValidateSender sends the Validate request. The method will close the
// http.Response Body if it receives an error.
func (client DeploymentsClient) ValidateSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ValidateResponder handles the response to the Validate request. The method always

View File

@ -36,7 +36,8 @@ func NewGroupsClient(subscriptionID string) GroupsClient {
return NewGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewGroupsClientWithBaseURI creates an instance of the GroupsClient client.
// NewGroupsClientWithBaseURI creates an instance of the GroupsClient client using a custom endpoint. Use this when
// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewGroupsClientWithBaseURI(baseURI string, subscriptionID string) GroupsClient {
return GroupsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
@ -107,8 +108,7 @@ func (client GroupsClient) CheckExistencePreparer(ctx context.Context, resourceG
// CheckExistenceSender sends the CheckExistence request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) CheckExistenceSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// CheckExistenceResponder handles the response to the CheckExistence request. The method always
@ -195,8 +195,7 @@ func (client GroupsClient) CreateOrUpdatePreparer(ctx context.Context, resourceG
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
@ -272,9 +271,8 @@ func (client GroupsClient) DeletePreparer(ctx context.Context, resourceGroupName
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) DeleteSender(req *http.Request) (future GroupsDeleteFuture, err error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
var resp *http.Response
resp, err = autorest.SendWithSender(client, req, sd...)
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
@ -363,8 +361,7 @@ func (client GroupsClient) ExportTemplatePreparer(ctx context.Context, resourceG
// ExportTemplateSender sends the ExportTemplate request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) ExportTemplateSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ExportTemplateResponder handles the response to the ExportTemplate request. The method always
@ -446,8 +443,7 @@ func (client GroupsClient) GetPreparer(ctx context.Context, resourceGroupName st
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) GetSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
@ -528,8 +524,7 @@ func (client GroupsClient) ListPreparer(ctx context.Context, filter string, top
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) ListSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
@ -586,7 +581,8 @@ func (client GroupsClient) ListComplete(ctx context.Context, filter string, top
// Parameters:
// resourceGroupName - query parameters. If null is passed returns all resource groups.
// filter - the filter to apply on the operation.
// expand - the $expand query parameter
// expand - comma-separated list of additional properties to be included in the response. Valid values include
// `createdTime`, `changedTime` and `provisioningState`. For example, `$expand=createdTime,changedTime`.
// top - query parameters. If null is passed returns all resource groups.
func (client GroupsClient) ListResources(ctx context.Context, resourceGroupName string, filter string, expand string, top *int32) (result ListResultPage, err error) {
if tracing.IsEnabled() {
@ -661,8 +657,7 @@ func (client GroupsClient) ListResourcesPreparer(ctx context.Context, resourceGr
// ListResourcesSender sends the ListResources request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) ListResourcesSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListResourcesResponder handles the response to the ListResources request. The method always
@ -787,8 +782,7 @@ func (client GroupsClient) PatchPreparer(ctx context.Context, resourceGroupName
// PatchSender sends the Patch request. The method will close the
// http.Response Body if it receives an error.
func (client GroupsClient) PatchSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// PatchResponder handles the response to the Patch request. The method always

View File

@ -654,6 +654,68 @@ func (gr GenericResource) MarshalJSON() ([]byte, error) {
return json.Marshal(objectMap)
}
// GenericResourceExpanded resource information.
type GenericResourceExpanded struct {
// CreatedTime - READ-ONLY; The created time of the resource. This is only present if requested via the $expand query parameter.
CreatedTime *date.Time `json:"createdTime,omitempty"`
// ChangedTime - READ-ONLY; The changed time of the resource. This is only present if requested via the $expand query parameter.
ChangedTime *date.Time `json:"changedTime,omitempty"`
// ProvisioningState - READ-ONLY; The provisioning state of the resource. This is only present if requested via the $expand query parameter.
ProvisioningState *string `json:"provisioningState,omitempty"`
// Plan - The plan of the resource.
Plan *Plan `json:"plan,omitempty"`
// Properties - The resource properties.
Properties interface{} `json:"properties,omitempty"`
// Kind - The kind of the resource.
Kind *string `json:"kind,omitempty"`
// ManagedBy - Id of the resource that manages this resource.
ManagedBy *string `json:"managedBy,omitempty"`
// Sku - The sku of the resource.
Sku *Sku `json:"sku,omitempty"`
// Identity - The identity of the resource.
Identity *Identity `json:"identity,omitempty"`
// ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
// Location - Resource location
Location *string `json:"location,omitempty"`
// Tags - Resource tags
Tags map[string]*string `json:"tags"`
}
// MarshalJSON is the custom marshaler for GenericResourceExpanded.
func (gre GenericResourceExpanded) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if gre.Plan != nil {
objectMap["plan"] = gre.Plan
}
if gre.Properties != nil {
objectMap["properties"] = gre.Properties
}
if gre.Kind != nil {
objectMap["kind"] = gre.Kind
}
if gre.ManagedBy != nil {
objectMap["managedBy"] = gre.ManagedBy
}
if gre.Sku != nil {
objectMap["sku"] = gre.Sku
}
if gre.Identity != nil {
objectMap["identity"] = gre.Identity
}
if gre.Location != nil {
objectMap["location"] = gre.Location
}
if gre.Tags != nil {
objectMap["tags"] = gre.Tags
}
return json.Marshal(objectMap)
}
// GenericResourceFilter resource filter.
type GenericResourceFilter struct {
// ResourceType - The resource type.
@ -907,12 +969,12 @@ type Identity struct {
type ListResult struct {
autorest.Response `json:"-"`
// Value - The list of resources.
Value *[]GenericResource `json:"value,omitempty"`
Value *[]GenericResourceExpanded `json:"value,omitempty"`
// NextLink - The URL to get the next set of results.
NextLink *string `json:"nextLink,omitempty"`
}
// ListResultIterator provides access to a complete listing of GenericResource values.
// ListResultIterator provides access to a complete listing of GenericResourceExpanded values.
type ListResultIterator struct {
i int
page ListResultPage
@ -963,9 +1025,9 @@ func (iter ListResultIterator) Response() ListResult {
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter ListResultIterator) Value() GenericResource {
func (iter ListResultIterator) Value() GenericResourceExpanded {
if !iter.page.NotDone() {
return GenericResource{}
return GenericResourceExpanded{}
}
return iter.page.Values()[iter.i]
}
@ -992,7 +1054,7 @@ func (lr ListResult) listResultPreparer(ctx context.Context) (*http.Request, err
autorest.WithBaseURL(to.String(lr.NextLink)))
}
// ListResultPage contains a page of GenericResource values.
// ListResultPage contains a page of GenericResourceExpanded values.
type ListResultPage struct {
fn func(context.Context, ListResult) (ListResult, error)
lr ListResult
@ -1037,7 +1099,7 @@ func (page ListResultPage) Response() ListResult {
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page ListResultPage) Values() []GenericResource {
func (page ListResultPage) Values() []GenericResourceExpanded {
if page.lr.IsEmpty() {
return nil
}
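The list operations in this file now return GenericResourceExpanded values; createdTime, changedTime and provisioningState are only populated when the caller passes the matching $expand query parameter. A rough sketch of requesting the expanded fields, assuming the profile path used elsewhere in this backend re-exports the resources Client (subscription ID, authorizer setup and paging are elided):

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/resources"
)

func listWithExpand(ctx context.Context, subscriptionID string) error {
	client := resources.NewClient(subscriptionID)
	// client.Authorizer would be configured here in real code.

	// Ask the service to include the optional timestamps and provisioning state.
	page, err := client.List(ctx, "", "createdTime,changedTime,provisioningState", nil)
	if err != nil {
		return err
	}
	for _, r := range page.Values() { // []GenericResourceExpanded after this update
		if r.Name != nil && r.ProvisioningState != nil {
			fmt.Printf("%s: %s\n", *r.Name, *r.ProvisioningState)
		}
	}
	return nil
}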

View File

@ -35,7 +35,8 @@ func NewProvidersClient(subscriptionID string) ProvidersClient {
return NewProvidersClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewProvidersClientWithBaseURI creates an instance of the ProvidersClient client.
// NewProvidersClientWithBaseURI creates an instance of the ProvidersClient client using a custom endpoint. Use this
// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewProvidersClientWithBaseURI(baseURI string, subscriptionID string) ProvidersClient {
return ProvidersClient{NewWithBaseURI(baseURI, subscriptionID)}
}
@ -103,8 +104,7 @@ func (client ProvidersClient) GetPreparer(ctx context.Context, resourceProviderN
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client ProvidersClient) GetSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
@ -186,8 +186,7 @@ func (client ProvidersClient) ListPreparer(ctx context.Context, top *int32, expa
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client ProvidersClient) ListSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
@ -298,8 +297,7 @@ func (client ProvidersClient) RegisterPreparer(ctx context.Context, resourceProv
// RegisterSender sends the Register request. The method will close the
// http.Response Body if it receives an error.
func (client ProvidersClient) RegisterSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// RegisterResponder handles the response to the Register request. The method always
@ -373,8 +371,7 @@ func (client ProvidersClient) UnregisterPreparer(ctx context.Context, resourcePr
// UnregisterSender sends the Unregister request. The method will close the
// http.Response Body if it receives an error.
func (client ProvidersClient) UnregisterSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// UnregisterResponder handles the response to the Unregister request. The method always

View File

@ -36,7 +36,8 @@ func NewClient(subscriptionID string) Client {
return NewClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewClientWithBaseURI creates an instance of the Client client.
// NewClientWithBaseURI creates an instance of the Client client using a custom endpoint. Use this when interacting
// with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewClientWithBaseURI(baseURI string, subscriptionID string) Client {
return Client{NewWithBaseURI(baseURI, subscriptionID)}
}
@ -48,7 +49,7 @@ func NewClientWithBaseURI(baseURI string, subscriptionID string) Client {
// parentResourcePath - resource identity.
// resourceType - resource identity.
// resourceName - resource identity.
func (client Client) CheckExistence(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (result autorest.Response, err error) {
func (client Client) CheckExistence(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, APIVersion string) (result autorest.Response, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/Client.CheckExistence")
defer func() {
@ -67,7 +68,7 @@ func (client Client) CheckExistence(ctx context.Context, resourceGroupName strin
return result, validation.NewError("resources.Client", "CheckExistence", err.Error())
}
req, err := client.CheckExistencePreparer(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName)
req, err := client.CheckExistencePreparer(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, APIVersion)
if err != nil {
err = autorest.NewErrorWithError(err, "resources.Client", "CheckExistence", nil, "Failure preparing request")
return
@ -89,7 +90,7 @@ func (client Client) CheckExistence(ctx context.Context, resourceGroupName strin
}
// CheckExistencePreparer prepares the CheckExistence request.
func (client Client) CheckExistencePreparer(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (*http.Request, error) {
func (client Client) CheckExistencePreparer(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, APIVersion string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"parentResourcePath": parentResourcePath,
"resourceGroupName": autorest.Encode("path", resourceGroupName),
@ -99,7 +100,6 @@ func (client Client) CheckExistencePreparer(ctx context.Context, resourceGroupNa
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2016-02-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@ -115,8 +115,7 @@ func (client Client) CheckExistencePreparer(ctx context.Context, resourceGroupNa
// CheckExistenceSender sends the CheckExistence request. The method will close the
// http.Response Body if it receives an error.
func (client Client) CheckExistenceSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// CheckExistenceResponder handles the response to the CheckExistence request. The method always
@ -139,7 +138,7 @@ func (client Client) CheckExistenceResponder(resp *http.Response) (result autore
// resourceType - resource identity.
// resourceName - resource identity.
// parameters - create or update resource parameters.
func (client Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, parameters GenericResource) (result GenericResource, err error) {
func (client Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, APIVersion string, parameters GenericResource) (result GenericResource, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/Client.CreateOrUpdate")
defer func() {
@ -158,7 +157,7 @@ func (client Client) CreateOrUpdate(ctx context.Context, resourceGroupName strin
return result, validation.NewError("resources.Client", "CreateOrUpdate", err.Error())
}
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, parameters)
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, APIVersion, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "resources.Client", "CreateOrUpdate", nil, "Failure preparing request")
return
@ -180,7 +179,7 @@ func (client Client) CreateOrUpdate(ctx context.Context, resourceGroupName strin
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client Client) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, parameters GenericResource) (*http.Request, error) {
func (client Client) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, APIVersion string, parameters GenericResource) (*http.Request, error) {
pathParameters := map[string]interface{}{
"parentResourcePath": parentResourcePath,
"resourceGroupName": autorest.Encode("path", resourceGroupName),
@ -190,7 +189,6 @@ func (client Client) CreateOrUpdatePreparer(ctx context.Context, resourceGroupNa
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2016-02-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@ -208,8 +206,7 @@ func (client Client) CreateOrUpdatePreparer(ctx context.Context, resourceGroupNa
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client Client) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
@ -232,7 +229,7 @@ func (client Client) CreateOrUpdateResponder(resp *http.Response) (result Generi
// parentResourcePath - resource identity.
// resourceType - resource identity.
// resourceName - resource identity.
func (client Client) Delete(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (result autorest.Response, err error) {
func (client Client) Delete(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, APIVersion string) (result autorest.Response, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/Client.Delete")
defer func() {
@ -251,7 +248,7 @@ func (client Client) Delete(ctx context.Context, resourceGroupName string, resou
return result, validation.NewError("resources.Client", "Delete", err.Error())
}
req, err := client.DeletePreparer(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName)
req, err := client.DeletePreparer(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, APIVersion)
if err != nil {
err = autorest.NewErrorWithError(err, "resources.Client", "Delete", nil, "Failure preparing request")
return
@ -273,7 +270,7 @@ func (client Client) Delete(ctx context.Context, resourceGroupName string, resou
}
// DeletePreparer prepares the Delete request.
func (client Client) DeletePreparer(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (*http.Request, error) {
func (client Client) DeletePreparer(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, APIVersion string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"parentResourcePath": parentResourcePath,
"resourceGroupName": autorest.Encode("path", resourceGroupName),
@ -283,7 +280,6 @@ func (client Client) DeletePreparer(ctx context.Context, resourceGroupName strin
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2016-02-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@ -299,8 +295,7 @@ func (client Client) DeletePreparer(ctx context.Context, resourceGroupName strin
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client Client) DeleteSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// DeleteResponder handles the response to the Delete request. The method always
@ -322,7 +317,7 @@ func (client Client) DeleteResponder(resp *http.Response) (result autorest.Respo
// parentResourcePath - resource identity.
// resourceType - resource identity.
// resourceName - resource identity.
func (client Client) Get(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (result GenericResource, err error) {
func (client Client) Get(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, APIVersion string) (result GenericResource, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/Client.Get")
defer func() {
@ -341,7 +336,7 @@ func (client Client) Get(ctx context.Context, resourceGroupName string, resource
return result, validation.NewError("resources.Client", "Get", err.Error())
}
req, err := client.GetPreparer(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName)
req, err := client.GetPreparer(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, APIVersion)
if err != nil {
err = autorest.NewErrorWithError(err, "resources.Client", "Get", nil, "Failure preparing request")
return
@ -363,7 +358,7 @@ func (client Client) Get(ctx context.Context, resourceGroupName string, resource
}
// GetPreparer prepares the Get request.
func (client Client) GetPreparer(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (*http.Request, error) {
func (client Client) GetPreparer(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, APIVersion string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"parentResourcePath": parentResourcePath,
"resourceGroupName": autorest.Encode("path", resourceGroupName),
@ -373,7 +368,6 @@ func (client Client) GetPreparer(ctx context.Context, resourceGroupName string,
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2016-02-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@ -389,8 +383,7 @@ func (client Client) GetPreparer(ctx context.Context, resourceGroupName string,
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client Client) GetSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
@ -409,7 +402,8 @@ func (client Client) GetResponder(resp *http.Response) (result GenericResource,
// List get all of the resources under a subscription.
// Parameters:
// filter - the filter to apply on the operation.
// expand - the $expand query parameter.
// expand - comma-separated list of additional properties to be included in the response. Valid values include
// `createdTime`, `changedTime` and `provisioningState`. For example, `$expand=createdTime,changedTime`.
// top - query parameters. If null is passed returns all resource groups.
func (client Client) List(ctx context.Context, filter string, expand string, top *int32) (result ListResultPage, err error) {
if tracing.IsEnabled() {
@ -475,8 +469,7 @@ func (client Client) ListPreparer(ctx context.Context, filter string, expand str
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client Client) ListSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
@ -593,9 +586,8 @@ func (client Client) MoveResourcesPreparer(ctx context.Context, sourceResourceGr
// MoveResourcesSender sends the MoveResources request. The method will close the
// http.Response Body if it receives an error.
func (client Client) MoveResourcesSender(req *http.Request) (future MoveResourcesFuture, err error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
var resp *http.Response
resp, err = autorest.SendWithSender(client, req, sd...)
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
@ -622,8 +614,9 @@ func (client Client) MoveResourcesResponder(resp *http.Response) (result autores
// parentResourcePath - the parent resource identity.
// resourceType - the resource type of the resource to update.
// resourceName - the name of the resource to update.
// APIVersion - the API version to use for the operation.
// parameters - parameters for updating the resource.
func (client Client) Update(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, parameters GenericResource) (result UpdateFuture, err error) {
func (client Client) Update(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, APIVersion string, parameters GenericResource) (result UpdateFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/Client.Update")
defer func() {
@ -642,7 +635,7 @@ func (client Client) Update(ctx context.Context, resourceGroupName string, resou
return result, validation.NewError("resources.Client", "Update", err.Error())
}
req, err := client.UpdatePreparer(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, parameters)
req, err := client.UpdatePreparer(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, APIVersion, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "resources.Client", "Update", nil, "Failure preparing request")
return
@ -658,7 +651,7 @@ func (client Client) Update(ctx context.Context, resourceGroupName string, resou
}
// UpdatePreparer prepares the Update request.
func (client Client) UpdatePreparer(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, parameters GenericResource) (*http.Request, error) {
func (client Client) UpdatePreparer(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, APIVersion string, parameters GenericResource) (*http.Request, error) {
pathParameters := map[string]interface{}{
"parentResourcePath": parentResourcePath,
"resourceGroupName": autorest.Encode("path", resourceGroupName),
@ -668,7 +661,6 @@ func (client Client) UpdatePreparer(ctx context.Context, resourceGroupName strin
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2016-02-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
@ -686,9 +678,8 @@ func (client Client) UpdatePreparer(ctx context.Context, resourceGroupName strin
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client Client) UpdateSender(req *http.Request) (future UpdateFuture, err error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
var resp *http.Response
resp, err = autorest.SendWithSender(client, req, sd...)
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
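Alongside the sender change, the resources Client operations in this file drop the hard-coded const APIVersion = "2016-02-01" and take the API version as an explicit argument. A hedged sketch of the new call shape; the resource coordinates and the "2016-01-01" version string are placeholders, not values from this diff:

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/resources"
)

// getResource shows the explicit API-version argument added to Get and friends.
func getResource(ctx context.Context, client resources.Client) (resources.GenericResource, error) {
	return client.Get(ctx,
		"example-rg",         // resourceGroupName
		"Microsoft.Storage",  // resourceProviderNamespace
		"",                   // parentResourcePath
		"storageAccounts",    // resourceType
		"examplestorageacct", // resourceName
		"2016-01-01",         // APIVersion, previously a package-level const
	)
}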

View File

@ -35,7 +35,8 @@ func NewTagsClient(subscriptionID string) TagsClient {
return NewTagsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewTagsClientWithBaseURI creates an instance of the TagsClient client.
// NewTagsClientWithBaseURI creates an instance of the TagsClient client using a custom endpoint. Use this when
// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewTagsClientWithBaseURI(baseURI string, subscriptionID string) TagsClient {
return TagsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
@ -98,8 +99,7 @@ func (client TagsClient) CreateOrUpdatePreparer(ctx context.Context, tagName str
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client TagsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
@ -175,8 +175,7 @@ func (client TagsClient) CreateOrUpdateValuePreparer(ctx context.Context, tagNam
// CreateOrUpdateValueSender sends the CreateOrUpdateValue request. The method will close the
// http.Response Body if it receives an error.
func (client TagsClient) CreateOrUpdateValueSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// CreateOrUpdateValueResponder handles the response to the CreateOrUpdateValue request. The method always
@ -250,8 +249,7 @@ func (client TagsClient) DeletePreparer(ctx context.Context, tagName string) (*h
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client TagsClient) DeleteSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// DeleteResponder handles the response to the Delete request. The method always
@ -326,8 +324,7 @@ func (client TagsClient) DeleteValuePreparer(ctx context.Context, tagName string
// DeleteValueSender sends the DeleteValue request. The method will close the
// http.Response Body if it receives an error.
func (client TagsClient) DeleteValueSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// DeleteValueResponder handles the response to the DeleteValue request. The method always
@ -398,8 +395,7 @@ func (client TagsClient) ListPreparer(ctx context.Context) (*http.Request, error
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client TagsClient) ListSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always

View File

@ -36,7 +36,8 @@ func NewAccountsClient(subscriptionID string) AccountsClient {
return NewAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewAccountsClientWithBaseURI creates an instance of the AccountsClient client.
// NewAccountsClientWithBaseURI creates an instance of the AccountsClient client using a custom endpoint. Use this
// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewAccountsClientWithBaseURI(baseURI string, subscriptionID string) AccountsClient {
return AccountsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
@ -108,8 +109,7 @@ func (client AccountsClient) CheckNameAvailabilityPreparer(ctx context.Context,
// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the
// http.Response Body if it receives an error.
func (client AccountsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always
@ -202,9 +202,8 @@ func (client AccountsClient) CreatePreparer(ctx context.Context, resourceGroupNa
// CreateSender sends the Create request. The method will close the
// http.Response Body if it receives an error.
func (client AccountsClient) CreateSender(req *http.Request) (future AccountsCreateFuture, err error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
var resp *http.Response
resp, err = autorest.SendWithSender(client, req, sd...)
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
@ -293,8 +292,7 @@ func (client AccountsClient) DeletePreparer(ctx context.Context, resourceGroupNa
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client AccountsClient) DeleteSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// DeleteResponder handles the response to the Delete request. The method always
@ -378,8 +376,7 @@ func (client AccountsClient) GetPropertiesPreparer(ctx context.Context, resource
// GetPropertiesSender sends the GetProperties request. The method will close the
// http.Response Body if it receives an error.
func (client AccountsClient) GetPropertiesSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetPropertiesResponder handles the response to the GetProperties request. The method always
@ -451,8 +448,7 @@ func (client AccountsClient) ListPreparer(ctx context.Context) (*http.Request, e
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client AccountsClient) ListSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
@ -527,8 +523,7 @@ func (client AccountsClient) ListByResourceGroupPreparer(ctx context.Context, re
// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
// http.Response Body if it receives an error.
func (client AccountsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
@ -612,8 +607,7 @@ func (client AccountsClient) ListKeysPreparer(ctx context.Context, resourceGroup
// ListKeysSender sends the ListKeys request. The method will close the
// http.Response Body if it receives an error.
func (client AccountsClient) ListKeysSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListKeysResponder handles the response to the ListKeys request. The method always
@ -702,8 +696,7 @@ func (client AccountsClient) RegenerateKeyPreparer(ctx context.Context, resource
// RegenerateKeySender sends the RegenerateKey request. The method will close the
// http.Response Body if it receives an error.
func (client AccountsClient) RegenerateKeySender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// RegenerateKeyResponder handles the response to the RegenerateKey request. The method always
@ -795,8 +788,7 @@ func (client AccountsClient) UpdatePreparer(ctx context.Context, resourceGroupNa
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client AccountsClient) UpdateSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// UpdateResponder handles the response to the Update request. The method always

View File

@ -41,7 +41,8 @@ func New(subscriptionID string) BaseClient {
return NewWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewWithBaseURI creates an instance of the BaseClient client.
// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint. Use this when interacting with
// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
return BaseClient{
Client: autorest.NewClientWithUserAgent(UserAgent()),
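The updated NewWithBaseURI-style doc comments in this update point at custom endpoints for sovereign clouds and Azure Stack. A small sketch of what that could look like for the storage management client used by this backend; the helper name is illustrative, not part of this diff:

package example

import (
	armStorage "github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/storage/mgmt/storage"
	"github.com/Azure/go-autorest/autorest/azure"
)

// newAccountsClient builds an AccountsClient against the environment's own
// Resource Manager endpoint instead of the default public-cloud base URI.
func newAccountsClient(env azure.Environment, subscriptionID string) armStorage.AccountsClient {
	return armStorage.NewAccountsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID)
}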

View File

@ -35,7 +35,8 @@ func NewUsageClient(subscriptionID string) UsageClient {
return NewUsageClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewUsageClientWithBaseURI creates an instance of the UsageClient client.
// NewUsageClientWithBaseURI creates an instance of the UsageClient client using a custom endpoint. Use this when
// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewUsageClientWithBaseURI(baseURI string, subscriptionID string) UsageClient {
return UsageClient{NewWithBaseURI(baseURI, subscriptionID)}
}
@ -95,8 +96,7 @@ func (client UsageClient) ListPreparer(ctx context.Context) (*http.Request, erro
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client UsageClient) ListSender(req *http.Request) (*http.Response, error) {
sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
return autorest.SendWithSender(client, req, sd...)
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always

View File

@ -1,22 +0,0 @@
# Azure Storage SDK for Go (Preview)
:exclamation: IMPORTANT: This package is in maintenance only and will be deprecated in the
future. Please use one of the following packages instead.
| Service | Import Path/Repo |
|---------|------------------|
| Storage - Blobs | [github.com/Azure/azure-storage-blob-go](https://github.com/Azure/azure-storage-blob-go) |
| Storage - Files | [github.com/Azure/azure-storage-file-go](https://github.com/Azure/azure-storage-file-go) |
| Storage - Queues | [github.com/Azure/azure-storage-queue-go](https://github.com/Azure/azure-storage-queue-go) |
The `github.com/Azure/azure-sdk-for-go/storage` package is used to manage
[Azure Storage](https://docs.microsoft.com/en-us/azure/storage/) data plane
resources: containers, blobs, tables, and queues.
To manage storage *accounts* use Azure Resource Manager (ARM) via the packages
at [github.com/Azure/azure-sdk-for-go/services/storage](https://github.com/Azure/azure-sdk-for-go/tree/master/services/storage).
This package also supports the [Azure Storage
Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/)
(Windows only).

View File

@ -1,91 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"crypto/md5"
"encoding/base64"
"fmt"
"net/http"
"net/url"
"time"
)
// PutAppendBlob initializes an empty append blob with the specified name. An
// append blob must be created using this method before appending blocks.
//
// See CreateBlockBlobFromReader for more info on creating blobs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) PutAppendBlob(options *PutBlobOptions) error {
params := url.Values{}
headers := b.Container.bsc.client.getStandardHeaders()
headers["x-ms-blob-type"] = string(BlobTypeAppend)
headers = mergeHeaders(headers, headersFromStruct(b.Properties))
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
if err != nil {
return err
}
return b.respondCreation(resp, BlobTypeAppend)
}
// AppendBlockOptions includes the options for an append block operation
type AppendBlockOptions struct {
Timeout uint
LeaseID string `header:"x-ms-lease-id"`
MaxSize *uint `header:"x-ms-blob-condition-maxsize"`
AppendPosition *uint `header:"x-ms-blob-condition-appendpos"`
IfModifiedSince *time.Time `header:"If-Modified-Since"`
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
IfMatch string `header:"If-Match"`
IfNoneMatch string `header:"If-None-Match"`
RequestID string `header:"x-ms-client-request-id"`
ContentMD5 bool
}
// AppendBlock appends a block to an append blob.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Append-Block
func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error {
params := url.Values{"comp": {"appendblock"}}
headers := b.Container.bsc.client.getStandardHeaders()
headers["x-ms-blob-type"] = string(BlobTypeAppend)
headers["Content-Length"] = fmt.Sprintf("%v", len(chunk))
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
if options.ContentMD5 {
md5sum := md5.Sum(chunk)
headers[headerContentMD5] = base64.StdEncoding.EncodeToString(md5sum[:])
}
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.Container.bsc.auth)
if err != nil {
return err
}
return b.respondCreation(resp, BlobTypeAppend)
}
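For context on the deletion above: in the legacy storage package an append blob had to be created with PutAppendBlob before AppendBlock could add data. A sketch of how that removed API was typically used, against the upstream github.com/Azure/azure-sdk-for-go/storage package (account, container and blob names are placeholders):

package example

import "github.com/Azure/azure-sdk-for-go/storage"

// legacyAppend sketches the removed data-plane flow: create the empty append
// blob first, then append blocks to it.
func legacyAppend(accountName, accountKey string) error {
	client, err := storage.NewBasicClient(accountName, accountKey)
	if err != nil {
		return err
	}
	bsc := client.GetBlobService()
	container := bsc.GetContainerReference("example-container")
	blob := container.GetBlobReference("example.log")

	if err := blob.PutAppendBlob(nil); err != nil { // initialize the append blob
		return err
	}
	return blob.AppendBlock([]byte("hello\n"), nil) // then append a block
}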

View File

@ -1,246 +0,0 @@
// Package storage provides clients for Microsoft Azure Storage Services.
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"fmt"
"net/url"
"sort"
"strings"
)
// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/authentication-for-the-azure-storage-services
type authentication string
const (
sharedKey authentication = "sharedKey"
sharedKeyForTable authentication = "sharedKeyTable"
sharedKeyLite authentication = "sharedKeyLite"
sharedKeyLiteForTable authentication = "sharedKeyLiteTable"
// headers
headerAcceptCharset = "Accept-Charset"
headerAuthorization = "Authorization"
headerContentLength = "Content-Length"
headerDate = "Date"
headerXmsDate = "x-ms-date"
headerXmsVersion = "x-ms-version"
headerContentEncoding = "Content-Encoding"
headerContentLanguage = "Content-Language"
headerContentType = "Content-Type"
headerContentMD5 = "Content-MD5"
headerIfModifiedSince = "If-Modified-Since"
headerIfMatch = "If-Match"
headerIfNoneMatch = "If-None-Match"
headerIfUnmodifiedSince = "If-Unmodified-Since"
headerRange = "Range"
headerDataServiceVersion = "DataServiceVersion"
headerMaxDataServiceVersion = "MaxDataServiceVersion"
headerContentTransferEncoding = "Content-Transfer-Encoding"
)
func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) {
if !c.sasClient {
authHeader, err := c.getSharedKey(verb, url, headers, auth)
if err != nil {
return nil, err
}
headers[headerAuthorization] = authHeader
}
return headers, nil
}
func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) {
canRes, err := c.buildCanonicalizedResource(url, auth, false)
if err != nil {
return "", err
}
canString, err := buildCanonicalizedString(verb, headers, canRes, auth)
if err != nil {
return "", err
}
return c.createAuthorizationHeader(canString, auth), nil
}
func (c *Client) buildCanonicalizedResource(uri string, auth authentication, sas bool) (string, error) {
errMsg := "buildCanonicalizedResource error: %s"
u, err := url.Parse(uri)
if err != nil {
return "", fmt.Errorf(errMsg, err.Error())
}
cr := bytes.NewBufferString("")
if c.accountName != StorageEmulatorAccountName || !sas {
cr.WriteString("/")
cr.WriteString(c.getCanonicalizedAccountName())
}
if len(u.Path) > 0 {
// Any portion of the CanonicalizedResource string that is derived from
// the resource's URI should be encoded exactly as it is in the URI.
// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
cr.WriteString(u.EscapedPath())
}
params, err := url.ParseQuery(u.RawQuery)
if err != nil {
return "", fmt.Errorf(errMsg, err.Error())
}
// See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277
if auth == sharedKey {
if len(params) > 0 {
cr.WriteString("\n")
keys := []string{}
for key := range params {
keys = append(keys, key)
}
sort.Strings(keys)
completeParams := []string{}
for _, key := range keys {
if len(params[key]) > 1 {
sort.Strings(params[key])
}
completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")))
}
cr.WriteString(strings.Join(completeParams, "\n"))
}
} else {
// search for "comp" parameter, if exists then add it to canonicalizedresource
if v, ok := params["comp"]; ok {
cr.WriteString("?comp=" + v[0])
}
}
return string(cr.Bytes()), nil
}
func (c *Client) getCanonicalizedAccountName() string {
// since we may be trying to access a secondary storage account, we need to
// remove the -secondary part of the storage name
return strings.TrimSuffix(c.accountName, "-secondary")
}
func buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string, auth authentication) (string, error) {
contentLength := headers[headerContentLength]
if contentLength == "0" {
contentLength = ""
}
date := headers[headerDate]
if v, ok := headers[headerXmsDate]; ok {
if auth == sharedKey || auth == sharedKeyLite {
date = ""
} else {
date = v
}
}
var canString string
switch auth {
case sharedKey:
canString = strings.Join([]string{
verb,
headers[headerContentEncoding],
headers[headerContentLanguage],
contentLength,
headers[headerContentMD5],
headers[headerContentType],
date,
headers[headerIfModifiedSince],
headers[headerIfMatch],
headers[headerIfNoneMatch],
headers[headerIfUnmodifiedSince],
headers[headerRange],
buildCanonicalizedHeader(headers),
canonicalizedResource,
}, "\n")
case sharedKeyForTable:
canString = strings.Join([]string{
verb,
headers[headerContentMD5],
headers[headerContentType],
date,
canonicalizedResource,
}, "\n")
case sharedKeyLite:
canString = strings.Join([]string{
verb,
headers[headerContentMD5],
headers[headerContentType],
date,
buildCanonicalizedHeader(headers),
canonicalizedResource,
}, "\n")
case sharedKeyLiteForTable:
canString = strings.Join([]string{
date,
canonicalizedResource,
}, "\n")
default:
return "", fmt.Errorf("%s authentication is not supported yet", auth)
}
return canString, nil
}
func buildCanonicalizedHeader(headers map[string]string) string {
cm := make(map[string]string)
for k, v := range headers {
headerName := strings.TrimSpace(strings.ToLower(k))
if strings.HasPrefix(headerName, "x-ms-") {
cm[headerName] = v
}
}
if len(cm) == 0 {
return ""
}
keys := []string{}
for key := range cm {
keys = append(keys, key)
}
sort.Strings(keys)
ch := bytes.NewBufferString("")
for _, key := range keys {
ch.WriteString(key)
ch.WriteRune(':')
ch.WriteString(cm[key])
ch.WriteRune('\n')
}
return strings.TrimSuffix(string(ch.Bytes()), "\n")
}
func (c *Client) createAuthorizationHeader(canonicalizedString string, auth authentication) string {
signature := c.computeHmac256(canonicalizedString)
var key string
switch auth {
case sharedKey, sharedKeyForTable:
key = "SharedKey"
case sharedKeyLite, sharedKeyLiteForTable:
key = "SharedKeyLite"
}
return fmt.Sprintf("%s %s:%s", key, c.getCanonicalizedAccountName(), signature)
}
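The removed createAuthorizationHeader above produced the SharedKey header used by the classic storage data-plane API. As a point of reference, a self-contained sketch of that signing step (HMAC-SHA256 over the canonicalized string with the base64-decoded account key); the inputs are placeholders:

package example

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

// sharedKeyHeader signs a canonicalized string the way the removed code did
// and formats it as "SharedKey <account>:<signature>".
func sharedKeyHeader(accountName, base64Key, canonicalizedString string) (string, error) {
	key, err := base64.StdEncoding.DecodeString(base64Key)
	if err != nil {
		return "", err
	}
	mac := hmac.New(sha256.New, key)
	mac.Write([]byte(canonicalizedString))
	signature := base64.StdEncoding.EncodeToString(mac.Sum(nil))
	return fmt.Sprintf("SharedKey %s:%s", accountName, signature), nil
}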

View File

@ -1,632 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
// A Blob is an entry in BlobListResponse.
type Blob struct {
Container *Container
Name string `xml:"Name"`
Snapshot time.Time `xml:"Snapshot"`
Properties BlobProperties `xml:"Properties"`
Metadata BlobMetadata `xml:"Metadata"`
}
// PutBlobOptions includes the options for any put blob operation
// (page, block, append)
type PutBlobOptions struct {
Timeout uint
LeaseID string `header:"x-ms-lease-id"`
Origin string `header:"Origin"`
IfModifiedSince *time.Time `header:"If-Modified-Since"`
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
IfMatch string `header:"If-Match"`
IfNoneMatch string `header:"If-None-Match"`
RequestID string `header:"x-ms-client-request-id"`
}
// BlobMetadata is a set of custom name/value pairs.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx
type BlobMetadata map[string]string
type blobMetadataEntries struct {
Entries []blobMetadataEntry `xml:",any"`
}
type blobMetadataEntry struct {
XMLName xml.Name
Value string `xml:",chardata"`
}
// UnmarshalXML converts the xml:Metadata into Metadata map
func (bm *BlobMetadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
var entries blobMetadataEntries
if err := d.DecodeElement(&entries, &start); err != nil {
return err
}
for _, entry := range entries.Entries {
if *bm == nil {
*bm = make(BlobMetadata)
}
(*bm)[strings.ToLower(entry.XMLName.Local)] = entry.Value
}
return nil
}
// MarshalXML implements the xml.Marshaler interface. It encodes
// metadata name/value pairs as they would appear in an Azure
// ListBlobs response.
func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
entries := make([]blobMetadataEntry, 0, len(bm))
for k, v := range bm {
entries = append(entries, blobMetadataEntry{
XMLName: xml.Name{Local: http.CanonicalHeaderKey(k)},
Value: v,
})
}
return enc.EncodeElement(blobMetadataEntries{
Entries: entries,
}, start)
}
// BlobProperties contains various properties of a blob
// returned in various endpoints like ListBlobs or GetBlobProperties.
type BlobProperties struct {
LastModified TimeRFC1123 `xml:"Last-Modified"`
Etag string `xml:"Etag"`
ContentMD5 string `xml:"Content-MD5" header:"x-ms-blob-content-md5"`
ContentLength int64 `xml:"Content-Length"`
ContentType string `xml:"Content-Type" header:"x-ms-blob-content-type"`
ContentEncoding string `xml:"Content-Encoding" header:"x-ms-blob-content-encoding"`
CacheControl string `xml:"Cache-Control" header:"x-ms-blob-cache-control"`
ContentLanguage string `xml:"Cache-Language" header:"x-ms-blob-content-language"`
ContentDisposition string `xml:"Content-Disposition" header:"x-ms-blob-content-disposition"`
BlobType BlobType `xml:"BlobType"`
SequenceNumber int64 `xml:"x-ms-blob-sequence-number"`
CopyID string `xml:"CopyId"`
CopyStatus string `xml:"CopyStatus"`
CopySource string `xml:"CopySource"`
CopyProgress string `xml:"CopyProgress"`
CopyCompletionTime TimeRFC1123 `xml:"CopyCompletionTime"`
CopyStatusDescription string `xml:"CopyStatusDescription"`
LeaseStatus string `xml:"LeaseStatus"`
LeaseState string `xml:"LeaseState"`
LeaseDuration string `xml:"LeaseDuration"`
ServerEncrypted bool `xml:"ServerEncrypted"`
IncrementalCopy bool `xml:"IncrementalCopy"`
}
// BlobType defines the type of the Azure Blob.
type BlobType string
// Types of page blobs
const (
BlobTypeBlock BlobType = "BlockBlob"
BlobTypePage BlobType = "PageBlob"
BlobTypeAppend BlobType = "AppendBlob"
)
func (b *Blob) buildPath() string {
return b.Container.buildPath() + "/" + b.Name
}
// Exists returns true if a blob with the given name exists on the specified
// container of the storage account.
func (b *Blob) Exists() (bool, error) {
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), nil)
headers := b.Container.bsc.client.getStandardHeaders()
resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth)
if resp != nil {
defer drainRespBody(resp)
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound {
return resp.StatusCode == http.StatusOK, nil
}
}
return false, err
}
// GetURL gets the canonical URL to the blob with the specified name in the
// specified container.
// This method does not create a publicly accessible URL if the blob or container
// is private and this method does not check if the blob exists.
func (b *Blob) GetURL() string {
container := b.Container.Name
if container == "" {
container = "$root"
}
return b.Container.bsc.client.getEndpoint(blobServiceName, pathForResource(container, b.Name), nil)
}
// GetBlobRangeOptions includes the options for a get blob range operation
type GetBlobRangeOptions struct {
Range *BlobRange
GetRangeContentMD5 bool
*GetBlobOptions
}
// GetBlobOptions includes the options for a get blob operation
type GetBlobOptions struct {
Timeout uint
Snapshot *time.Time
LeaseID string `header:"x-ms-lease-id"`
Origin string `header:"Origin"`
IfModifiedSince *time.Time `header:"If-Modified-Since"`
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
IfMatch string `header:"If-Match"`
IfNoneMatch string `header:"If-None-Match"`
RequestID string `header:"x-ms-client-request-id"`
}
// BlobRange represents the byte range to be retrieved
type BlobRange struct {
Start uint64
End uint64
}
func (br BlobRange) String() string {
if br.End == 0 {
return fmt.Sprintf("bytes=%d-", br.Start)
}
return fmt.Sprintf("bytes=%d-%d", br.Start, br.End)
}
// Get returns a stream to read the blob. Caller must call both Read and Close()
// to correctly close the underlying connection.
//
// See the GetRange method for use with a Range header.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
func (b *Blob) Get(options *GetBlobOptions) (io.ReadCloser, error) {
rangeOptions := GetBlobRangeOptions{
GetBlobOptions: options,
}
resp, err := b.getRange(&rangeOptions)
if err != nil {
return nil, err
}
if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
return nil, err
}
if err := b.writeProperties(resp.Header, true); err != nil {
return resp.Body, err
}
return resp.Body, nil
}
// GetRange reads the specified range of a blob to a stream. The bytesRange
// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec.
// Caller must call both Read and Close() to correctly close the underlying
// connection.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
func (b *Blob) GetRange(options *GetBlobRangeOptions) (io.ReadCloser, error) {
resp, err := b.getRange(options)
if err != nil {
return nil, err
}
if err := checkRespCode(resp, []int{http.StatusPartialContent}); err != nil {
return nil, err
}
// Content-Length header should not be updated, as the service returns the range length
// (which is not always the full blob length)
if err := b.writeProperties(resp.Header, false); err != nil {
return resp.Body, err
}
return resp.Body, nil
}
func (b *Blob) getRange(options *GetBlobRangeOptions) (*http.Response, error) {
params := url.Values{}
headers := b.Container.bsc.client.getStandardHeaders()
if options != nil {
if options.Range != nil {
headers["Range"] = options.Range.String()
if options.GetRangeContentMD5 {
headers["x-ms-range-get-content-md5"] = "true"
}
}
if options.GetBlobOptions != nil {
headers = mergeHeaders(headers, headersFromStruct(*options.GetBlobOptions))
params = addTimeout(params, options.Timeout)
params = addSnapshot(params, options.Snapshot)
}
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
if err != nil {
return nil, err
}
return resp, err
}
// SnapshotOptions includes the options for a snapshot blob operation
type SnapshotOptions struct {
Timeout uint
LeaseID string `header:"x-ms-lease-id"`
IfModifiedSince *time.Time `header:"If-Modified-Since"`
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
IfMatch string `header:"If-Match"`
IfNoneMatch string `header:"If-None-Match"`
RequestID string `header:"x-ms-client-request-id"`
}
// CreateSnapshot creates a snapshot for a blob
// See https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx
func (b *Blob) CreateSnapshot(options *SnapshotOptions) (snapshotTimestamp *time.Time, err error) {
params := url.Values{"comp": {"snapshot"}}
headers := b.Container.bsc.client.getStandardHeaders()
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
if err != nil || resp == nil {
return nil, err
}
defer drainRespBody(resp)
if err := checkRespCode(resp, []int{http.StatusCreated}); err != nil {
return nil, err
}
snapshotResponse := resp.Header.Get(http.CanonicalHeaderKey("x-ms-snapshot"))
if snapshotResponse != "" {
snapshotTimestamp, err := time.Parse(time.RFC3339, snapshotResponse)
if err != nil {
return nil, err
}
return &snapshotTimestamp, nil
}
return nil, errors.New("Snapshot not created")
}
// GetBlobPropertiesOptions includes the options for a get blob properties operation
type GetBlobPropertiesOptions struct {
Timeout uint
Snapshot *time.Time
LeaseID string `header:"x-ms-lease-id"`
IfModifiedSince *time.Time `header:"If-Modified-Since"`
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
IfMatch string `header:"If-Match"`
IfNoneMatch string `header:"If-None-Match"`
RequestID string `header:"x-ms-client-request-id"`
}
// GetProperties provides various information about the specified blob.
// See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx
func (b *Blob) GetProperties(options *GetBlobPropertiesOptions) error {
params := url.Values{}
headers := b.Container.bsc.client.getStandardHeaders()
if options != nil {
params = addTimeout(params, options.Timeout)
params = addSnapshot(params, options.Snapshot)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
return err
}
return b.writeProperties(resp.Header, true)
}
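// A minimal usage sketch for GetProperties: the call populates b.Properties
// in place rather than returning a value.
func exampleGetBlobProperties(b *Blob) error {
	if err := b.GetProperties(nil); err != nil {
		return err
	}
	fmt.Printf("type=%s length=%d content-type=%s\n",
		b.Properties.BlobType, b.Properties.ContentLength, b.Properties.ContentType)
	return nil
}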
func (b *Blob) writeProperties(h http.Header, includeContentLen bool) error {
var err error
contentLength := b.Properties.ContentLength
if includeContentLen {
contentLengthStr := h.Get("Content-Length")
if contentLengthStr != "" {
contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
if err != nil {
return err
}
}
}
var sequenceNum int64
sequenceNumStr := h.Get("x-ms-blob-sequence-number")
if sequenceNumStr != "" {
sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64)
if err != nil {
return err
}
}
lastModified, err := getTimeFromHeaders(h, "Last-Modified")
if err != nil {
return err
}
copyCompletionTime, err := getTimeFromHeaders(h, "x-ms-copy-completion-time")
if err != nil {
return err
}
b.Properties = BlobProperties{
LastModified: TimeRFC1123(*lastModified),
Etag: h.Get("Etag"),
ContentMD5: h.Get("Content-MD5"),
ContentLength: contentLength,
ContentEncoding: h.Get("Content-Encoding"),
ContentType: h.Get("Content-Type"),
ContentDisposition: h.Get("Content-Disposition"),
CacheControl: h.Get("Cache-Control"),
ContentLanguage: h.Get("Content-Language"),
SequenceNumber: sequenceNum,
CopyCompletionTime: TimeRFC1123(*copyCompletionTime),
CopyStatusDescription: h.Get("x-ms-copy-status-description"),
CopyID: h.Get("x-ms-copy-id"),
CopyProgress: h.Get("x-ms-copy-progress"),
CopySource: h.Get("x-ms-copy-source"),
CopyStatus: h.Get("x-ms-copy-status"),
BlobType: BlobType(h.Get("x-ms-blob-type")),
LeaseStatus: h.Get("x-ms-lease-status"),
LeaseState: h.Get("x-ms-lease-state"),
}
b.writeMetadata(h)
return nil
}
// SetBlobPropertiesOptions contains various properties of a blob and is an entry
// in SetProperties
type SetBlobPropertiesOptions struct {
Timeout uint
LeaseID string `header:"x-ms-lease-id"`
Origin string `header:"Origin"`
IfModifiedSince *time.Time `header:"If-Modified-Since"`
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
IfMatch string `header:"If-Match"`
IfNoneMatch string `header:"If-None-Match"`
SequenceNumberAction *SequenceNumberAction
RequestID string `header:"x-ms-client-request-id"`
}
// SequenceNumberAction defines how the blob's sequence number should be modified
type SequenceNumberAction string
// Options for sequence number action
const (
SequenceNumberActionMax SequenceNumberAction = "max"
SequenceNumberActionUpdate SequenceNumberAction = "update"
SequenceNumberActionIncrement SequenceNumberAction = "increment"
)
// SetProperties replaces the BlobHeaders for the specified blob.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetBlobProperties. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Blob-Properties
func (b *Blob) SetProperties(options *SetBlobPropertiesOptions) error {
params := url.Values{"comp": {"properties"}}
headers := b.Container.bsc.client.getStandardHeaders()
headers = mergeHeaders(headers, headersFromStruct(b.Properties))
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
if b.Properties.BlobType == BlobTypePage {
headers = addToHeaders(headers, "x-ms-blob-content-length", fmt.Sprintf("%v", b.Properties.ContentLength))
if options != nil && options.SequenceNumberAction != nil {
headers = addToHeaders(headers, "x-ms-sequence-number-action", string(*options.SequenceNumberAction))
if *options.SequenceNumberAction != SequenceNumberActionIncrement {
headers = addToHeaders(headers, "x-ms-blob-sequence-number", fmt.Sprintf("%v", b.Properties.SequenceNumber))
}
}
}
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusOK})
}
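// A minimal usage sketch for SetProperties: fields are set on b.Properties
// first and then pushed to the service.
func exampleSetBlobProperties(b *Blob) error {
	b.Properties.ContentType = "application/json"
	b.Properties.CacheControl = "max-age=3600"
	return b.SetProperties(nil)
}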
// SetBlobMetadataOptions includes the options for a set blob metadata operation
type SetBlobMetadataOptions struct {
Timeout uint
LeaseID string `header:"x-ms-lease-id"`
IfModifiedSince *time.Time `header:"If-Modified-Since"`
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
IfMatch string `header:"If-Match"`
IfNoneMatch string `header:"If-None-Match"`
RequestID string `header:"x-ms-client-request-id"`
}
// SetMetadata replaces the metadata for the specified blob.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetBlobMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
func (b *Blob) SetMetadata(options *SetBlobMetadataOptions) error {
params := url.Values{"comp": {"metadata"}}
headers := b.Container.bsc.client.getStandardHeaders()
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusOK})
}
// GetBlobMetadataOptions includes the options for a get blob metadata operation
type GetBlobMetadataOptions struct {
Timeout uint
Snapshot *time.Time
LeaseID string `header:"x-ms-lease-id"`
IfModifiedSince *time.Time `header:"If-Modified-Since"`
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
IfMatch string `header:"If-Match"`
IfNoneMatch string `header:"If-None-Match"`
RequestID string `header:"x-ms-client-request-id"`
}
// GetMetadata returns all user-defined metadata for the specified blob.
//
// All metadata keys will be returned in lower case. (HTTP header
// names are case-insensitive.)
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
func (b *Blob) GetMetadata(options *GetBlobMetadataOptions) error {
params := url.Values{"comp": {"metadata"}}
headers := b.Container.bsc.client.getStandardHeaders()
if options != nil {
params = addTimeout(params, options.Timeout)
params = addSnapshot(params, options.Snapshot)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
return err
}
b.writeMetadata(resp.Header)
return nil
}
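// A minimal metadata round-trip sketch: keys written via SetMetadata come back
// lower-cased from GetMetadata, as the comments above describe.
func exampleBlobMetadata(b *Blob) error {
	b.Metadata = BlobMetadata{"Environment": "prod", "owner": "storage-team"}
	if err := b.SetMetadata(nil); err != nil {
		return err
	}
	if err := b.GetMetadata(nil); err != nil {
		return err
	}
	fmt.Printf("environment=%s owner=%s\n", b.Metadata["environment"], b.Metadata["owner"])
	return nil
}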
func (b *Blob) writeMetadata(h http.Header) {
b.Metadata = BlobMetadata(writeMetadata(h))
}
// DeleteBlobOptions includes the options for a delete blob operation
type DeleteBlobOptions struct {
Timeout uint
Snapshot *time.Time
LeaseID string `header:"x-ms-lease-id"`
DeleteSnapshots *bool
IfModifiedSince *time.Time `header:"If-Modified-Since"`
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
IfMatch string `header:"If-Match"`
IfNoneMatch string `header:"If-None-Match"`
RequestID string `header:"x-ms-client-request-id"`
}
// Delete deletes the given blob from the specified container.
// If the blob does not exist at the time of the Delete Blob operation, it
// returns an error.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob
func (b *Blob) Delete(options *DeleteBlobOptions) error {
resp, err := b.delete(options)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusAccepted})
}
// DeleteIfExists deletes the given blob from the specified container. If the
// blob is deleted with this call, it returns true; otherwise it returns false.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob
func (b *Blob) DeleteIfExists(options *DeleteBlobOptions) (bool, error) {
resp, err := b.delete(options)
if resp != nil {
defer drainRespBody(resp)
if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound {
return resp.StatusCode == http.StatusAccepted, nil
}
}
return false, err
}
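// A minimal usage sketch for DeleteIfExists: delete a blob together with its
// snapshots without treating "already gone" as an error. DeleteSnapshots is a
// *bool, so a local variable supplies the address.
func exampleDeleteIfExists(b *Blob) error {
	includeSnapshots := true
	deleted, err := b.DeleteIfExists(&DeleteBlobOptions{DeleteSnapshots: &includeSnapshots})
	if err != nil {
		return err
	}
	fmt.Printf("blob deleted: %t\n", deleted)
	return nil
}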
func (b *Blob) delete(options *DeleteBlobOptions) (*http.Response, error) {
params := url.Values{}
headers := b.Container.bsc.client.getStandardHeaders()
if options != nil {
params = addTimeout(params, options.Timeout)
params = addSnapshot(params, options.Snapshot)
headers = mergeHeaders(headers, headersFromStruct(*options))
if options.DeleteSnapshots != nil {
if *options.DeleteSnapshots {
headers["x-ms-delete-snapshots"] = "include"
} else {
headers["x-ms-delete-snapshots"] = "only"
}
}
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
return b.Container.bsc.client.exec(http.MethodDelete, uri, headers, nil, b.Container.bsc.auth)
}
// helper method to construct the path to either a blob or container
func pathForResource(container, name string) string {
if name != "" {
return fmt.Sprintf("/%s/%s", container, name)
}
return fmt.Sprintf("/%s", container)
}
func (b *Blob) respondCreation(resp *http.Response, bt BlobType) error {
defer drainRespBody(resp)
err := checkRespCode(resp, []int{http.StatusCreated})
if err != nil {
return err
}
b.Properties.BlobType = bt
return nil
}

View File

@ -1,179 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"errors"
"fmt"
"net/url"
"strings"
"time"
)
// OverrideHeaders defines overridable response headers in
// a request using a SAS URI.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
type OverrideHeaders struct {
CacheControl string
ContentDisposition string
ContentEncoding string
ContentLanguage string
ContentType string
}
// BlobSASOptions are options to construct a blob SAS
// URI.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
type BlobSASOptions struct {
BlobServiceSASPermissions
OverrideHeaders
SASOptions
}
// BlobServiceSASPermissions includes the available permissions for
// blob service SAS URI.
type BlobServiceSASPermissions struct {
Read bool
Add bool
Create bool
Write bool
Delete bool
}
func (p BlobServiceSASPermissions) buildString() string {
permissions := ""
if p.Read {
permissions += "r"
}
if p.Add {
permissions += "a"
}
if p.Create {
permissions += "c"
}
if p.Write {
permissions += "w"
}
if p.Delete {
permissions += "d"
}
return permissions
}
// GetSASURI creates a URL to the blob which contains the Shared
// Access Signature with the specified options.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
func (b *Blob) GetSASURI(options BlobSASOptions) (string, error) {
uri := b.GetURL()
signedResource := "b"
canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(uri, b.Container.bsc.auth, true)
if err != nil {
return "", err
}
permissions := options.BlobServiceSASPermissions.buildString()
return b.Container.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders)
}
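// A minimal usage sketch for GetSASURI: a read-only service SAS that expires
// in one hour. The SASOptions type is defined elsewhere in this package; only
// its Expiry and UseHTTPS fields, which blobAndFileSASURI below reads, are
// assumed here.
func exampleBlobSASURI(b *Blob) (string, error) {
	return b.GetSASURI(BlobSASOptions{
		BlobServiceSASPermissions: BlobServiceSASPermissions{Read: true},
		SASOptions: SASOptions{
			Expiry:   time.Now().Add(time.Hour),
			UseHTTPS: true,
		},
	})
}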
func (c *Client) blobAndFileSASURI(options SASOptions, uri, permissions, canonicalizedResource, signedResource string, headers OverrideHeaders) (string, error) {
start := ""
if options.Start != (time.Time{}) {
start = options.Start.UTC().Format(time.RFC3339)
}
expiry := options.Expiry.UTC().Format(time.RFC3339)
// We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
canonicalizedResource, err := url.QueryUnescape(canonicalizedResource)
if err != nil {
return "", err
}
protocols := ""
if options.UseHTTPS {
protocols = "https"
}
stringToSign, err := blobSASStringToSign(permissions, start, expiry, canonicalizedResource, options.Identifier, options.IP, protocols, c.apiVersion, signedResource, "", headers)
if err != nil {
return "", err
}
sig := c.computeHmac256(stringToSign)
sasParams := url.Values{
"sv": {c.apiVersion},
"se": {expiry},
"sr": {signedResource},
"sp": {permissions},
"sig": {sig},
}
if start != "" {
sasParams.Add("st", start)
}
if c.apiVersion >= "2015-04-05" {
if protocols != "" {
sasParams.Add("spr", protocols)
}
if options.IP != "" {
sasParams.Add("sip", options.IP)
}
}
// Add override response headers
addQueryParameter(sasParams, "rscc", headers.CacheControl)
addQueryParameter(sasParams, "rscd", headers.ContentDisposition)
addQueryParameter(sasParams, "rsce", headers.ContentEncoding)
addQueryParameter(sasParams, "rscl", headers.ContentLanguage)
addQueryParameter(sasParams, "rsct", headers.ContentType)
sasURL, err := url.Parse(uri)
if err != nil {
return "", err
}
sasURL.RawQuery = sasParams.Encode()
return sasURL.String(), nil
}
func blobSASStringToSign(signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, signedResource, signedSnapshotTime string, headers OverrideHeaders) (string, error) {
rscc := headers.CacheControl
rscd := headers.ContentDisposition
rsce := headers.ContentEncoding
rscl := headers.ContentLanguage
rsct := headers.ContentType
if signedVersion >= "2015-02-21" {
canonicalizedResource = "/blob" + canonicalizedResource
}
// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
if signedVersion >= "2018-11-09" {
return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, signedResource, signedSnapshotTime, rscc, rscd, rsce, rscl, rsct), nil
}
// https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
if signedVersion >= "2015-04-05" {
return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
}
// reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
if signedVersion >= "2013-08-15" {
return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
}
return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
}

View File

@ -1,186 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
)
// BlobStorageClient contains operations for Microsoft Azure Blob Storage
// Service.
type BlobStorageClient struct {
client Client
auth authentication
}
// GetServiceProperties gets the properties of your storage account's blob service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-blob-service-properties
func (b *BlobStorageClient) GetServiceProperties() (*ServiceProperties, error) {
return b.client.getServiceProperties(blobServiceName, b.auth)
}
// SetServiceProperties sets the properties of your storage account's blob service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-blob-service-properties
func (b *BlobStorageClient) SetServiceProperties(props ServiceProperties) error {
return b.client.setServiceProperties(props, blobServiceName, b.auth)
}
// ListContainersParameters defines the set of customizable parameters to make a
// List Containers call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
type ListContainersParameters struct {
Prefix string
Marker string
Include string
MaxResults uint
Timeout uint
}
// GetContainerReference returns a Container object for the specified container name.
func (b *BlobStorageClient) GetContainerReference(name string) *Container {
return &Container{
bsc: b,
Name: name,
}
}
// GetContainerReferenceFromSASURI returns a Container object for the specified
// container SAS URI.
func GetContainerReferenceFromSASURI(sasuri url.URL) (*Container, error) {
path := strings.Split(sasuri.Path, "/")
if len(path) <= 1 {
return nil, fmt.Errorf("could not find a container in URI: %s", sasuri.String())
}
c, err := newSASClientFromURL(&sasuri)
if err != nil {
return nil, err
}
cli := c.GetBlobService()
return &Container{
bsc: &cli,
Name: path[1],
sasuri: sasuri,
}, nil
}
// ListContainers returns the list of containers in a storage account along with
// pagination token and other response details.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*ContainerListResponse, error) {
q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
uri := b.client.getEndpoint(blobServiceName, "", q)
headers := b.client.getStandardHeaders()
type ContainerAlias struct {
bsc *BlobStorageClient
Name string `xml:"Name"`
Properties ContainerProperties `xml:"Properties"`
Metadata BlobMetadata
sasuri url.URL
}
type ContainerListResponseAlias struct {
XMLName xml.Name `xml:"EnumerationResults"`
Xmlns string `xml:"xmlns,attr"`
Prefix string `xml:"Prefix"`
Marker string `xml:"Marker"`
NextMarker string `xml:"NextMarker"`
MaxResults int64 `xml:"MaxResults"`
Containers []ContainerAlias `xml:"Containers>Container"`
}
var outAlias ContainerListResponseAlias
resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
if err != nil {
return nil, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &outAlias)
if err != nil {
return nil, err
}
out := ContainerListResponse{
XMLName: outAlias.XMLName,
Xmlns: outAlias.Xmlns,
Prefix: outAlias.Prefix,
Marker: outAlias.Marker,
NextMarker: outAlias.NextMarker,
MaxResults: outAlias.MaxResults,
Containers: make([]Container, len(outAlias.Containers)),
}
for i, cnt := range outAlias.Containers {
out.Containers[i] = Container{
bsc: &b,
Name: cnt.Name,
Properties: cnt.Properties,
Metadata: map[string]string(cnt.Metadata),
sasuri: cnt.sasuri,
}
}
return &out, err
}
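// A minimal usage sketch for ListContainers: walk every container whose name
// starts with a prefix, following NextMarker for pagination. The prefix value
// is a placeholder.
func exampleListContainers(bsc BlobStorageClient) error {
	params := ListContainersParameters{Prefix: "terraform", MaxResults: 100}
	for {
		resp, err := bsc.ListContainers(params)
		if err != nil {
			return err
		}
		for _, cnt := range resp.Containers {
			fmt.Println(cnt.Name)
		}
		if resp.NextMarker == "" {
			return nil
		}
		params.Marker = resp.NextMarker
	}
}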
func (p ListContainersParameters) getParameters() url.Values {
out := url.Values{}
if p.Prefix != "" {
out.Set("prefix", p.Prefix)
}
if p.Marker != "" {
out.Set("marker", p.Marker)
}
if p.Include != "" {
out.Set("include", p.Include)
}
if p.MaxResults != 0 {
out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
}
if p.Timeout != 0 {
out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
}
return out
}
func writeMetadata(h http.Header) map[string]string {
metadata := make(map[string]string)
for k, v := range h {
// Can't trust CanonicalHeaderKey() to munge case
// reliably. "_" is allowed in identifiers:
// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
// http://tools.ietf.org/html/rfc7230#section-3.2
// ...but "_" is considered invalid by
// CanonicalMIMEHeaderKey in
// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
// so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl".
k = strings.ToLower(k)
if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
continue
}
// metadata["lol"] = content of the last X-Ms-Meta-Lol header
k = k[len(userDefinedMetadataHeaderPrefix):]
metadata[k] = v[len(v)-1]
}
return metadata
}

View File

@ -1,311 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
// BlockListType is used to filter out types of blocks in a Get Blocks List call
// for a block blob.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all
// block types.
type BlockListType string
// Filters for listing blocks in block blobs
const (
BlockListTypeAll BlockListType = "all"
BlockListTypeCommitted BlockListType = "committed"
BlockListTypeUncommitted BlockListType = "uncommitted"
)
// Maximum sizes (per REST API) for various concepts
const (
MaxBlobBlockSize = 100 * 1024 * 1024
MaxBlobPageSize = 4 * 1024 * 1024
)
// BlockStatus defines states a block for a block blob can
// be in.
type BlockStatus string
// List of statuses that can be used to refer to a block in a block list
const (
BlockStatusUncommitted BlockStatus = "Uncommitted"
BlockStatusCommitted BlockStatus = "Committed"
BlockStatusLatest BlockStatus = "Latest"
)
// Block is used to create Block entities for Put Block List
// call.
type Block struct {
ID string
Status BlockStatus
}
// BlockListResponse contains the response fields from Get Block List call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx
type BlockListResponse struct {
XMLName xml.Name `xml:"BlockList"`
CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"`
UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"`
}
// BlockResponse contains the block information returned
// in the GetBlockListCall.
type BlockResponse struct {
Name string `xml:"Name"`
Size int64 `xml:"Size"`
}
// CreateBlockBlob initializes an empty block blob with no blocks.
//
// See CreateBlockBlobFromReader for more info on creating blobs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error {
return b.CreateBlockBlobFromReader(nil, options)
}
// CreateBlockBlobFromReader initializes a block blob using data from
// reader. The content length is determined from the reader (via Len() when
// available, otherwise by buffering). To create an empty blob, pass
// reader==nil.
//
// Any headers set in blob.Properties or metadata in blob.Metadata
// will be set on the blob.
//
// The API rejects requests with size > 256 MiB (but this limit is not
// checked by the SDK). To write a larger blob, use CreateBlockBlob,
// PutBlock, and PutBlockList.
//
// To create a blob from scratch, call container.GetBlobReference() to
// get an empty blob, fill in blob.Properties and blob.Metadata as
// appropriate then call this method.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions) error {
params := url.Values{}
headers := b.Container.bsc.client.getStandardHeaders()
headers["x-ms-blob-type"] = string(BlobTypeBlock)
headers["Content-Length"] = "0"
var n int64
var err error
if blob != nil {
type lener interface {
Len() int
}
// TODO(rjeczalik): handle io.ReadSeeker, in case blob is *os.File etc.
if l, ok := blob.(lener); ok {
n = int64(l.Len())
} else {
var buf bytes.Buffer
n, err = io.Copy(&buf, blob)
if err != nil {
return err
}
blob = &buf
}
headers["Content-Length"] = strconv.FormatInt(n, 10)
}
b.Properties.ContentLength = n
headers = mergeHeaders(headers, headersFromStruct(b.Properties))
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth)
if err != nil {
return err
}
return b.respondCreation(resp, BlobTypeBlock)
}
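// A minimal usage sketch for CreateBlockBlobFromReader: upload a small
// in-memory payload. Container.GetBlobReference (referenced in the comment
// above, defined elsewhere in this package) is assumed to return a *Blob for
// the given name; the blob name and payload are placeholders.
func exampleCreateBlockBlob(cnt *Container) error {
	b := cnt.GetBlobReference("state/example.json")
	b.Properties.ContentType = "application/json"
	return b.CreateBlockBlobFromReader(strings.NewReader(`{"version": 4}`), nil)
}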
// PutBlockOptions includes the options for a put block operation
type PutBlockOptions struct {
Timeout uint
LeaseID string `header:"x-ms-lease-id"`
ContentMD5 string `header:"Content-MD5"`
RequestID string `header:"x-ms-client-request-id"`
}
// PutBlock saves the given data chunk to the specified block blob with the
// given ID.
//
// The API rejects chunks larger than 100 MiB (but this limit is not
// checked by the SDK).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block
func (b *Blob) PutBlock(blockID string, chunk []byte, options *PutBlockOptions) error {
return b.PutBlockWithLength(blockID, uint64(len(chunk)), bytes.NewReader(chunk), options)
}
// PutBlockWithLength saves the given data stream of exactly the specified size
// to the block blob with the given ID. It is an alternative to PutBlock where
// the data comes as a stream but the length is known in advance.
//
// The API rejects requests with size > 100 MiB (but this limit is not
// checked by the SDK).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block
func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, options *PutBlockOptions) error {
query := url.Values{
"comp": {"block"},
"blockid": {blockID},
}
headers := b.Container.bsc.client.getStandardHeaders()
headers["Content-Length"] = fmt.Sprintf("%v", size)
if options != nil {
query = addTimeout(query, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth)
if err != nil {
return err
}
return b.respondCreation(resp, BlobTypeBlock)
}
// PutBlockFromURLOptions includes the options for a put block from URL operation
type PutBlockFromURLOptions struct {
PutBlockOptions
SourceContentMD5 string `header:"x-ms-source-content-md5"`
SourceContentCRC64 string `header:"x-ms-source-content-crc64"`
}
// PutBlockFromURL copies data of exactly the specified size from the specified
// URL to the block blob with the given ID. It is an alternative to PutBlock
// where the data comes from a remote URL and the offset and length are known
// in advance.
//
// The API rejects requests with size > 100 MiB (but this limit is not
// checked by the SDK).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url
func (b *Blob) PutBlockFromURL(blockID string, blobURL string, offset int64, size uint64, options *PutBlockFromURLOptions) error {
query := url.Values{
"comp": {"block"},
"blockid": {blockID},
}
headers := b.Container.bsc.client.getStandardHeaders()
// The value of this header must be set to zero.
// When the length is not zero, the operation will fail with the status code 400 (Bad Request).
headers["Content-Length"] = "0"
headers["x-ms-copy-source"] = blobURL
headers["x-ms-source-range"] = fmt.Sprintf("bytes=%d-%d", offset, uint64(offset)+size-1)
if options != nil {
query = addTimeout(query, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
if err != nil {
return err
}
return b.respondCreation(resp, BlobTypeBlock)
}
// PutBlockListOptions includes the options for a put block list operation
type PutBlockListOptions struct {
Timeout uint
LeaseID string `header:"x-ms-lease-id"`
IfModifiedSince *time.Time `header:"If-Modified-Since"`
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
IfMatch string `header:"If-Match"`
IfNoneMatch string `header:"If-None-Match"`
RequestID string `header:"x-ms-client-request-id"`
}
// PutBlockList saves the given list of blocks to the specified block blob.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block-List
func (b *Blob) PutBlockList(blocks []Block, options *PutBlockListOptions) error {
params := url.Values{"comp": {"blocklist"}}
blockListXML := prepareBlockListRequest(blocks)
headers := b.Container.bsc.client.getStandardHeaders()
headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML))
headers = mergeHeaders(headers, headersFromStruct(b.Properties))
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.Container.bsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusCreated})
}
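// A minimal staged-upload sketch: stage chunks with PutBlock, then commit them
// with PutBlockList. Block IDs must be base64-encoded strings of equal length;
// fixed literals are used here so no extra imports are needed.
func examplePutBlocks(b *Blob) error {
	ids := []string{"MDAwMA==", "MDAwMQ=="} // base64("0000"), base64("0001")
	chunks := [][]byte{[]byte("first chunk "), []byte("second chunk")}
	blocks := make([]Block, 0, len(ids))
	for i, id := range ids {
		if err := b.PutBlock(id, chunks[i], nil); err != nil {
			return err
		}
		blocks = append(blocks, Block{ID: id, Status: BlockStatusLatest})
	}
	return b.PutBlockList(blocks, nil)
}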
// GetBlockListOptions includes the options for a get block list operation
type GetBlockListOptions struct {
Timeout uint
Snapshot *time.Time
LeaseID string `header:"x-ms-lease-id"`
RequestID string `header:"x-ms-client-request-id"`
}
// GetBlockList retrieves the list of blocks in the specified block blob.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Block-List
func (b *Blob) GetBlockList(blockType BlockListType, options *GetBlockListOptions) (BlockListResponse, error) {
params := url.Values{
"comp": {"blocklist"},
"blocklisttype": {string(blockType)},
}
headers := b.Container.bsc.client.getStandardHeaders()
if options != nil {
params = addTimeout(params, options.Timeout)
params = addSnapshot(params, options.Snapshot)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
var out BlockListResponse
resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
return out, err
}

View File

@ -1,991 +0,0 @@
// Package storage provides clients for Microsoft Azure Storage Services.
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bufio"
"encoding/base64"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"mime/multipart"
"net/http"
"net/url"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/version"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
)
const (
// DefaultBaseURL is the domain name used for storage requests in the
// public cloud when a default client is created.
DefaultBaseURL = "core.windows.net"
// DefaultAPIVersion is the Azure Storage API version string used when a
// basic client is created.
DefaultAPIVersion = "2018-03-28"
defaultUseHTTPS = true
defaultRetryAttempts = 5
defaultRetryDuration = time.Second * 5
// StorageEmulatorAccountName is the fixed storage account name used by the Azure Storage Emulator
StorageEmulatorAccountName = "devstoreaccount1"
// StorageEmulatorAccountKey is the fixed storage account key used by the Azure Storage Emulator
StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
blobServiceName = "blob"
tableServiceName = "table"
queueServiceName = "queue"
fileServiceName = "file"
storageEmulatorBlob = "127.0.0.1:10000"
storageEmulatorTable = "127.0.0.1:10002"
storageEmulatorQueue = "127.0.0.1:10001"
userAgentHeader = "User-Agent"
userDefinedMetadataHeaderPrefix = "x-ms-meta-"
connectionStringAccountName = "accountname"
connectionStringAccountKey = "accountkey"
connectionStringEndpointSuffix = "endpointsuffix"
connectionStringEndpointProtocol = "defaultendpointsprotocol"
connectionStringBlobEndpoint = "blobendpoint"
connectionStringFileEndpoint = "fileendpoint"
connectionStringQueueEndpoint = "queueendpoint"
connectionStringTableEndpoint = "tableendpoint"
connectionStringSAS = "sharedaccesssignature"
)
var (
validStorageAccount = regexp.MustCompile("^[0-9a-z]{3,24}$")
defaultValidStatusCodes = []int{
http.StatusRequestTimeout, // 408
http.StatusInternalServerError, // 500
http.StatusBadGateway, // 502
http.StatusServiceUnavailable, // 503
http.StatusGatewayTimeout, // 504
}
)
// Sender sends a request
type Sender interface {
Send(*Client, *http.Request) (*http.Response, error)
}
// DefaultSender is the default sender for the client. It implements
// an automatic retry strategy.
type DefaultSender struct {
RetryAttempts int
RetryDuration time.Duration
ValidStatusCodes []int
attempts int // used for testing
}
// Send is the default retry strategy in the client
func (ds *DefaultSender) Send(c *Client, req *http.Request) (resp *http.Response, err error) {
rr := autorest.NewRetriableRequest(req)
for attempts := 0; attempts < ds.RetryAttempts; attempts++ {
err = rr.Prepare()
if err != nil {
return resp, err
}
resp, err = c.HTTPClient.Do(rr.Request())
if err != nil || !autorest.ResponseHasStatusCode(resp, ds.ValidStatusCodes...) {
return resp, err
}
drainRespBody(resp)
autorest.DelayForBackoff(ds.RetryDuration, attempts, req.Cancel)
ds.attempts = attempts
}
ds.attempts++
return resp, err
}
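// A minimal sketch of customizing the retry behaviour: any value satisfying
// the Sender interface can be assigned; here the built-in DefaultSender is
// reused with a more patient policy.
func exampleCustomRetries(c *Client) {
	c.Sender = &DefaultSender{
		RetryAttempts:    10,
		RetryDuration:    2 * time.Second,
		ValidStatusCodes: defaultValidStatusCodes,
	}
}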
// Client is the object that needs to be constructed to perform
// operations on the storage account.
type Client struct {
// HTTPClient is the http.Client used to initiate API
// requests. http.DefaultClient is used when creating a
// client.
HTTPClient *http.Client
// Sender is an interface that sends the request. Clients are
// created with a DefaultSender. The DefaultSender has an
// automatic retry strategy built in. The Sender can be customized.
Sender Sender
accountName string
accountKey []byte
useHTTPS bool
UseSharedKeyLite bool
baseURL string
apiVersion string
userAgent string
sasClient bool
accountSASToken url.Values
}
type odataResponse struct {
resp *http.Response
odata odataErrorWrapper
}
// AzureStorageServiceError contains fields of the error response from
// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx
// Some fields might be specific to certain calls.
type AzureStorageServiceError struct {
Code string `xml:"Code"`
Message string `xml:"Message"`
AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"`
QueryParameterName string `xml:"QueryParameterName"`
QueryParameterValue string `xml:"QueryParameterValue"`
Reason string `xml:"Reason"`
Lang string
StatusCode int
RequestID string
Date string
APIVersion string
}
type odataErrorMessage struct {
Lang string `json:"lang"`
Value string `json:"value"`
}
type odataError struct {
Code string `json:"code"`
Message odataErrorMessage `json:"message"`
}
type odataErrorWrapper struct {
Err odataError `json:"odata.error"`
}
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
// nor with an HTTP status code indicating success.
type UnexpectedStatusCodeError struct {
allowed []int
got int
inner error
}
func (e UnexpectedStatusCodeError) Error() string {
s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
got := s(e.got)
expected := []string{}
for _, v := range e.allowed {
expected = append(expected, s(v))
}
return fmt.Sprintf("storage: status code from service response is %s; was expecting %s. Inner error: %+v", got, strings.Join(expected, " or "), e.inner)
}
// Got is the actual status code returned by Azure.
func (e UnexpectedStatusCodeError) Got() int {
return e.got
}
// Inner returns any inner error info.
func (e UnexpectedStatusCodeError) Inner() error {
return e.inner
}
// NewClientFromConnectionString creates a Client from the connection string.
func NewClientFromConnectionString(input string) (Client, error) {
// build a map of connection string key/value pairs
parts := map[string]string{}
for _, pair := range strings.Split(input, ";") {
if pair == "" {
continue
}
equalDex := strings.IndexByte(pair, '=')
if equalDex <= 0 {
return Client{}, fmt.Errorf("Invalid connection segment %q", pair)
}
value := strings.TrimSpace(pair[equalDex+1:])
key := strings.TrimSpace(strings.ToLower(pair[:equalDex]))
parts[key] = value
}
// TODO: validate parameter sets?
if parts[connectionStringAccountName] == StorageEmulatorAccountName {
return NewEmulatorClient()
}
if parts[connectionStringSAS] != "" {
endpoint := ""
if parts[connectionStringBlobEndpoint] != "" {
endpoint = parts[connectionStringBlobEndpoint]
} else if parts[connectionStringFileEndpoint] != "" {
endpoint = parts[connectionStringFileEndpoint]
} else if parts[connectionStringQueueEndpoint] != "" {
endpoint = parts[connectionStringQueueEndpoint]
} else {
endpoint = parts[connectionStringTableEndpoint]
}
return NewAccountSASClientFromEndpointToken(endpoint, parts[connectionStringSAS])
}
useHTTPS := defaultUseHTTPS
if parts[connectionStringEndpointProtocol] != "" {
useHTTPS = parts[connectionStringEndpointProtocol] == "https"
}
return NewClient(parts[connectionStringAccountName], parts[connectionStringAccountKey],
parts[connectionStringEndpointSuffix], DefaultAPIVersion, useHTTPS)
}
// NewBasicClient constructs a Client with the given storage account name and
// key.
func NewBasicClient(accountName, accountKey string) (Client, error) {
if accountName == StorageEmulatorAccountName {
return NewEmulatorClient()
}
return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS)
}
// NewBasicClientOnSovereignCloud constructs a Client with the given storage
// account name and key in the referenced cloud.
func NewBasicClientOnSovereignCloud(accountName, accountKey string, env azure.Environment) (Client, error) {
if accountName == StorageEmulatorAccountName {
return NewEmulatorClient()
}
return NewClient(accountName, accountKey, env.StorageEndpointSuffix, DefaultAPIVersion, defaultUseHTTPS)
}
// NewEmulatorClient constructs a Client intended to only work with the Azure
// Storage Emulator.
func NewEmulatorClient() (Client, error) {
return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false)
}
// NewClient constructs a Client. This should be used if the caller wants
// to specify whether to use HTTPS, a specific REST API version or a custom
// storage endpoint other than the Azure Public Cloud.
func NewClient(accountName, accountKey, serviceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
var c Client
if !IsValidStorageAccount(accountName) {
return c, fmt.Errorf("azure: account name is not valid: it must be between 3 and 24 characters, and only may contain numbers and lowercase letters: %v", accountName)
} else if accountKey == "" {
return c, fmt.Errorf("azure: account key required")
} else if serviceBaseURL == "" {
return c, fmt.Errorf("azure: base storage service url required")
}
key, err := base64.StdEncoding.DecodeString(accountKey)
if err != nil {
return c, fmt.Errorf("azure: malformed storage account key: %v", err)
}
c = Client{
HTTPClient: http.DefaultClient,
accountName: accountName,
accountKey: key,
useHTTPS: useHTTPS,
baseURL: serviceBaseURL,
apiVersion: apiVersion,
sasClient: false,
UseSharedKeyLite: false,
Sender: &DefaultSender{
RetryAttempts: defaultRetryAttempts,
ValidStatusCodes: defaultValidStatusCodes,
RetryDuration: defaultRetryDuration,
},
}
c.userAgent = c.getDefaultUserAgent()
return c, nil
}
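// A minimal construction sketch: shared-key client -> blob service -> container
// reference. The account name and base64 key shown are placeholders.
func exampleNewClient() (*Container, error) {
	client, err := NewBasicClient("mystorageaccount", "bXkta2V5LWluLWJhc2U2NA==")
	if err != nil {
		return nil, err
	}
	blobService := client.GetBlobService()
	return blobService.GetContainerReference("tfstate"), nil
}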
// IsValidStorageAccount checks if the storage account name is valid.
// See https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account
func IsValidStorageAccount(account string) bool {
return validStorageAccount.MatchString(account)
}
// NewAccountSASClient constructs a client that uses accountSAS authorization
// for its operations.
func NewAccountSASClient(account string, token url.Values, env azure.Environment) Client {
return newSASClient(account, env.StorageEndpointSuffix, token)
}
// NewAccountSASClientFromEndpointToken constructs a client that uses accountSAS authorization
// for its operations using the specified endpoint and SAS token.
func NewAccountSASClientFromEndpointToken(endpoint string, sasToken string) (Client, error) {
u, err := url.Parse(endpoint)
if err != nil {
return Client{}, err
}
_, err = url.ParseQuery(sasToken)
if err != nil {
return Client{}, err
}
u.RawQuery = sasToken
return newSASClientFromURL(u)
}
func newSASClient(accountName, baseURL string, sasToken url.Values) Client {
c := Client{
HTTPClient: http.DefaultClient,
apiVersion: DefaultAPIVersion,
sasClient: true,
Sender: &DefaultSender{
RetryAttempts: defaultRetryAttempts,
ValidStatusCodes: defaultValidStatusCodes,
RetryDuration: defaultRetryDuration,
},
accountName: accountName,
baseURL: baseURL,
accountSASToken: sasToken,
useHTTPS: defaultUseHTTPS,
}
c.userAgent = c.getDefaultUserAgent()
// Get API version and protocol from token
c.apiVersion = sasToken.Get("sv")
if spr := sasToken.Get("spr"); spr != "" {
c.useHTTPS = spr == "https"
}
return c
}
func newSASClientFromURL(u *url.URL) (Client, error) {
// the host name will look something like this
// - foo.blob.core.windows.net
// "foo" is the account name
// "core.windows.net" is the baseURL
// find the first dot to get account name
i1 := strings.IndexByte(u.Host, '.')
if i1 < 0 {
return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host)
}
// now find the second dot to get the base URL
i2 := strings.IndexByte(u.Host[i1+1:], '.')
if i2 < 0 {
return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host[i1+1:])
}
sasToken := u.Query()
c := newSASClient(u.Host[:i1], u.Host[i1+i2+2:], sasToken)
if spr := sasToken.Get("spr"); spr == "" {
// infer from URL if not in the query params set
c.useHTTPS = u.Scheme == "https"
}
return c, nil
}
func (c Client) isServiceSASClient() bool {
return c.sasClient && c.accountSASToken == nil
}
func (c Client) isAccountSASClient() bool {
return c.sasClient && c.accountSASToken != nil
}
func (c Client) getDefaultUserAgent() string {
return fmt.Sprintf("Go/%s (%s-%s) azure-storage-go/%s api-version/%s",
runtime.Version(),
runtime.GOARCH,
runtime.GOOS,
version.Number,
c.apiVersion,
)
}
// AddToUserAgent adds an extension to the current user agent
func (c *Client) AddToUserAgent(extension string) error {
if extension != "" {
c.userAgent = fmt.Sprintf("%s %s", c.userAgent, extension)
return nil
}
return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.userAgent)
}
// protectUserAgent is used in funcs that take extraheaders as a parameter.
// It prevents the User-Agent header from being overwritten; if one happens to
// be present, it gets appended to the current User-Agent. Use it before getStandardHeaders.
func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]string {
if v, ok := extraheaders[userAgentHeader]; ok {
c.AddToUserAgent(v)
delete(extraheaders, userAgentHeader)
}
return extraheaders
}
func (c Client) getBaseURL(service string) *url.URL {
scheme := "http"
if c.useHTTPS {
scheme = "https"
}
host := ""
if c.accountName == StorageEmulatorAccountName {
switch service {
case blobServiceName:
host = storageEmulatorBlob
case tableServiceName:
host = storageEmulatorTable
case queueServiceName:
host = storageEmulatorQueue
}
} else {
host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
}
return &url.URL{
Scheme: scheme,
Host: host,
}
}
func (c Client) getEndpoint(service, path string, params url.Values) string {
u := c.getBaseURL(service)
// API doesn't accept path segments not starting with '/'
if !strings.HasPrefix(path, "/") {
path = fmt.Sprintf("/%v", path)
}
if c.accountName == StorageEmulatorAccountName {
path = fmt.Sprintf("/%v%v", StorageEmulatorAccountName, path)
}
u.Path = path
u.RawQuery = params.Encode()
return u.String()
}
// AccountSASTokenOptions includes options for constructing
// an account SAS token.
// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
type AccountSASTokenOptions struct {
APIVersion string
Services Services
ResourceTypes ResourceTypes
Permissions Permissions
Start time.Time
Expiry time.Time
IP string
UseHTTPS bool
}
// Services specify services accessible with an account SAS.
type Services struct {
Blob bool
Queue bool
Table bool
File bool
}
// ResourceTypes specify the resources accessible with an
// account SAS.
type ResourceTypes struct {
Service bool
Container bool
Object bool
}
// Permissions specifies permissions for an accountSAS.
type Permissions struct {
Read bool
Write bool
Delete bool
List bool
Add bool
Create bool
Update bool
Process bool
}
// GetAccountSASToken creates an account SAS token
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
func (c Client) GetAccountSASToken(options AccountSASTokenOptions) (url.Values, error) {
if options.APIVersion == "" {
options.APIVersion = c.apiVersion
}
if options.APIVersion < "2015-04-05" {
return url.Values{}, fmt.Errorf("account SAS does not support API versions prior to 2015-04-05. API version : %s", options.APIVersion)
}
// build services string
services := ""
if options.Services.Blob {
services += "b"
}
if options.Services.Queue {
services += "q"
}
if options.Services.Table {
services += "t"
}
if options.Services.File {
services += "f"
}
// build resources string
resources := ""
if options.ResourceTypes.Service {
resources += "s"
}
if options.ResourceTypes.Container {
resources += "c"
}
if options.ResourceTypes.Object {
resources += "o"
}
// build permissions string
permissions := ""
if options.Permissions.Read {
permissions += "r"
}
if options.Permissions.Write {
permissions += "w"
}
if options.Permissions.Delete {
permissions += "d"
}
if options.Permissions.List {
permissions += "l"
}
if options.Permissions.Add {
permissions += "a"
}
if options.Permissions.Create {
permissions += "c"
}
if options.Permissions.Update {
permissions += "u"
}
if options.Permissions.Process {
permissions += "p"
}
// build start time, if exists
start := ""
if options.Start != (time.Time{}) {
start = options.Start.UTC().Format(time.RFC3339)
}
// build expiry time
expiry := options.Expiry.UTC().Format(time.RFC3339)
protocol := "https,http"
if options.UseHTTPS {
protocol = "https"
}
stringToSign := strings.Join([]string{
c.accountName,
permissions,
services,
resources,
start,
expiry,
options.IP,
protocol,
options.APIVersion,
"",
}, "\n")
signature := c.computeHmac256(stringToSign)
sasParams := url.Values{
"sv": {options.APIVersion},
"ss": {services},
"srt": {resources},
"sp": {permissions},
"se": {expiry},
"spr": {protocol},
"sig": {signature},
}
if start != "" {
sasParams.Add("st", start)
}
if options.IP != "" {
sasParams.Add("sip", options.IP)
}
return sasParams, nil
}
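// A minimal account-SAS sketch: mint a read/list token scoped to the blob
// service and hand it to a SAS-only client. The account name is a placeholder;
// azure.PublicCloud comes from the go-autorest package imported by this file.
func exampleAccountSAS(c Client) (Client, error) {
	token, err := c.GetAccountSASToken(AccountSASTokenOptions{
		Services:      Services{Blob: true},
		ResourceTypes: ResourceTypes{Container: true, Object: true},
		Permissions:   Permissions{Read: true, List: true},
		Expiry:        time.Now().Add(4 * time.Hour),
		UseHTTPS:      true,
	})
	if err != nil {
		return Client{}, err
	}
	return NewAccountSASClient("mystorageaccount", token, azure.PublicCloud), nil
}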
// GetBlobService returns a BlobStorageClient which can operate on the blob
// service of the storage account.
func (c Client) GetBlobService() BlobStorageClient {
b := BlobStorageClient{
client: c,
}
b.client.AddToUserAgent(blobServiceName)
b.auth = sharedKey
if c.UseSharedKeyLite {
b.auth = sharedKeyLite
}
return b
}
// GetQueueService returns a QueueServiceClient which can operate on the queue
// service of the storage account.
func (c Client) GetQueueService() QueueServiceClient {
q := QueueServiceClient{
client: c,
}
q.client.AddToUserAgent(queueServiceName)
q.auth = sharedKey
if c.UseSharedKeyLite {
q.auth = sharedKeyLite
}
return q
}
// GetTableService returns a TableServiceClient which can operate on the table
// service of the storage account.
func (c Client) GetTableService() TableServiceClient {
t := TableServiceClient{
client: c,
}
t.client.AddToUserAgent(tableServiceName)
t.auth = sharedKeyForTable
if c.UseSharedKeyLite {
t.auth = sharedKeyLiteForTable
}
return t
}
// GetFileService returns a FileServiceClient which can operate on the file
// service of the storage account.
func (c Client) GetFileService() FileServiceClient {
f := FileServiceClient{
client: c,
}
f.client.AddToUserAgent(fileServiceName)
f.auth = sharedKey
if c.UseSharedKeyLite {
f.auth = sharedKeyLite
}
return f
}
func (c Client) getStandardHeaders() map[string]string {
return map[string]string{
userAgentHeader: c.userAgent,
"x-ms-version": c.apiVersion,
"x-ms-date": currentTimeRfc1123Formatted(),
}
}
func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*http.Response, error) {
headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
if err != nil {
return nil, err
}
req, err := http.NewRequest(verb, url, body)
if err != nil {
return nil, errors.New("azure/storage: error creating request: " + err.Error())
}
// http.NewRequest() will automatically set req.ContentLength for a handful of types;
// otherwise we handle it here.
if req.ContentLength < 1 {
if clstr, ok := headers["Content-Length"]; ok {
if cl, err := strconv.ParseInt(clstr, 10, 64); err == nil {
req.ContentLength = cl
}
}
}
for k, v := range headers {
req.Header[k] = append(req.Header[k], v) // Must bypass case munging present in `Add` by using map functions directly. See https://github.com/Azure/azure-sdk-for-go/issues/645
}
if c.isAccountSASClient() {
// append the SAS token to the query params
v := req.URL.Query()
v = mergeParams(v, c.accountSASToken)
req.URL.RawQuery = v.Encode()
}
resp, err := c.Sender.Send(&c, req)
if err != nil {
return nil, err
}
if resp.StatusCode >= 400 && resp.StatusCode <= 505 {
return resp, getErrorFromResponse(resp)
}
return resp, nil
}
func (c Client) execInternalJSONCommon(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, *http.Request, *http.Response, error) {
headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
if err != nil {
return nil, nil, nil, err
}
req, err := http.NewRequest(verb, url, body)
if err != nil {
return nil, nil, nil, err
}
for k, v := range headers {
req.Header.Add(k, v)
}
resp, err := c.Sender.Send(&c, req)
if err != nil {
return nil, nil, nil, err
}
respToRet := &odataResponse{resp: resp}
statusCode := resp.StatusCode
if statusCode >= 400 && statusCode <= 505 {
var respBody []byte
respBody, err = readAndCloseBody(resp.Body)
if err != nil {
return nil, nil, nil, err
}
requestID, date, version := getDebugHeaders(resp.Header)
if len(respBody) == 0 {
// no error in response body, might happen in HEAD requests
err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version)
return respToRet, req, resp, err
}
// try unmarshal as odata.error json
err = json.Unmarshal(respBody, &respToRet.odata)
}
return respToRet, req, resp, err
}
func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
respToRet, _, _, err := c.execInternalJSONCommon(verb, url, headers, body, auth)
return respToRet, err
}
func (c Client) execBatchOperationJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
// execute common query, get back generated request, response etc... for more processing.
respToRet, req, resp, err := c.execInternalJSONCommon(verb, url, headers, body, auth)
if err != nil {
return nil, err
}
// return the OData in the case of executing batch commands.
// In this case we need to read the outer batch boundary and contents.
// Then we read the changeset information within the batch
var respBody []byte
respBody, err = readAndCloseBody(resp.Body)
if err != nil {
return nil, err
}
// outer multipart body
_, batchHeader, err := mime.ParseMediaType(resp.Header["Content-Type"][0])
if err != nil {
return nil, err
}
// batch details.
batchBoundary := batchHeader["boundary"]
batchPartBuf, changesetBoundary, err := genBatchReader(batchBoundary, respBody)
if err != nil {
return nil, err
}
// changeset details.
err = genChangesetReader(req, respToRet, batchPartBuf, changesetBoundary)
if err != nil {
return nil, err
}
return respToRet, nil
}
func genChangesetReader(req *http.Request, respToRet *odataResponse, batchPartBuf io.Reader, changesetBoundary string) error {
changesetMultiReader := multipart.NewReader(batchPartBuf, changesetBoundary)
changesetPart, err := changesetMultiReader.NextPart()
if err != nil {
return err
}
changesetPartBufioReader := bufio.NewReader(changesetPart)
changesetResp, err := http.ReadResponse(changesetPartBufioReader, req)
if err != nil {
return err
}
if changesetResp.StatusCode != http.StatusNoContent {
changesetBody, err := readAndCloseBody(changesetResp.Body)
if err != nil {
return err
}
err = json.Unmarshal(changesetBody, &respToRet.odata)
if err != nil {
return err
}
respToRet.resp = changesetResp
}
return nil
}
func genBatchReader(batchBoundary string, respBody []byte) (io.Reader, string, error) {
respBodyString := string(respBody)
respBodyReader := strings.NewReader(respBodyString)
// reading batchresponse
batchMultiReader := multipart.NewReader(respBodyReader, batchBoundary)
batchPart, err := batchMultiReader.NextPart()
if err != nil {
return nil, "", err
}
batchPartBufioReader := bufio.NewReader(batchPart)
_, changesetHeader, err := mime.ParseMediaType(batchPart.Header.Get("Content-Type"))
if err != nil {
return nil, "", err
}
changesetBoundary := changesetHeader["boundary"]
return batchPartBufioReader, changesetBoundary, nil
}
func readAndCloseBody(body io.ReadCloser) ([]byte, error) {
defer body.Close()
out, err := ioutil.ReadAll(body)
if err == io.EOF {
err = nil
}
return out, err
}
// reads the response body then closes it
func drainRespBody(resp *http.Response) {
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}
func serviceErrFromXML(body []byte, storageErr *AzureStorageServiceError) error {
if err := xml.Unmarshal(body, storageErr); err != nil {
storageErr.Message = fmt.Sprintf("Response body could no be unmarshaled: %v. Body: %v.", err, string(body))
return err
}
return nil
}
func serviceErrFromJSON(body []byte, storageErr *AzureStorageServiceError) error {
odataError := odataErrorWrapper{}
if err := json.Unmarshal(body, &odataError); err != nil {
storageErr.Message = fmt.Sprintf("Response body could no be unmarshaled: %v. Body: %v.", err, string(body))
return err
}
storageErr.Code = odataError.Err.Code
storageErr.Message = odataError.Err.Message.Value
storageErr.Lang = odataError.Err.Message.Lang
return nil
}
func serviceErrFromStatusCode(code int, status string, requestID, date, version string) AzureStorageServiceError {
return AzureStorageServiceError{
StatusCode: code,
Code: status,
RequestID: requestID,
Date: date,
APIVersion: version,
Message: "no response body was available for error status code",
}
}
func (e AzureStorageServiceError) Error() string {
return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestInitiated=%s, RequestId=%s, API Version=%s, QueryParameterName=%s, QueryParameterValue=%s",
e.StatusCode, e.Code, e.Message, e.Date, e.RequestID, e.APIVersion, e.QueryParameterName, e.QueryParameterValue)
}
// checkRespCode returns an UnexpectedStatusCodeError if the given response code is not
// one of the allowed status codes; otherwise nil.
func checkRespCode(resp *http.Response, allowed []int) error {
for _, v := range allowed {
if resp.StatusCode == v {
return nil
}
}
err := getErrorFromResponse(resp)
return UnexpectedStatusCodeError{
allowed: allowed,
got: resp.StatusCode,
inner: err,
}
}
func (c Client) addMetadataToHeaders(h map[string]string, metadata map[string]string) map[string]string {
metadata = c.protectUserAgent(metadata)
for k, v := range metadata {
h[userDefinedMetadataHeaderPrefix+k] = v
}
return h
}
func getDebugHeaders(h http.Header) (requestID, date, version string) {
requestID = h.Get("x-ms-request-id")
version = h.Get("x-ms-version")
date = h.Get("Date")
return
}
func getErrorFromResponse(resp *http.Response) error {
respBody, err := readAndCloseBody(resp.Body)
if err != nil {
return err
}
requestID, date, version := getDebugHeaders(resp.Header)
if len(respBody) == 0 {
// no error in response body, might happen in HEAD requests
err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version)
} else {
storageErr := AzureStorageServiceError{
StatusCode: resp.StatusCode,
RequestID: requestID,
Date: date,
APIVersion: version,
}
// response contains storage service error object, unmarshal
if resp.Header.Get("Content-Type") == "application/xml" {
if errIn := serviceErrFromXML(respBody, &storageErr); errIn != nil { // error unmarshaling the error response
err = errIn
}
} else {
if errIn := serviceErrFromJSON(respBody, &storageErr); errIn != nil { // error unmarshaling the error response
err = errIn
}
}
err = storageErr
}
return err
}

View File

@ -1,640 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
// Container represents an Azure container.
type Container struct {
bsc *BlobStorageClient
Name string `xml:"Name"`
Properties ContainerProperties `xml:"Properties"`
Metadata map[string]string
sasuri url.URL
}
// Client returns the HTTP client used by the Container reference.
func (c *Container) Client() *Client {
return &c.bsc.client
}
func (c *Container) buildPath() string {
return fmt.Sprintf("/%s", c.Name)
}
// GetURL gets the canonical URL to the container.
// This method does not create a publicly accessible URL if the container
// is private and this method does not check if the blob exists.
func (c *Container) GetURL() string {
container := c.Name
if container == "" {
container = "$root"
}
return c.bsc.client.getEndpoint(blobServiceName, pathForResource(container, ""), nil)
}
// ContainerSASOptions are options to construct a container SAS
// URI.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
type ContainerSASOptions struct {
ContainerSASPermissions
OverrideHeaders
SASOptions
}
// ContainerSASPermissions includes the available permissions for
// a container SAS URI.
type ContainerSASPermissions struct {
BlobServiceSASPermissions
List bool
}
// GetSASURI creates an URL to the container which contains the Shared
// Access Signature with the specified options.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
func (c *Container) GetSASURI(options ContainerSASOptions) (string, error) {
uri := c.GetURL()
signedResource := "c"
canonicalizedResource, err := c.bsc.client.buildCanonicalizedResource(uri, c.bsc.auth, true)
if err != nil {
return "", err
}
// build permissions string
permissions := options.BlobServiceSASPermissions.buildString()
if options.List {
permissions += "l"
}
return c.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders)
}
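// Usage sketch (not part of the original SDK): build a read+list SAS URI for a
// container that is valid for one hour. The promoted Read and Expiry field names
// (from BlobServiceSASPermissions and SASOptions) are assumed from this SDK version.
func exampleContainerSASURI(cnt *Container) (string, error) {
	opts := ContainerSASOptions{}
	opts.Read = true                              // read blob contents and properties
	opts.List = true                              // list blobs inside the container
	opts.Expiry = time.Now().UTC().Add(time.Hour) // assumed SASOptions expiry field
	return cnt.GetSASURI(opts)
}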
// ContainerProperties contains various properties of a container returned from
// various endpoints like ListContainers.
type ContainerProperties struct {
LastModified string `xml:"Last-Modified"`
Etag string `xml:"Etag"`
LeaseStatus string `xml:"LeaseStatus"`
LeaseState string `xml:"LeaseState"`
LeaseDuration string `xml:"LeaseDuration"`
PublicAccess ContainerAccessType `xml:"PublicAccess"`
}
// ContainerListResponse contains the response fields from
// ListContainers call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
type ContainerListResponse struct {
XMLName xml.Name `xml:"EnumerationResults"`
Xmlns string `xml:"xmlns,attr"`
Prefix string `xml:"Prefix"`
Marker string `xml:"Marker"`
NextMarker string `xml:"NextMarker"`
MaxResults int64 `xml:"MaxResults"`
Containers []Container `xml:"Containers>Container"`
}
// BlobListResponse contains the response fields from ListBlobs call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
type BlobListResponse struct {
XMLName xml.Name `xml:"EnumerationResults"`
Xmlns string `xml:"xmlns,attr"`
Prefix string `xml:"Prefix"`
Marker string `xml:"Marker"`
NextMarker string `xml:"NextMarker"`
MaxResults int64 `xml:"MaxResults"`
Blobs []Blob `xml:"Blobs>Blob"`
// BlobPrefix is used to traverse blobs as if it were a file system.
// It is returned if ListBlobsParameters.Delimiter is specified.
// The list here can be thought of as "folders" that may contain
// other folders or blobs.
BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"`
// Delimiter is used to traverse blobs as if it were a file system.
// It is returned if ListBlobsParameters.Delimiter is specified.
Delimiter string `xml:"Delimiter"`
}
// IncludeBlobDataset has options to include in a list blobs operation
type IncludeBlobDataset struct {
Snapshots bool
Metadata bool
UncommittedBlobs bool
Copy bool
}
// ListBlobsParameters defines the set of customizable
// parameters to make a List Blobs call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
type ListBlobsParameters struct {
Prefix string
Delimiter string
Marker string
Include *IncludeBlobDataset
MaxResults uint
Timeout uint
RequestID string
}
func (p ListBlobsParameters) getParameters() url.Values {
out := url.Values{}
if p.Prefix != "" {
out.Set("prefix", p.Prefix)
}
if p.Delimiter != "" {
out.Set("delimiter", p.Delimiter)
}
if p.Marker != "" {
out.Set("marker", p.Marker)
}
if p.Include != nil {
include := []string{}
include = addString(include, p.Include.Snapshots, "snapshots")
include = addString(include, p.Include.Metadata, "metadata")
include = addString(include, p.Include.UncommittedBlobs, "uncommittedblobs")
include = addString(include, p.Include.Copy, "copy")
fullInclude := strings.Join(include, ",")
out.Set("include", fullInclude)
}
if p.MaxResults != 0 {
out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
}
if p.Timeout != 0 {
out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
}
return out
}
func addString(datasets []string, include bool, text string) []string {
if include {
datasets = append(datasets, text)
}
return datasets
}
// ContainerAccessType defines the access level to the container from a public
// request.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms-
// blob-public-access" header.
type ContainerAccessType string
// Access options for containers
const (
ContainerAccessTypePrivate ContainerAccessType = ""
ContainerAccessTypeBlob ContainerAccessType = "blob"
ContainerAccessTypeContainer ContainerAccessType = "container"
)
// ContainerAccessPolicy represents each access policy in the container ACL.
type ContainerAccessPolicy struct {
ID string
StartTime time.Time
ExpiryTime time.Time
CanRead bool
CanWrite bool
CanDelete bool
}
// ContainerPermissions represents the container ACLs.
type ContainerPermissions struct {
AccessType ContainerAccessType
AccessPolicies []ContainerAccessPolicy
}
// ContainerAccessHeader references header used when setting/getting container ACL
const (
ContainerAccessHeader string = "x-ms-blob-public-access"
)
// GetBlobReference returns a Blob object for the specified blob name.
func (c *Container) GetBlobReference(name string) *Blob {
return &Blob{
Container: c,
Name: name,
}
}
// CreateContainerOptions includes the options for a create container operation
type CreateContainerOptions struct {
Timeout uint
Access ContainerAccessType `header:"x-ms-blob-public-access"`
RequestID string `header:"x-ms-client-request-id"`
}
// Create creates a blob container within the storage account
// with given name and access level. Returns error if container already exists.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Container
func (c *Container) Create(options *CreateContainerOptions) error {
resp, err := c.create(options)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusCreated})
}
// CreateIfNotExists creates a blob container if it does not exist. Returns
// true if container is newly created or false if container already exists.
func (c *Container) CreateIfNotExists(options *CreateContainerOptions) (bool, error) {
resp, err := c.create(options)
if resp != nil {
defer drainRespBody(resp)
if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict {
return resp.StatusCode == http.StatusCreated, nil
}
}
return false, err
}
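// Usage sketch (not part of the original SDK): create a container with blob-level
// public access if it does not already exist. NewBasicClient and GetBlobService are
// assumed from this SDK version; the account name and key are placeholders.
func exampleEnsureContainer() error {
	client, err := NewBasicClient("myaccount", "bXlrZXk=") // hypothetical credentials
	if err != nil {
		return err
	}
	blobService := client.GetBlobService()
	cnt := blobService.GetContainerReference("tfstate")
	created, err := cnt.CreateIfNotExists(&CreateContainerOptions{
		Access: ContainerAccessTypeBlob, // blobs are readable anonymously, listing is not
	})
	if err != nil {
		return err
	}
	_ = created // true only if this call created the container
	return nil
}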
func (c *Container) create(options *CreateContainerOptions) (*http.Response, error) {
query := url.Values{"restype": {"container"}}
headers := c.bsc.client.getStandardHeaders()
headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata)
if options != nil {
query = addTimeout(query, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)
return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth)
}
// Exists returns true if a container with given name exists
// on the storage account, otherwise returns false.
func (c *Container) Exists() (bool, error) {
q := url.Values{"restype": {"container"}}
var uri string
if c.bsc.client.isServiceSASClient() {
q = mergeParams(q, c.sasuri.Query())
newURI := c.sasuri
newURI.RawQuery = q.Encode()
uri = newURI.String()
} else {
uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q)
}
headers := c.bsc.client.getStandardHeaders()
resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth)
if resp != nil {
defer drainRespBody(resp)
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound {
return resp.StatusCode == http.StatusOK, nil
}
}
return false, err
}
// SetContainerPermissionOptions includes options for a set container permissions operation
type SetContainerPermissionOptions struct {
Timeout uint
LeaseID string `header:"x-ms-lease-id"`
IfModifiedSince *time.Time `header:"If-Modified-Since"`
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
RequestID string `header:"x-ms-client-request-id"`
}
// SetPermissions sets up container permissions
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Container-ACL
func (c *Container) SetPermissions(permissions ContainerPermissions, options *SetContainerPermissionOptions) error {
body, length, err := generateContainerACLpayload(permissions.AccessPolicies)
if err != nil {
return err
}
params := url.Values{
"restype": {"container"},
"comp": {"acl"},
}
headers := c.bsc.client.getStandardHeaders()
headers = addToHeaders(headers, ContainerAccessHeader, string(permissions.AccessType))
headers["Content-Length"] = strconv.Itoa(length)
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusOK})
}
// GetContainerPermissionOptions includes options for a get container permissions operation
type GetContainerPermissionOptions struct {
Timeout uint
LeaseID string `header:"x-ms-lease-id"`
RequestID string `header:"x-ms-client-request-id"`
}
// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx
// If timeout is 0 then it will not be passed to Azure
// leaseID will only be passed to Azure if populated
func (c *Container) GetPermissions(options *GetContainerPermissionOptions) (*ContainerPermissions, error) {
params := url.Values{
"restype": {"container"},
"comp": {"acl"},
}
headers := c.bsc.client.getStandardHeaders()
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var ap AccessPolicy
err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList)
if err != nil {
return nil, err
}
return buildAccessPolicy(ap, &resp.Header), nil
}
func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions {
// containerAccess is one of "blob", "container", or empty (private)
containerAccess := headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader))
permissions := ContainerPermissions{
AccessType: ContainerAccessType(containerAccess),
AccessPolicies: []ContainerAccessPolicy{},
}
for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
capd := ContainerAccessPolicy{
ID: policy.ID,
StartTime: policy.AccessPolicy.StartTime,
ExpiryTime: policy.AccessPolicy.ExpiryTime,
}
capd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
capd.CanWrite = updatePermissions(policy.AccessPolicy.Permission, "w")
capd.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")
permissions.AccessPolicies = append(permissions.AccessPolicies, capd)
}
return &permissions
}
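// Usage sketch (not part of the original SDK): grant read access through a stored
// access policy that expires after one day. The policy ID is made up for the example.
func exampleSetContainerACL(cnt *Container) error {
	perms := ContainerPermissions{
		AccessType: ContainerAccessTypePrivate, // no anonymous access to the container itself
		AccessPolicies: []ContainerAccessPolicy{{
			ID:         "read-only-policy",
			StartTime:  time.Now().UTC(),
			ExpiryTime: time.Now().UTC().Add(24 * time.Hour),
			CanRead:    true,
		}},
	}
	return cnt.SetPermissions(perms, nil)
}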
// DeleteContainerOptions includes options for a delete container operation
type DeleteContainerOptions struct {
Timeout uint
LeaseID string `header:"x-ms-lease-id"`
IfModifiedSince *time.Time `header:"If-Modified-Since"`
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
RequestID string `header:"x-ms-client-request-id"`
}
// Delete deletes the container with given name on the storage
// account. If the container does not exist returns error.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container
func (c *Container) Delete(options *DeleteContainerOptions) error {
resp, err := c.delete(options)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusAccepted})
}
// DeleteIfExists deletes the container with given name on the storage
// account if it exists. Returns true if container is deleted with this call, or
// false if the container did not exist at the time of the Delete Container
// operation.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container
func (c *Container) DeleteIfExists(options *DeleteContainerOptions) (bool, error) {
resp, err := c.delete(options)
if resp != nil {
defer drainRespBody(resp)
if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound {
return resp.StatusCode == http.StatusAccepted, nil
}
}
return false, err
}
func (c *Container) delete(options *DeleteContainerOptions) (*http.Response, error) {
query := url.Values{"restype": {"container"}}
headers := c.bsc.client.getStandardHeaders()
if options != nil {
query = addTimeout(query, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)
return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth)
}
// ListBlobs returns an object that contains list of blobs in the container,
// pagination token and other information in the response of List Blobs call.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Blobs
func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) {
q := mergeParams(params.getParameters(), url.Values{
"restype": {"container"},
"comp": {"list"},
})
var uri string
if c.bsc.client.isServiceSASClient() {
q = mergeParams(q, c.sasuri.Query())
newURI := c.sasuri
newURI.RawQuery = q.Encode()
uri = newURI.String()
} else {
uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q)
}
headers := c.bsc.client.getStandardHeaders()
headers = addToHeaders(headers, "x-ms-client-request-id", params.RequestID)
var out BlobListResponse
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
if err != nil {
return out, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
for i := range out.Blobs {
out.Blobs[i].Container = c
}
return out, err
}
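// Usage sketch (not part of the original SDK): collect every blob under a prefix by
// following NextMarker until the listing is exhausted. The prefix is a placeholder.
func exampleListAllBlobs(cnt *Container) ([]Blob, error) {
	var all []Blob
	params := ListBlobsParameters{Prefix: "env/", MaxResults: 1000}
	for {
		page, err := cnt.ListBlobs(params)
		if err != nil {
			return nil, err
		}
		all = append(all, page.Blobs...)
		if page.NextMarker == "" {
			break // no more pages
		}
		params.Marker = page.NextMarker // resume where the previous page stopped
	}
	return all, nil
}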
// ContainerMetadataOptions includes options for container metadata operations
type ContainerMetadataOptions struct {
Timeout uint
LeaseID string `header:"x-ms-lease-id"`
RequestID string `header:"x-ms-client-request-id"`
}
// SetMetadata replaces the metadata for the specified container.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetBlobMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/set-container-metadata
func (c *Container) SetMetadata(options *ContainerMetadataOptions) error {
params := url.Values{
"comp": {"metadata"},
"restype": {"container"},
}
headers := c.bsc.client.getStandardHeaders()
headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata)
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusOK})
}
// GetMetadata returns all user-defined metadata for the specified container.
//
// All metadata keys will be returned in lower case. (HTTP header
// names are case-insensitive.)
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-metadata
func (c *Container) GetMetadata(options *ContainerMetadataOptions) error {
params := url.Values{
"comp": {"metadata"},
"restype": {"container"},
}
headers := c.bsc.client.getStandardHeaders()
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
return err
}
c.writeMetadata(resp.Header)
return nil
}
func (c *Container) writeMetadata(h http.Header) {
c.Metadata = writeMetadata(h)
}
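// Usage sketch (not part of the original SDK): attach user-defined metadata to a
// container. Each key is sent as an x-ms-meta-<key> header and comes back lower-cased.
func exampleTagContainer(cnt *Container) error {
	cnt.Metadata = map[string]string{
		"environment": "staging",
		"managedby":   "terraform",
	}
	return cnt.SetMetadata(nil)
}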
func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) {
sil := SignedIdentifiers{
SignedIdentifiers: []SignedIdentifier{},
}
for _, capd := range policies {
permission := capd.generateContainerPermissions()
signedIdentifier := convertAccessPolicyToXMLStructs(capd.ID, capd.StartTime, capd.ExpiryTime, permission)
sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
}
return xmlMarshal(sil)
}
func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions string) {
// generate the permissions string (rwd).
// still want the end user API to have bool flags.
permissions = ""
if capd.CanRead {
permissions += "r"
}
if capd.CanWrite {
permissions += "w"
}
if capd.CanDelete {
permissions += "d"
}
return permissions
}
// GetProperties updates the properties of the container.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-properties
func (c *Container) GetProperties() error {
params := url.Values{
"restype": {"container"},
}
headers := c.bsc.client.getStandardHeaders()
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
if err != nil {
return err
}
defer resp.Body.Close()
if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
return err
}
// update properties
c.Properties.Etag = resp.Header.Get(headerEtag)
c.Properties.LeaseStatus = resp.Header.Get("x-ms-lease-status")
c.Properties.LeaseState = resp.Header.Get("x-ms-lease-state")
c.Properties.LeaseDuration = resp.Header.Get("x-ms-lease-duration")
c.Properties.LastModified = resp.Header.Get("Last-Modified")
c.Properties.PublicAccess = ContainerAccessType(resp.Header.Get(ContainerAccessHeader))
return nil
}

View File

@ -1,237 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"time"
)
const (
blobCopyStatusPending = "pending"
blobCopyStatusSuccess = "success"
blobCopyStatusAborted = "aborted"
blobCopyStatusFailed = "failed"
)
// CopyOptions includes the options for a copy blob operation
type CopyOptions struct {
Timeout uint
Source CopyOptionsConditions
Destiny CopyOptionsConditions
RequestID string
}
// IncrementalCopyOptions includes the options for an incremental copy blob operation
type IncrementalCopyOptions struct {
Timeout uint
Destination IncrementalCopyOptionsConditions
RequestID string
}
// CopyOptionsConditions includes some conditional options in a copy blob operation
type CopyOptionsConditions struct {
LeaseID string
IfModifiedSince *time.Time
IfUnmodifiedSince *time.Time
IfMatch string
IfNoneMatch string
}
// IncrementalCopyOptionsConditions includes some conditional options in a copy blob operation
type IncrementalCopyOptionsConditions struct {
IfModifiedSince *time.Time
IfUnmodifiedSince *time.Time
IfMatch string
IfNoneMatch string
}
// Copy starts a blob copy operation and waits for the operation to
// complete. sourceBlob parameter must be a canonical URL to the blob (can be
// obtained using the GetURL method.) There is no SLA on blob copy and therefore
// this helper method works faster on smaller files.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob
func (b *Blob) Copy(sourceBlob string, options *CopyOptions) error {
copyID, err := b.StartCopy(sourceBlob, options)
if err != nil {
return err
}
return b.WaitForCopy(copyID)
}
// StartCopy starts a blob copy operation.
// sourceBlob parameter must be a canonical URL to the blob (can be
// obtained using the GetURL method.)
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob
func (b *Blob) StartCopy(sourceBlob string, options *CopyOptions) (string, error) {
params := url.Values{}
headers := b.Container.bsc.client.getStandardHeaders()
headers["x-ms-copy-source"] = sourceBlob
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
if options != nil {
params = addTimeout(params, options.Timeout)
headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
// source
headers = addToHeaders(headers, "x-ms-source-lease-id", options.Source.LeaseID)
headers = addTimeToHeaders(headers, "x-ms-source-if-modified-since", options.Source.IfModifiedSince)
headers = addTimeToHeaders(headers, "x-ms-source-if-unmodified-since", options.Source.IfUnmodifiedSince)
headers = addToHeaders(headers, "x-ms-source-if-match", options.Source.IfMatch)
headers = addToHeaders(headers, "x-ms-source-if-none-match", options.Source.IfNoneMatch)
// destination
headers = addToHeaders(headers, "x-ms-lease-id", options.Destiny.LeaseID)
headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destiny.IfModifiedSince)
headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destiny.IfUnmodifiedSince)
headers = addToHeaders(headers, "x-ms-if-match", options.Destiny.IfMatch)
headers = addToHeaders(headers, "x-ms-if-none-match", options.Destiny.IfNoneMatch)
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
if err != nil {
return "", err
}
defer drainRespBody(resp)
if err := checkRespCode(resp, []int{http.StatusAccepted, http.StatusCreated}); err != nil {
return "", err
}
copyID := resp.Header.Get("x-ms-copy-id")
if copyID == "" {
return "", errors.New("Got empty copy id header")
}
return copyID, nil
}
// AbortCopyOptions includes the options for an abort blob operation
type AbortCopyOptions struct {
Timeout uint
LeaseID string `header:"x-ms-lease-id"`
RequestID string `header:"x-ms-client-request-id"`
}
// AbortCopy aborts a blob copy which has already been triggered by the StartCopy function.
// copyID is generated by the StartCopy function.
// currentLeaseID is required IF the destination blob has an active lease on it.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Abort-Copy-Blob
func (b *Blob) AbortCopy(copyID string, options *AbortCopyOptions) error {
params := url.Values{
"comp": {"copy"},
"copyid": {copyID},
}
headers := b.Container.bsc.client.getStandardHeaders()
headers["x-ms-copy-action"] = "abort"
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusNoContent})
}
// WaitForCopy loops until a BlobCopy operation is completed (or fails with error)
func (b *Blob) WaitForCopy(copyID string) error {
for {
err := b.GetProperties(nil)
if err != nil {
return err
}
if b.Properties.CopyID != copyID {
return errBlobCopyIDMismatch
}
switch b.Properties.CopyStatus {
case blobCopyStatusSuccess:
return nil
case blobCopyStatusPending:
continue
case blobCopyStatusAborted:
return errBlobCopyAborted
case blobCopyStatusFailed:
return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", b.Properties.CopyID, b.Properties.CopyStatusDescription)
default:
return fmt.Errorf("storage: unhandled blob copy status: '%s'", b.Properties.CopyStatus)
}
}
}
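// Usage sketch (not part of the original SDK): copy one blob over another in the same
// container and block until the server-side copy completes. Blob names are placeholders.
func exampleCopyBlob(cnt *Container) error {
	src := cnt.GetBlobReference("source.tfstate")
	dst := cnt.GetBlobReference("backup.tfstate")
	// Copy starts the copy with StartCopy and then polls via WaitForCopy.
	return dst.Copy(src.GetURL(), nil)
}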
// IncrementalCopyBlob copies a snapshot of a source blob and copies it to the referring blob.
// sourceBlob parameter must be a valid snapshot URL of the original blob.
// The original blob must be public, or use a Shared Access Signature.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/incremental-copy-blob .
func (b *Blob) IncrementalCopyBlob(sourceBlobURL string, snapshotTime time.Time, options *IncrementalCopyOptions) (string, error) {
params := url.Values{"comp": {"incrementalcopy"}}
// need formatting to 7 decimal places so it's friendly to Windows and *nix
snapshotTimeFormatted := snapshotTime.Format("2006-01-02T15:04:05.0000000Z")
u, err := url.Parse(sourceBlobURL)
if err != nil {
return "", err
}
query := u.Query()
query.Add("snapshot", snapshotTimeFormatted)
encodedQuery := query.Encode()
encodedQuery = strings.Replace(encodedQuery, "%3A", ":", -1)
u.RawQuery = encodedQuery
snapshotURL := u.String()
headers := b.Container.bsc.client.getStandardHeaders()
headers["x-ms-copy-source"] = snapshotURL
if options != nil {
addTimeout(params, options.Timeout)
headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destination.IfModifiedSince)
headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destination.IfUnmodifiedSince)
headers = addToHeaders(headers, "x-ms-if-match", options.Destination.IfMatch)
headers = addToHeaders(headers, "x-ms-if-none-match", options.Destination.IfNoneMatch)
}
// get URI of destination blob
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
if err != nil {
return "", err
}
defer drainRespBody(resp)
if err := checkRespCode(resp, []int{http.StatusAccepted}); err != nil {
return "", err
}
copyID := resp.Header.Get("x-ms-copy-id")
if copyID == "" {
return "", errors.New("Got empty copy id header")
}
return copyID, nil
}

View File

@ -1,238 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"net/http"
"net/url"
"sync"
)
// Directory represents a directory on a share.
type Directory struct {
fsc *FileServiceClient
Metadata map[string]string
Name string `xml:"Name"`
parent *Directory
Properties DirectoryProperties
share *Share
}
// DirectoryProperties contains various properties of a directory.
type DirectoryProperties struct {
LastModified string `xml:"Last-Modified"`
Etag string `xml:"Etag"`
}
// ListDirsAndFilesParameters defines the set of customizable parameters to
// make a List Files and Directories call.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
type ListDirsAndFilesParameters struct {
Prefix string
Marker string
MaxResults uint
Timeout uint
}
// DirsAndFilesListResponse contains the response fields from
// a List Files and Directories call.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
type DirsAndFilesListResponse struct {
XMLName xml.Name `xml:"EnumerationResults"`
Xmlns string `xml:"xmlns,attr"`
Marker string `xml:"Marker"`
MaxResults int64 `xml:"MaxResults"`
Directories []Directory `xml:"Entries>Directory"`
Files []File `xml:"Entries>File"`
NextMarker string `xml:"NextMarker"`
}
// builds the complete directory path for this directory object.
func (d *Directory) buildPath() string {
path := ""
current := d
for current.Name != "" {
path = "/" + current.Name + path
current = current.parent
}
return d.share.buildPath() + path
}
// Create this directory in the associated share.
// If a directory with the same name already exists, the operation fails.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
func (d *Directory) Create(options *FileRequestOptions) error {
// if this is the root directory exit early
if d.parent == nil {
return nil
}
params := prepareOptions(options)
headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, params, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated})
if err != nil {
return err
}
d.updateEtagAndLastModified(headers)
return nil
}
// CreateIfNotExists creates this directory under the associated share if the
// directory does not exist. Returns true if the directory is newly created or
// false if the directory already exists.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
func (d *Directory) CreateIfNotExists(options *FileRequestOptions) (bool, error) {
// if this is the root directory exit early
if d.parent == nil {
return false, nil
}
params := prepareOptions(options)
resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, params, nil)
if resp != nil {
defer drainRespBody(resp)
if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict {
if resp.StatusCode == http.StatusCreated {
d.updateEtagAndLastModified(resp.Header)
return true, nil
}
return false, d.FetchAttributes(nil)
}
}
return false, err
}
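// Usage sketch (not part of the original SDK): create a directory on a share only when
// it is missing. The directory name is a placeholder.
func exampleEnsureDirectory(root *Directory) (*Directory, error) {
	dir := root.GetDirectoryReference("state-backups")
	if _, err := dir.CreateIfNotExists(nil); err != nil {
		return nil, err
	}
	return dir, nil
}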
// Delete removes this directory. It must be empty in order to be deleted.
// If the directory does not exist the operation fails.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory
func (d *Directory) Delete(options *FileRequestOptions) error {
return d.fsc.deleteResource(d.buildPath(), resourceDirectory, options)
}
// DeleteIfExists removes this directory if it exists.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory
func (d *Directory) DeleteIfExists(options *FileRequestOptions) (bool, error) {
resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory, options)
if resp != nil {
defer drainRespBody(resp)
if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound {
return resp.StatusCode == http.StatusAccepted, nil
}
}
return false, err
}
// Exists returns true if this directory exists.
func (d *Directory) Exists() (bool, error) {
exists, headers, err := d.fsc.resourceExists(d.buildPath(), resourceDirectory)
if exists {
d.updateEtagAndLastModified(headers)
}
return exists, err
}
// FetchAttributes retrieves metadata for this directory.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-directory-properties
func (d *Directory) FetchAttributes(options *FileRequestOptions) error {
params := prepareOptions(options)
headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, params, http.MethodHead)
if err != nil {
return err
}
d.updateEtagAndLastModified(headers)
d.Metadata = getMetadataFromHeaders(headers)
return nil
}
// GetDirectoryReference returns a child Directory object for this directory.
func (d *Directory) GetDirectoryReference(name string) *Directory {
return &Directory{
fsc: d.fsc,
Name: name,
parent: d,
share: d.share,
}
}
// GetFileReference returns a child File object for this directory.
func (d *Directory) GetFileReference(name string) *File {
return &File{
fsc: d.fsc,
Name: name,
parent: d,
share: d.share,
mutex: &sync.Mutex{},
}
}
// ListDirsAndFiles returns a list of files and directories under this directory.
// It also contains a pagination token and other response details.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) {
q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory))
resp, err := d.fsc.listContent(d.buildPath(), q, nil)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out DirsAndFilesListResponse
err = xmlUnmarshal(resp.Body, &out)
return &out, err
}
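// Usage sketch (not part of the original SDK): list the entries directly under the root
// of a file share. GetShareReference and GetRootDirectoryReference are assumed from
// this SDK version; the share name is a placeholder.
func exampleListShareRoot(fsc *FileServiceClient) error {
	root := fsc.GetShareReference("myshare").GetRootDirectoryReference()
	list, err := root.ListDirsAndFiles(ListDirsAndFilesParameters{MaxResults: 100})
	if err != nil {
		return err
	}
	for _, d := range list.Directories {
		_ = d.Name // subdirectory names
	}
	for _, f := range list.Files {
		_ = f.Name // file names
	}
	return nil
}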
// SetMetadata replaces the metadata for this directory.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetDirectoryMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Directory-Metadata
func (d *Directory) SetMetadata(options *FileRequestOptions) error {
headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil), options)
if err != nil {
return err
}
d.updateEtagAndLastModified(headers)
return nil
}
// updates Etag and last modified date
func (d *Directory) updateEtagAndLastModified(headers http.Header) {
d.Properties.Etag = headers.Get("Etag")
d.Properties.LastModified = headers.Get("Last-Modified")
}
// URL gets the canonical URL to this directory.
// This method does not create a publicly accessible URL if the directory
// is private and this method does not check if the directory exists.
func (d *Directory) URL() string {
return d.fsc.client.getEndpoint(fileServiceName, d.buildPath(), url.Values{})
}

View File

@ -1,466 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
uuid "github.com/satori/go.uuid"
)
// Annotating as secure for gas scanning
/* #nosec */
const (
partitionKeyNode = "PartitionKey"
rowKeyNode = "RowKey"
etagErrorTemplate = "Etag didn't match: %v"
)
var (
errEmptyPayload = errors.New("Empty payload is not a valid metadata level for this operation")
errNilPreviousResult = errors.New("The previous results page is nil")
errNilNextLink = errors.New("There are no more pages in this query results")
)
// Entity represents an entity inside an Azure table.
type Entity struct {
Table *Table
PartitionKey string
RowKey string
TimeStamp time.Time
OdataMetadata string
OdataType string
OdataID string
OdataEtag string
OdataEditLink string
Properties map[string]interface{}
}
// GetEntityReference returns an Entity object with the specified
// partition key and row key.
func (t *Table) GetEntityReference(partitionKey, rowKey string) *Entity {
return &Entity{
PartitionKey: partitionKey,
RowKey: rowKey,
Table: t,
}
}
// EntityOptions includes options for entity operations.
type EntityOptions struct {
Timeout uint
RequestID string `header:"x-ms-client-request-id"`
}
// GetEntityOptions includes options for a get entity operation
type GetEntityOptions struct {
Select []string
RequestID string `header:"x-ms-client-request-id"`
}
// Get gets the referenced entity. Which properties to get can be
// specified using the select option.
// See:
// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities
func (e *Entity) Get(timeout uint, ml MetadataLevel, options *GetEntityOptions) error {
if ml == EmptyPayload {
return errEmptyPayload
}
// RowKey and PartitionKey would be lost if the $select projection does not include them,
// so save them here and restore them after unmarshaling; they identify the entity.
rk := e.RowKey
pk := e.PartitionKey
query := url.Values{
"timeout": {strconv.FormatUint(uint64(timeout), 10)},
}
headers := e.Table.tsc.client.getStandardHeaders()
headers[headerAccept] = string(ml)
if options != nil {
if len(options.Select) > 0 {
query.Add("$select", strings.Join(options.Select, ","))
}
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
resp, err := e.Table.tsc.client.exec(http.MethodGet, uri, headers, nil, e.Table.tsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
return err
}
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
err = json.Unmarshal(respBody, e)
if err != nil {
return err
}
e.PartitionKey = pk
e.RowKey = rk
return nil
}
// Insert inserts the referenced entity in its table.
// The function fails if there is an entity with the same
// PartitionKey and RowKey in the table.
// ml determines the level of detail of metadata in the operation response,
// or no data at all.
// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-entity
func (e *Entity) Insert(ml MetadataLevel, options *EntityOptions) error {
query, headers := options.getParameters()
headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())
body, err := json.Marshal(e)
if err != nil {
return err
}
headers = addBodyRelatedHeaders(headers, len(body))
headers = addReturnContentHeaders(headers, ml)
uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.Table.buildPath(), query)
resp, err := e.Table.tsc.client.exec(http.MethodPost, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
if ml != EmptyPayload {
if err = checkRespCode(resp, []int{http.StatusCreated}); err != nil {
return err
}
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if err = e.UnmarshalJSON(data); err != nil {
return err
}
} else {
if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
return err
}
}
return nil
}
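// Usage sketch (not part of the original SDK): insert an entity with typed properties.
// GetTableService and GetTableReference are assumed from this SDK version; the table
// and key names are placeholders.
func exampleInsertEntity(client Client) error {
	tsc := client.GetTableService()
	table := tsc.GetTableReference("locks")
	entity := table.GetEntityReference("workspace1", "lock-0001")
	entity.Properties = map[string]interface{}{
		"Holder":    "terraform",
		"Acquired":  time.Now().UTC(), // stored with an Edm.DateTime annotation
		"LeaseSecs": int64(60),        // stored with an Edm.Int64 annotation
	}
	// EmptyPayload asks the service not to echo the entity back in the response.
	return entity.Insert(EmptyPayload, nil)
}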
// Update updates the contents of an entity. The function fails if there is no entity
// with the same PartitionKey and RowKey in the table or if the ETag is different
// than the one in Azure.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/update-entity2
func (e *Entity) Update(force bool, options *EntityOptions) error {
return e.updateMerge(force, http.MethodPut, options)
}
// Merge merges the contents of entity specified with PartitionKey and RowKey
// with the content specified in Properties.
// The function fails if there is no entity with the same PartitionKey and
// RowKey in the table or if the ETag is different than the one in Azure.
// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/merge-entity
func (e *Entity) Merge(force bool, options *EntityOptions) error {
return e.updateMerge(force, "MERGE", options)
}
// Delete deletes the entity.
// The function fails if there is no entity with the same PartitionKey and
// RowKey in the table or if the ETag is different than the one in Azure.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-entity1
func (e *Entity) Delete(force bool, options *EntityOptions) error {
query, headers := options.getParameters()
headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())
headers = addIfMatchHeader(headers, force, e.OdataEtag)
headers = addReturnContentHeaders(headers, EmptyPayload)
uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
resp, err := e.Table.tsc.client.exec(http.MethodDelete, uri, headers, nil, e.Table.tsc.auth)
if err != nil {
if resp != nil && resp.StatusCode == http.StatusPreconditionFailed { // resp can be nil on transport errors
return fmt.Errorf(etagErrorTemplate, err)
}
return err
}
defer drainRespBody(resp)
if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
return err
}
return e.updateTimestamp(resp.Header)
}
// InsertOrReplace inserts an entity or replaces the existing one.
// Read more: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-or-replace-entity
func (e *Entity) InsertOrReplace(options *EntityOptions) error {
return e.insertOr(http.MethodPut, options)
}
// InsertOrMerge inserts an entity or merges the existing one.
// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/insert-or-merge-entity
func (e *Entity) InsertOrMerge(options *EntityOptions) error {
return e.insertOr("MERGE", options)
}
func (e *Entity) buildPath() string {
return fmt.Sprintf("%s(PartitionKey='%s', RowKey='%s')", e.Table.buildPath(), e.PartitionKey, e.RowKey)
}
// MarshalJSON is a custom marshaller for entity
func (e *Entity) MarshalJSON() ([]byte, error) {
completeMap := map[string]interface{}{}
completeMap[partitionKeyNode] = e.PartitionKey
completeMap[rowKeyNode] = e.RowKey
for k, v := range e.Properties {
typeKey := strings.Join([]string{k, OdataTypeSuffix}, "")
switch t := v.(type) {
case []byte:
completeMap[typeKey] = OdataBinary
completeMap[k] = t
case time.Time:
completeMap[typeKey] = OdataDateTime
completeMap[k] = t.Format(time.RFC3339Nano)
case uuid.UUID:
completeMap[typeKey] = OdataGUID
completeMap[k] = t.String()
case int64:
completeMap[typeKey] = OdataInt64
completeMap[k] = fmt.Sprintf("%v", v)
case float32, float64:
completeMap[typeKey] = OdataDouble
completeMap[k] = fmt.Sprintf("%v", v)
default:
completeMap[k] = v
}
if strings.HasSuffix(k, OdataTypeSuffix) {
if !(completeMap[k] == OdataBinary ||
completeMap[k] == OdataDateTime ||
completeMap[k] == OdataGUID ||
completeMap[k] == OdataInt64 ||
completeMap[k] == OdataDouble) {
return nil, fmt.Errorf("Odata.type annotation %v value is not valid", k)
}
valueKey := strings.TrimSuffix(k, OdataTypeSuffix)
if _, ok := completeMap[valueKey]; !ok {
return nil, fmt.Errorf("Odata.type annotation %v defined without value defined", k)
}
}
}
return json.Marshal(completeMap)
}
// UnmarshalJSON is a custom unmarshaller for entities
func (e *Entity) UnmarshalJSON(data []byte) error {
errorTemplate := "Deserializing error: %v"
props := map[string]interface{}{}
err := json.Unmarshal(data, &props)
if err != nil {
return err
}
// deserialize metadata
e.OdataMetadata = stringFromMap(props, "odata.metadata")
e.OdataType = stringFromMap(props, "odata.type")
e.OdataID = stringFromMap(props, "odata.id")
e.OdataEtag = stringFromMap(props, "odata.etag")
e.OdataEditLink = stringFromMap(props, "odata.editLink")
e.PartitionKey = stringFromMap(props, partitionKeyNode)
e.RowKey = stringFromMap(props, rowKeyNode)
// deserialize timestamp
timeStamp, ok := props["Timestamp"]
if ok {
str, ok := timeStamp.(string)
if !ok {
return fmt.Errorf(errorTemplate, "Timestamp casting error")
}
t, err := time.Parse(time.RFC3339Nano, str)
if err != nil {
return fmt.Errorf(errorTemplate, err)
}
e.TimeStamp = t
}
delete(props, "Timestamp")
delete(props, "Timestamp@odata.type")
// deserialize entity (user defined fields)
for k, v := range props {
if strings.HasSuffix(k, OdataTypeSuffix) {
valueKey := strings.TrimSuffix(k, OdataTypeSuffix)
str, ok := props[valueKey].(string)
if !ok {
return fmt.Errorf(errorTemplate, fmt.Sprintf("%v casting error", v))
}
switch v {
case OdataBinary:
props[valueKey], err = base64.StdEncoding.DecodeString(str)
if err != nil {
return fmt.Errorf(errorTemplate, err)
}
case OdataDateTime:
t, err := time.Parse("2006-01-02T15:04:05Z", str)
if err != nil {
return fmt.Errorf(errorTemplate, err)
}
props[valueKey] = t
case OdataGUID:
props[valueKey] = uuid.FromStringOrNil(str)
case OdataInt64:
i, err := strconv.ParseInt(str, 10, 64)
if err != nil {
return fmt.Errorf(errorTemplate, err)
}
props[valueKey] = i
case OdataDouble:
f, err := strconv.ParseFloat(str, 64)
if err != nil {
return fmt.Errorf(errorTemplate, err)
}
props[valueKey] = f
default:
return fmt.Errorf(errorTemplate, fmt.Sprintf("%v is not supported", v))
}
delete(props, k)
}
}
e.Properties = props
return nil
}
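// Illustrative sketch (not part of the original SDK): MarshalJSON writes a
// "<name>@odata.type" annotation next to each property that needs one, and
// UnmarshalJSON uses the same annotation to restore the Go type. Names and values
// are made up for the example.
func exampleEntityRoundTrip(table *Table) error {
	in := table.GetEntityReference("pk", "rk")
	in.Properties = map[string]interface{}{
		"Count": int64(42), // emitted as "Count":"42" plus "Count@odata.type":"Edm.Int64"
	}
	data, err := json.Marshal(in)
	if err != nil {
		return err
	}
	out := table.GetEntityReference("pk", "rk")
	if err := out.UnmarshalJSON(data); err != nil {
		return err
	}
	_ = out.Properties["Count"].(int64) // restored to int64 from the annotated string
	return nil
}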
func getAndDelete(props map[string]interface{}, key string) interface{} {
if value, ok := props[key]; ok {
delete(props, key)
return value
}
return nil
}
func addIfMatchHeader(h map[string]string, force bool, etag string) map[string]string {
if force {
h[headerIfMatch] = "*"
} else {
h[headerIfMatch] = etag
}
return h
}
// updates Etag and timestamp
func (e *Entity) updateEtagAndTimestamp(headers http.Header) error {
e.OdataEtag = headers.Get(headerEtag)
return e.updateTimestamp(headers)
}
func (e *Entity) updateTimestamp(headers http.Header) error {
str := headers.Get(headerDate)
t, err := time.Parse(time.RFC1123, str)
if err != nil {
return fmt.Errorf("Update timestamp error: %v", err)
}
e.TimeStamp = t
return nil
}
func (e *Entity) insertOr(verb string, options *EntityOptions) error {
query, headers := options.getParameters()
headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())
body, err := json.Marshal(e)
if err != nil {
return err
}
headers = addBodyRelatedHeaders(headers, len(body))
headers = addReturnContentHeaders(headers, EmptyPayload)
uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
return err
}
return e.updateEtagAndTimestamp(resp.Header)
}
func (e *Entity) updateMerge(force bool, verb string, options *EntityOptions) error {
query, headers := options.getParameters()
headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())
body, err := json.Marshal(e)
if err != nil {
return err
}
headers = addBodyRelatedHeaders(headers, len(body))
headers = addIfMatchHeader(headers, force, e.OdataEtag)
headers = addReturnContentHeaders(headers, EmptyPayload)
uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
if err != nil {
if resp != nil && resp.StatusCode == http.StatusPreconditionFailed { // resp can be nil on transport errors
return fmt.Errorf(etagErrorTemplate, err)
}
return err
}
defer drainRespBody(resp)
if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
return err
}
return e.updateEtagAndTimestamp(resp.Header)
}
func stringFromMap(props map[string]interface{}, key string) string {
value := getAndDelete(props, key)
if value != nil {
return value.(string)
}
return ""
}
func (options *EntityOptions) getParameters() (url.Values, map[string]string) {
query := url.Values{}
headers := map[string]string{}
if options != nil {
query = addTimeout(query, options.Timeout)
headers = headersFromStruct(*options)
}
return query, headers
}

View File

@ -1,484 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"sync"
)
const fourMB = uint64(4194304)
const oneTB = uint64(1099511627776)
// Export maximum range and file sizes
// MaxRangeSize defines the maximum size in bytes for a file range.
const MaxRangeSize = fourMB
// MaxFileSize defines the maximum size in bytes for a file.
const MaxFileSize = oneTB
// File represents a file on a share.
type File struct {
fsc *FileServiceClient
Metadata map[string]string
Name string `xml:"Name"`
parent *Directory
Properties FileProperties `xml:"Properties"`
share *Share
FileCopyProperties FileCopyState
mutex *sync.Mutex
}
// FileProperties contains various properties of a file.
type FileProperties struct {
CacheControl string `header:"x-ms-cache-control"`
Disposition string `header:"x-ms-content-disposition"`
Encoding string `header:"x-ms-content-encoding"`
Etag string
Language string `header:"x-ms-content-language"`
LastModified string
Length uint64 `xml:"Content-Length" header:"x-ms-content-length"`
MD5 string `header:"x-ms-content-md5"`
Type string `header:"x-ms-content-type"`
}
// FileCopyState contains various properties of a file copy operation.
type FileCopyState struct {
CompletionTime string
ID string `header:"x-ms-copy-id"`
Progress string
Source string
Status string `header:"x-ms-copy-status"`
StatusDesc string
}
// FileStream contains file data returned from a call to GetFile.
type FileStream struct {
Body io.ReadCloser
ContentMD5 string
}
// FileRequestOptions will be passed to misc file operations.
// Currently just Timeout (in seconds) but could expand.
type FileRequestOptions struct {
Timeout uint // timeout duration in seconds.
}
func prepareOptions(options *FileRequestOptions) url.Values {
params := url.Values{}
if options != nil {
params = addTimeout(params, options.Timeout)
}
return params
}
// FileRanges contains a list of file range information for a file.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
type FileRanges struct {
ContentLength uint64
LastModified string
ETag string
FileRanges []FileRange `xml:"Range"`
}
// FileRange contains range information for a file.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
type FileRange struct {
Start uint64 `xml:"Start"`
End uint64 `xml:"End"`
}
func (fr FileRange) String() string {
return fmt.Sprintf("bytes=%d-%d", fr.Start, fr.End)
}
// builds the complete file path for this file object
func (f *File) buildPath() string {
return f.parent.buildPath() + "/" + f.Name
}
// ClearRange releases the specified range of space in a file.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range
func (f *File) ClearRange(fileRange FileRange, options *FileRequestOptions) error {
var timeout *uint
if options != nil {
timeout = &options.Timeout
}
headers, err := f.modifyRange(nil, fileRange, timeout, nil)
if err != nil {
return err
}
f.updateEtagAndLastModified(headers)
return nil
}
// Create creates a new file or replaces an existing one.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-File
func (f *File) Create(maxSize uint64, options *FileRequestOptions) error {
if maxSize > oneTB {
return fmt.Errorf("max file size is 1TB")
}
params := prepareOptions(options)
headers := headersFromStruct(f.Properties)
headers["x-ms-content-length"] = strconv.FormatUint(maxSize, 10)
headers["x-ms-type"] = "file"
outputHeaders, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, headers), []int{http.StatusCreated})
if err != nil {
return err
}
f.Properties.Length = maxSize
f.updateEtagAndLastModified(outputHeaders)
return nil
}
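// Usage sketch (not part of the original SDK): pre-allocate an empty 1 MiB file on a
// share; content is written afterwards range by range. The file name is a placeholder.
func exampleCreateEmptyFile(dir *Directory) (*File, error) {
	f := dir.GetFileReference("state.bin")
	if err := f.Create(1024*1024, nil); err != nil {
		return nil, err
	}
	return f, nil
}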
// CopyFile copies a file/blob from the sourceURL to the path provided.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/copy-file
func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error {
extraHeaders := map[string]string{
"x-ms-type": "file",
"x-ms-copy-source": sourceURL,
}
params := prepareOptions(options)
headers, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted})
if err != nil {
return err
}
f.updateEtagAndLastModified(headers)
f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id")
f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status")
return nil
}
// Delete immediately removes this file from the storage account.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2
func (f *File) Delete(options *FileRequestOptions) error {
return f.fsc.deleteResource(f.buildPath(), resourceFile, options)
}
// DeleteIfExists removes this file if it exists.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2
func (f *File) DeleteIfExists(options *FileRequestOptions) (bool, error) {
resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile, options)
if resp != nil {
defer drainRespBody(resp)
if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound {
return resp.StatusCode == http.StatusAccepted, nil
}
}
return false, err
}
// GetFileOptions includes options for a get file operation
type GetFileOptions struct {
Timeout uint
GetContentMD5 bool
}
// DownloadToStream operation downloads the file.
//
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file
func (f *File) DownloadToStream(options *FileRequestOptions) (io.ReadCloser, error) {
params := prepareOptions(options)
resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, nil)
if err != nil {
return nil, err
}
if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
drainRespBody(resp)
return nil, err
}
return resp.Body, nil
}
// DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file
func (f *File) DownloadRangeToStream(fileRange FileRange, options *GetFileOptions) (fs FileStream, err error) {
extraHeaders := map[string]string{
"Range": fileRange.String(),
}
params := url.Values{}
if options != nil {
if options.GetContentMD5 {
if isRangeTooBig(fileRange) {
return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true")
}
extraHeaders["x-ms-range-get-content-md5"] = "true"
}
params = addTimeout(params, options.Timeout)
}
resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, extraHeaders)
if err != nil {
return fs, err
}
if err = checkRespCode(resp, []int{http.StatusOK, http.StatusPartialContent}); err != nil {
drainRespBody(resp)
return fs, err
}
fs.Body = resp.Body
if options != nil && options.GetContentMD5 {
fs.ContentMD5 = resp.Header.Get("Content-MD5")
}
return fs, nil
}
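// Example (illustrative sketch, hypothetical helper): download the first 4MB
// of an existing file as a single range. Assumes the *File was obtained
// elsewhere from a directory reference on a configured Client.
func exampleDownloadFirstRange(f *File) ([]byte, error) {
	// Request bytes 0..4MB-1; an MD5 of the range may only be requested for
	// ranges of 4MB or less.
	fs, err := f.DownloadRangeToStream(FileRange{Start: 0, End: 4*1024*1024 - 1}, &GetFileOptions{GetContentMD5: true})
	if err != nil {
		return nil, err
	}
	defer fs.Body.Close()
	return ioutil.ReadAll(fs.Body)
}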
// Exists returns true if this file exists.
func (f *File) Exists() (bool, error) {
exists, headers, err := f.fsc.resourceExists(f.buildPath(), resourceFile)
if exists {
f.updateEtagAndLastModified(headers)
f.updateProperties(headers)
}
return exists, err
}
// FetchAttributes updates metadata and properties for this file.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-properties
func (f *File) FetchAttributes(options *FileRequestOptions) error {
params := prepareOptions(options)
headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, params, http.MethodHead)
if err != nil {
return err
}
f.updateEtagAndLastModified(headers)
f.updateProperties(headers)
f.Metadata = getMetadataFromHeaders(headers)
return nil
}
// returns true if the range is larger than 4MB
func isRangeTooBig(fileRange FileRange) bool {
if fileRange.End-fileRange.Start > fourMB {
return true
}
return false
}
// ListRangesOptions includes options for a list file ranges operation
type ListRangesOptions struct {
Timeout uint
ListRange *FileRange
}
// ListRanges returns the list of valid ranges for this file.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
func (f *File) ListRanges(options *ListRangesOptions) (*FileRanges, error) {
params := url.Values{"comp": {"rangelist"}}
// add optional range to list
var headers map[string]string
if options != nil {
params = addTimeout(params, options.Timeout)
if options.ListRange != nil {
headers = make(map[string]string)
headers["Range"] = options.ListRange.String()
}
}
resp, err := f.fsc.listContent(f.buildPath(), params, headers)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var cl uint64
cl, err = strconv.ParseUint(resp.Header.Get("x-ms-content-length"), 10, 64)
if err != nil {
ioutil.ReadAll(resp.Body)
return nil, err
}
var out FileRanges
out.ContentLength = cl
out.ETag = resp.Header.Get("ETag")
out.LastModified = resp.Header.Get("Last-Modified")
err = xmlUnmarshal(resp.Body, &out)
return &out, err
}
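// Example (illustrative sketch, hypothetical helper): list the valid ranges of
// a file, restricted to its first 1MB. Assumes the *File was obtained from a
// configured Client.
func exampleListRanges(f *File) error {
	ranges, err := f.ListRanges(&ListRangesOptions{
		ListRange: &FileRange{Start: 0, End: 1024*1024 - 1}, // only report ranges inside this window
	})
	if err != nil {
		return err
	}
	for _, r := range ranges.FileRanges {
		fmt.Printf("valid range: %s\n", r.String()) // prints "bytes=start-end"
	}
	return nil
}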
// modifies a range of bytes in this file
func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, timeout *uint, contentMD5 *string) (http.Header, error) {
if err := f.fsc.checkForStorageEmulator(); err != nil {
return nil, err
}
if fileRange.End < fileRange.Start {
return nil, errors.New("the value for rangeEnd must be greater than or equal to rangeStart")
}
if bytes != nil && isRangeTooBig(fileRange) {
return nil, errors.New("range cannot exceed 4MB in size")
}
params := url.Values{"comp": {"range"}}
if timeout != nil {
params = addTimeout(params, *timeout)
}
uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), params)
// default to clear
write := "clear"
cl := uint64(0)
// if bytes is not nil then this is an update operation
if bytes != nil {
write = "update"
cl = (fileRange.End - fileRange.Start) + 1
}
extraHeaders := map[string]string{
"Content-Length": strconv.FormatUint(cl, 10),
"Range": fileRange.String(),
"x-ms-write": write,
}
if contentMD5 != nil {
extraHeaders["Content-MD5"] = *contentMD5
}
headers := mergeHeaders(f.fsc.client.getStandardHeaders(), extraHeaders)
resp, err := f.fsc.client.exec(http.MethodPut, uri, headers, bytes, f.fsc.auth)
if err != nil {
return nil, err
}
defer drainRespBody(resp)
return resp.Header, checkRespCode(resp, []int{http.StatusCreated})
}
// SetMetadata replaces the metadata for this file.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetFileMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Metadata
func (f *File) SetMetadata(options *FileRequestOptions) error {
headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil), options)
if err != nil {
return err
}
f.updateEtagAndLastModified(headers)
return nil
}
// SetProperties sets system properties on this file.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetFileProperties. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Properties
func (f *File) SetProperties(options *FileRequestOptions) error {
headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties), options)
if err != nil {
return err
}
f.updateEtagAndLastModified(headers)
return nil
}
// updates Etag and last modified date
func (f *File) updateEtagAndLastModified(headers http.Header) {
f.Properties.Etag = headers.Get("Etag")
f.Properties.LastModified = headers.Get("Last-Modified")
}
// updates file properties from the specified HTTP header
func (f *File) updateProperties(header http.Header) {
size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64)
if err == nil {
f.Properties.Length = size
}
f.updateEtagAndLastModified(header)
f.Properties.CacheControl = header.Get("Cache-Control")
f.Properties.Disposition = header.Get("Content-Disposition")
f.Properties.Encoding = header.Get("Content-Encoding")
f.Properties.Language = header.Get("Content-Language")
f.Properties.MD5 = header.Get("Content-MD5")
f.Properties.Type = header.Get("Content-Type")
}
// URL gets the canonical URL to this file.
// This method does not create a publicly accessible URL if the file
// is private, nor does it check whether the file exists.
func (f *File) URL() string {
return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), nil)
}
// WriteRangeOptions includes options for a write file range operation
type WriteRangeOptions struct {
Timeout uint
ContentMD5 string
}
// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content (inside
// options parameter). Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with
// a maximum size of 4MB.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range
func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, options *WriteRangeOptions) error {
if bytes == nil {
return errors.New("bytes cannot be nil")
}
var timeout *uint
var md5 *string
if options != nil {
timeout = &options.Timeout
md5 = &options.ContentMD5
}
headers, err := f.modifyRange(bytes, fileRange, timeout, md5)
if err != nil {
return err
}
// it's perfectly legal for multiple go routines to call WriteRange
// on the same *File (e.g. concurrently writing non-overlapping ranges)
// so we must take the file mutex before updating our properties.
f.mutex.Lock()
f.updateEtagAndLastModified(headers)
f.mutex.Unlock()
return nil
}
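// Example (illustrative sketch, hypothetical helper): create a file and fill
// it with a single write. The reader must supply exactly size bytes (at most
// 4MB), matching (End-Start)+1; a caller could pass bytes.NewReader(data) with
// size == uint64(len(data)). Assumes the *File was obtained from a configured
// Client.
func exampleCreateAndWrite(f *File, payload io.Reader, size uint64) error {
	// Reserve space first; Create sets the maximum size of the file.
	if err := f.Create(size, nil); err != nil {
		return err
	}
	// Write the whole payload as one range covering bytes 0..size-1.
	return f.WriteRange(payload, FileRange{Start: 0, End: size - 1}, nil)
}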

View File

@ -1,338 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strconv"
)
// FileServiceClient contains operations for Microsoft Azure File Service.
type FileServiceClient struct {
client Client
auth authentication
}
// ListSharesParameters defines the set of customizable parameters to make a
// List Shares call.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares
type ListSharesParameters struct {
Prefix string
Marker string
Include string
MaxResults uint
Timeout uint
}
// ShareListResponse contains the response fields from
// ListShares call.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares
type ShareListResponse struct {
XMLName xml.Name `xml:"EnumerationResults"`
Xmlns string `xml:"xmlns,attr"`
Prefix string `xml:"Prefix"`
Marker string `xml:"Marker"`
NextMarker string `xml:"NextMarker"`
MaxResults int64 `xml:"MaxResults"`
Shares []Share `xml:"Shares>Share"`
}
type compType string
const (
compNone compType = ""
compList compType = "list"
compMetadata compType = "metadata"
compProperties compType = "properties"
compRangeList compType = "rangelist"
)
func (ct compType) String() string {
return string(ct)
}
type resourceType string
const (
resourceDirectory resourceType = "directory"
resourceFile resourceType = ""
resourceShare resourceType = "share"
)
func (rt resourceType) String() string {
return string(rt)
}
func (p ListSharesParameters) getParameters() url.Values {
out := url.Values{}
if p.Prefix != "" {
out.Set("prefix", p.Prefix)
}
if p.Marker != "" {
out.Set("marker", p.Marker)
}
if p.Include != "" {
out.Set("include", p.Include)
}
if p.MaxResults != 0 {
out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
}
if p.Timeout != 0 {
out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
}
return out
}
func (p ListDirsAndFilesParameters) getParameters() url.Values {
out := url.Values{}
if p.Prefix != "" {
out.Set("prefix", p.Prefix)
}
if p.Marker != "" {
out.Set("marker", p.Marker)
}
if p.MaxResults != 0 {
out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
}
out = addTimeout(out, p.Timeout)
return out
}
// returns url.Values for the specified types
func getURLInitValues(comp compType, res resourceType) url.Values {
values := url.Values{}
if comp != compNone {
values.Set("comp", comp.String())
}
if res != resourceFile {
values.Set("restype", res.String())
}
return values
}
// GetShareReference returns a Share object for the specified share name.
func (f *FileServiceClient) GetShareReference(name string) *Share {
return &Share{
fsc: f,
Name: name,
Properties: ShareProperties{
Quota: -1,
},
}
}
// ListShares returns the list of shares in a storage account along with
// pagination token and other response details.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/list-shares
func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) {
q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
var out ShareListResponse
resp, err := f.listContent("", q, nil)
if err != nil {
return nil, err
}
defer resp.Body.Close()
err = xmlUnmarshal(resp.Body, &out)
// assign our client to the newly created Share objects
for i := range out.Shares {
out.Shares[i].fsc = &f
}
return &out, err
}
// GetServiceProperties gets the properties of your storage account's file service.
// File service does not support logging
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-service-properties
func (f *FileServiceClient) GetServiceProperties() (*ServiceProperties, error) {
return f.client.getServiceProperties(fileServiceName, f.auth)
}
// SetServiceProperties sets the properties of your storage account's file service.
// File service does not support logging
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-file-service-properties
func (f *FileServiceClient) SetServiceProperties(props ServiceProperties) error {
return f.client.setServiceProperties(props, fileServiceName, f.auth)
}
// retrieves directory or share content
func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*http.Response, error) {
if err := f.checkForStorageEmulator(); err != nil {
return nil, err
}
uri := f.client.getEndpoint(fileServiceName, path, params)
extraHeaders = f.client.protectUserAgent(extraHeaders)
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
resp, err := f.client.exec(http.MethodGet, uri, headers, nil, f.auth)
if err != nil {
return nil, err
}
if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
drainRespBody(resp)
return nil, err
}
return resp, nil
}
// returns true if the specified resource exists
func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, http.Header, error) {
if err := f.checkForStorageEmulator(); err != nil {
return false, nil, err
}
uri := f.client.getEndpoint(fileServiceName, path, getURLInitValues(compNone, res))
headers := f.client.getStandardHeaders()
resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth)
if resp != nil {
defer drainRespBody(resp)
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound {
return resp.StatusCode == http.StatusOK, resp.Header, nil
}
}
return false, nil, err
}
// creates a resource depending on the specified resource type
func (f FileServiceClient) createResource(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string, expectedResponseCodes []int) (http.Header, error) {
resp, err := f.createResourceNoClose(path, res, urlParams, extraHeaders)
if err != nil {
return nil, err
}
defer drainRespBody(resp)
return resp.Header, checkRespCode(resp, expectedResponseCodes)
}
// creates a resource depending on the specified resource type, doesn't close the response body
func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*http.Response, error) {
if err := f.checkForStorageEmulator(); err != nil {
return nil, err
}
values := getURLInitValues(compNone, res)
combinedParams := mergeParams(values, urlParams)
uri := f.client.getEndpoint(fileServiceName, path, combinedParams)
extraHeaders = f.client.protectUserAgent(extraHeaders)
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
return f.client.exec(http.MethodPut, uri, headers, nil, f.auth)
}
// returns HTTP header data for the specified directory or share
func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, params url.Values, verb string) (http.Header, error) {
resp, err := f.getResourceNoClose(path, comp, res, params, verb, nil)
if err != nil {
return nil, err
}
defer drainRespBody(resp)
if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
return nil, err
}
return resp.Header, nil
}
// gets the specified resource, doesn't close the response body
func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, params url.Values, verb string, extraHeaders map[string]string) (*http.Response, error) {
if err := f.checkForStorageEmulator(); err != nil {
return nil, err
}
params = mergeParams(params, getURLInitValues(comp, res))
uri := f.client.getEndpoint(fileServiceName, path, params)
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
return f.client.exec(verb, uri, headers, nil, f.auth)
}
// deletes the resource and checks the response code
func (f FileServiceClient) deleteResource(path string, res resourceType, options *FileRequestOptions) error {
resp, err := f.deleteResourceNoClose(path, res, options)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusAccepted})
}
// deletes the resource and returns the response, doesn't close the response body
func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType, options *FileRequestOptions) (*http.Response, error) {
if err := f.checkForStorageEmulator(); err != nil {
return nil, err
}
values := mergeParams(getURLInitValues(compNone, res), prepareOptions(options))
uri := f.client.getEndpoint(fileServiceName, path, values)
return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth)
}
// merges metadata into extraHeaders and returns extraHeaders
func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[string]string {
if metadata == nil && extraHeaders == nil {
return nil
}
if extraHeaders == nil {
extraHeaders = make(map[string]string)
}
for k, v := range metadata {
extraHeaders[userDefinedMetadataHeaderPrefix+k] = v
}
return extraHeaders
}
// sets extra header data for the specified resource
func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string, options *FileRequestOptions) (http.Header, error) {
if err := f.checkForStorageEmulator(); err != nil {
return nil, err
}
params := mergeParams(getURLInitValues(comp, res), prepareOptions(options))
uri := f.client.getEndpoint(fileServiceName, path, params)
extraHeaders = f.client.protectUserAgent(extraHeaders)
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
resp, err := f.client.exec(http.MethodPut, uri, headers, nil, f.auth)
if err != nil {
return nil, err
}
defer drainRespBody(resp)
return resp.Header, checkRespCode(resp, []int{http.StatusOK})
}
// checkForStorageEmulator determines if the client is set up for use with
// Azure Storage Emulator, and returns a relevant error
func (f FileServiceClient) checkForStorageEmulator() error {
if f.client.accountName == StorageEmulatorAccountName {
return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator")
}
return nil
}

View File

@ -1,201 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"errors"
"net/http"
"net/url"
"strconv"
"time"
)
// lease constants.
const (
leaseHeaderPrefix = "x-ms-lease-"
headerLeaseID = "x-ms-lease-id"
leaseAction = "x-ms-lease-action"
leaseBreakPeriod = "x-ms-lease-break-period"
leaseDuration = "x-ms-lease-duration"
leaseProposedID = "x-ms-proposed-lease-id"
leaseTime = "x-ms-lease-time"
acquireLease = "acquire"
renewLease = "renew"
changeLease = "change"
releaseLease = "release"
breakLease = "break"
)
// leaseCommonPut is common PUT code for the various acquire/renew/change/release/break functions.
func (b *Blob) leaseCommonPut(headers map[string]string, expectedStatus int, options *LeaseOptions) (http.Header, error) {
params := url.Values{"comp": {"lease"}}
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
if err != nil {
return nil, err
}
defer drainRespBody(resp)
if err := checkRespCode(resp, []int{expectedStatus}); err != nil {
return nil, err
}
return resp.Header, nil
}
// LeaseOptions includes options for all operations regarding leasing blobs
type LeaseOptions struct {
Timeout uint
Origin string `header:"Origin"`
IfMatch string `header:"If-Match"`
IfNoneMatch string `header:"If-None-Match"`
IfModifiedSince *time.Time `header:"If-Modified-Since"`
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
RequestID string `header:"x-ms-client-request-id"`
}
// AcquireLease creates a lease for a blob
// returns leaseID acquired
// In API versions starting with 2012-02-12, the minimum leaseTimeInSeconds is 15, the maximum
// non-infinite leaseTimeInSeconds is 60. To specify an infinite lease, provide the value -1.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
func (b *Blob) AcquireLease(leaseTimeInSeconds int, proposedLeaseID string, options *LeaseOptions) (returnedLeaseID string, err error) {
headers := b.Container.bsc.client.getStandardHeaders()
headers[leaseAction] = acquireLease
if leaseTimeInSeconds == -1 {
// Do nothing, but don't trigger the following clauses.
} else if leaseTimeInSeconds > 60 || b.Container.bsc.client.apiVersion < "2012-02-12" {
leaseTimeInSeconds = 60
} else if leaseTimeInSeconds < 15 {
leaseTimeInSeconds = 15
}
headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds)
if proposedLeaseID != "" {
headers[leaseProposedID] = proposedLeaseID
}
respHeaders, err := b.leaseCommonPut(headers, http.StatusCreated, options)
if err != nil {
return "", err
}
returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID))
if returnedLeaseID != "" {
return returnedLeaseID, nil
}
return "", errors.New("LeaseID not returned")
}
// BreakLease breaks the lease for a blob
// Returns the timeout remaining in the lease in seconds
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
func (b *Blob) BreakLease(options *LeaseOptions) (breakTimeout int, err error) {
headers := b.Container.bsc.client.getStandardHeaders()
headers[leaseAction] = breakLease
return b.breakLeaseCommon(headers, options)
}
// BreakLeaseWithBreakPeriod breaks the lease for a blob
// breakPeriodInSeconds is used to determine how long until a new lease can be created.
// Returns the timeout remaining in the lease in seconds
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
func (b *Blob) BreakLeaseWithBreakPeriod(breakPeriodInSeconds int, options *LeaseOptions) (breakTimeout int, err error) {
headers := b.Container.bsc.client.getStandardHeaders()
headers[leaseAction] = breakLease
headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds)
return b.breakLeaseCommon(headers, options)
}
// breakLeaseCommon is common code for both versions of BreakLease (with and without break period)
func (b *Blob) breakLeaseCommon(headers map[string]string, options *LeaseOptions) (breakTimeout int, err error) {
respHeaders, err := b.leaseCommonPut(headers, http.StatusAccepted, options)
if err != nil {
return 0, err
}
breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime))
if breakTimeoutStr != "" {
breakTimeout, err = strconv.Atoi(breakTimeoutStr)
if err != nil {
return 0, err
}
}
return breakTimeout, nil
}
// ChangeLease changes a lease ID for a blob
// Returns the new LeaseID acquired
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
func (b *Blob) ChangeLease(currentLeaseID string, proposedLeaseID string, options *LeaseOptions) (newLeaseID string, err error) {
headers := b.Container.bsc.client.getStandardHeaders()
headers[leaseAction] = changeLease
headers[headerLeaseID] = currentLeaseID
headers[leaseProposedID] = proposedLeaseID
respHeaders, err := b.leaseCommonPut(headers, http.StatusOK, options)
if err != nil {
return "", err
}
newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID))
if newLeaseID != "" {
return newLeaseID, nil
}
return "", errors.New("LeaseID not returned")
}
// ReleaseLease releases the lease for a blob
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
func (b *Blob) ReleaseLease(currentLeaseID string, options *LeaseOptions) error {
headers := b.Container.bsc.client.getStandardHeaders()
headers[leaseAction] = releaseLease
headers[headerLeaseID] = currentLeaseID
_, err := b.leaseCommonPut(headers, http.StatusOK, options)
if err != nil {
return err
}
return nil
}
// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
func (b *Blob) RenewLease(currentLeaseID string, options *LeaseOptions) error {
headers := b.Container.bsc.client.getStandardHeaders()
headers[leaseAction] = renewLease
headers[headerLeaseID] = currentLeaseID
_, err := b.leaseCommonPut(headers, http.StatusOK, options)
if err != nil {
return err
}
return nil
}
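// Example (illustrative sketch, hypothetical helper): hold a lease on a blob
// while performing some work, then release it. Assumes the *Blob was obtained
// from a configured container reference.
func exampleWithLease(b *Blob, work func() error) error {
	// Acquire a 30-second lease; pass "" to let the service generate the lease ID.
	leaseID, err := b.AcquireLease(30, "", nil)
	if err != nil {
		return err
	}
	// Always release the lease so other writers are not blocked until it expires.
	defer b.ReleaseLease(leaseID, nil)
	// Long-running work can call b.RenewLease(leaseID, nil) to keep the lease alive.
	return work()
}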

View File

@ -1,171 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strconv"
"time"
)
// Message represents an Azure message.
type Message struct {
Queue *Queue
Text string `xml:"MessageText"`
ID string `xml:"MessageId"`
Insertion TimeRFC1123 `xml:"InsertionTime"`
Expiration TimeRFC1123 `xml:"ExpirationTime"`
PopReceipt string `xml:"PopReceipt"`
NextVisible TimeRFC1123 `xml:"TimeNextVisible"`
DequeueCount int `xml:"DequeueCount"`
}
func (m *Message) buildPath() string {
return fmt.Sprintf("%s/%s", m.Queue.buildPathMessages(), m.ID)
}
// PutMessageOptions is the set of options that can be specified for the Put Message
// operation. A zero struct does not use any preferences for the request.
type PutMessageOptions struct {
Timeout uint
VisibilityTimeout int
MessageTTL int
RequestID string `header:"x-ms-client-request-id"`
}
// Put operation adds a new message to the back of the message queue.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Message
func (m *Message) Put(options *PutMessageOptions) error {
query := url.Values{}
headers := m.Queue.qsc.client.getStandardHeaders()
req := putMessageRequest{MessageText: m.Text}
body, nn, err := xmlMarshal(req)
if err != nil {
return err
}
headers["Content-Length"] = strconv.Itoa(nn)
if options != nil {
if options.VisibilityTimeout != 0 {
query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
}
if options.MessageTTL != 0 {
query.Set("messagettl", strconv.Itoa(options.MessageTTL))
}
query = addTimeout(query, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.Queue.buildPathMessages(), query)
resp, err := m.Queue.qsc.client.exec(http.MethodPost, uri, headers, body, m.Queue.qsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
err = checkRespCode(resp, []int{http.StatusCreated})
if err != nil {
return err
}
err = xmlUnmarshal(resp.Body, m)
if err != nil {
return err
}
return nil
}
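// Example (illustrative sketch, hypothetical helper): enqueue a message that
// becomes visible immediately and expires after one hour. Assumes the *Queue
// was obtained from a configured QueueServiceClient.
func exampleEnqueue(q *Queue, text string) error {
	msg := q.GetMessageReference(text)
	return msg.Put(&PutMessageOptions{
		MessageTTL: 3600, // seconds until the message expires
	})
}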
// UpdateMessageOptions is the set of options that can be specified for the Update Message
// operation. A zero struct does not use any preferences for the request.
type UpdateMessageOptions struct {
Timeout uint
VisibilityTimeout int
RequestID string `header:"x-ms-client-request-id"`
}
// Update operation updates the specified message.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Update-Message
func (m *Message) Update(options *UpdateMessageOptions) error {
query := url.Values{}
if m.PopReceipt != "" {
query.Set("popreceipt", m.PopReceipt)
}
headers := m.Queue.qsc.client.getStandardHeaders()
req := putMessageRequest{MessageText: m.Text}
body, nn, err := xmlMarshal(req)
if err != nil {
return err
}
headers["Content-Length"] = strconv.Itoa(nn)
// visibilitytimeout is required for Update (zero or greater) so set the default here
query.Set("visibilitytimeout", "0")
if options != nil {
if options.VisibilityTimeout != 0 {
query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
}
query = addTimeout(query, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), query)
resp, err := m.Queue.qsc.client.exec(http.MethodPut, uri, headers, body, m.Queue.qsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
m.PopReceipt = resp.Header.Get("x-ms-popreceipt")
nextTimeStr := resp.Header.Get("x-ms-time-next-visible")
if nextTimeStr != "" {
nextTime, err := time.Parse(time.RFC1123, nextTimeStr)
if err != nil {
return err
}
m.NextVisible = TimeRFC1123(nextTime)
}
return checkRespCode(resp, []int{http.StatusNoContent})
}
// Delete operation deletes the specified message.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx
func (m *Message) Delete(options *QueueServiceOptions) error {
params := url.Values{"popreceipt": {m.PopReceipt}}
headers := m.Queue.qsc.client.getStandardHeaders()
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), params)
resp, err := m.Queue.qsc.client.exec(http.MethodDelete, uri, headers, nil, m.Queue.qsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusNoContent})
}
type putMessageRequest struct {
XMLName xml.Name `xml:"QueueMessage"`
MessageText string `xml:"MessageText"`
}

View File

@ -1,48 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// MetadataLevel determines if operations should return a payload,
// and its level of detail.
type MetadataLevel string
// These consts are meant to help with OData supported operations
const (
OdataTypeSuffix = "@odata.type"
// Types
OdataBinary = "Edm.Binary"
OdataDateTime = "Edm.DateTime"
OdataDouble = "Edm.Double"
OdataGUID = "Edm.Guid"
OdataInt64 = "Edm.Int64"
// Query options
OdataFilter = "$filter"
OdataOrderBy = "$orderby"
OdataTop = "$top"
OdataSkip = "$skip"
OdataCount = "$count"
OdataExpand = "$expand"
OdataSelect = "$select"
OdataSearch = "$search"
EmptyPayload MetadataLevel = ""
NoMetadata MetadataLevel = "application/json;odata=nometadata"
MinimalMetadata MetadataLevel = "application/json;odata=minimalmetadata"
FullMetadata MetadataLevel = "application/json;odata=fullmetadata"
)
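// Example (illustrative sketch, hypothetical helper): assembling table-query
// options from the constants above, shown here as a plain map that a caller
// would apply to the request URL.
func exampleQueryOptions() map[string]string {
	return map[string]string{
		OdataFilter: "Age gt 30", // $filter
		OdataTop:    "10",        // $top
		OdataSelect: "Name,Age",  // $select
	}
}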

View File

@ -1,203 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"time"
)
// GetPageRangesResponse contains the response fields from
// Get Page Ranges call.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
type GetPageRangesResponse struct {
XMLName xml.Name `xml:"PageList"`
PageList []PageRange `xml:"PageRange"`
}
// PageRange contains information about a page of a page blob from
// a Get Page Ranges call.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
type PageRange struct {
Start int64 `xml:"Start"`
End int64 `xml:"End"`
}
var (
errBlobCopyAborted = errors.New("storage: blob copy is aborted")
errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch")
)
// PutPageOptions includes the options for a put page operation
type PutPageOptions struct {
Timeout uint
LeaseID string `header:"x-ms-lease-id"`
IfSequenceNumberLessThanOrEqualTo *int `header:"x-ms-if-sequence-number-le"`
IfSequenceNumberLessThan *int `header:"x-ms-if-sequence-number-lt"`
IfSequenceNumberEqualTo *int `header:"x-ms-if-sequence-number-eq"`
IfModifiedSince *time.Time `header:"If-Modified-Since"`
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
IfMatch string `header:"If-Match"`
IfNoneMatch string `header:"If-None-Match"`
RequestID string `header:"x-ms-client-request-id"`
}
// WriteRange writes a range of pages to a page blob.
// Ranges must be aligned with 512-byte boundaries and the chunk size must be
// a multiple of 512.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page
func (b *Blob) WriteRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error {
if bytes == nil {
return errors.New("bytes cannot be nil")
}
return b.modifyRange(blobRange, bytes, options)
}
// ClearRange clears the given range in a page blob.
// Ranges must be aligned with 512-byte boundaries and the chunk size must be
// a multiple of 512.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page
func (b *Blob) ClearRange(blobRange BlobRange, options *PutPageOptions) error {
return b.modifyRange(blobRange, nil, options)
}
func (b *Blob) modifyRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error {
if blobRange.End < blobRange.Start {
return errors.New("the value for rangeEnd must be greater than or equal to rangeStart")
}
if blobRange.Start%512 != 0 {
return errors.New("the value for rangeStart must be a multiple of 512")
}
if blobRange.End%512 != 511 {
return errors.New("the value for rangeEnd must be a multiple of 512 - 1")
}
params := url.Values{"comp": {"page"}}
// default to clear
write := "clear"
var cl uint64
// if bytes is not nil then this is an update operation
if bytes != nil {
write = "update"
cl = (blobRange.End - blobRange.Start) + 1
}
headers := b.Container.bsc.client.getStandardHeaders()
headers["x-ms-blob-type"] = string(BlobTypePage)
headers["x-ms-page-write"] = write
headers["x-ms-range"] = blobRange.String()
headers["Content-Length"] = fmt.Sprintf("%v", cl)
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes, b.Container.bsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusCreated})
}
// GetPageRangesOptions includes the options for a get page ranges operation
type GetPageRangesOptions struct {
Timeout uint
Snapshot *time.Time
PreviousSnapshot *time.Time
Range *BlobRange
LeaseID string `header:"x-ms-lease-id"`
RequestID string `header:"x-ms-client-request-id"`
}
// GetPageRanges returns the list of valid page ranges for a page blob.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Page-Ranges
func (b *Blob) GetPageRanges(options *GetPageRangesOptions) (GetPageRangesResponse, error) {
params := url.Values{"comp": {"pagelist"}}
headers := b.Container.bsc.client.getStandardHeaders()
if options != nil {
params = addTimeout(params, options.Timeout)
params = addSnapshot(params, options.Snapshot)
if options.PreviousSnapshot != nil {
params.Add("prevsnapshot", timeRFC3339Formatted(*options.PreviousSnapshot))
}
if options.Range != nil {
headers["Range"] = options.Range.String()
}
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
var out GetPageRangesResponse
resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
if err != nil {
return out, err
}
defer drainRespBody(resp)
if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
return out, err
}
err = xmlUnmarshal(resp.Body, &out)
return out, err
}
// PutPageBlob initializes an empty page blob with specified name and maximum
// size in bytes (size must be aligned to a 512-byte boundary). A page blob must
// be created using this method before writing pages.
//
// See CreateBlockBlobFromReader for more info on creating blobs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) PutPageBlob(options *PutBlobOptions) error {
if b.Properties.ContentLength%512 != 0 {
return errors.New("Content length must be aligned to a 512-byte boundary")
}
params := url.Values{}
headers := b.Container.bsc.client.getStandardHeaders()
headers["x-ms-blob-type"] = string(BlobTypePage)
headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", b.Properties.ContentLength)
headers["x-ms-blob-sequence-number"] = fmt.Sprintf("%v", b.Properties.SequenceNumber)
headers = mergeHeaders(headers, headersFromStruct(b.Properties))
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
if err != nil {
return err
}
return b.respondCreation(resp, BlobTypePage)
}
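// Example (illustrative sketch, hypothetical helper): initialize a 1MB page
// blob and write its first 512-byte page. ContentLength must be a multiple of
// 512 and every written range must be 512-aligned (start%512 == 0,
// end%512 == 511). Assumes the *Blob was obtained from a configured container
// reference.
func examplePageBlob(b *Blob, page io.Reader) error {
	// The page blob must be created with its maximum size before pages are written.
	b.Properties.ContentLength = 1024 * 1024
	if err := b.PutPageBlob(nil); err != nil {
		return err
	}
	// Write the first page; the reader must supply exactly 512 bytes.
	return b.WriteRange(BlobRange{Start: 0, End: 511}, page, nil)
}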

View File

@ -1,436 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"time"
)
const (
// casing is per Golang's http.Header canonicalizing the header names.
approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count"
)
// QueueAccessPolicy represents each access policy in the queue ACL.
type QueueAccessPolicy struct {
ID string
StartTime time.Time
ExpiryTime time.Time
CanRead bool
CanAdd bool
CanUpdate bool
CanProcess bool
}
// QueuePermissions represents the queue ACLs.
type QueuePermissions struct {
AccessPolicies []QueueAccessPolicy
}
// SetQueuePermissionOptions includes options for a set queue permissions operation
type SetQueuePermissionOptions struct {
Timeout uint
RequestID string `header:"x-ms-client-request-id"`
}
// Queue represents an Azure queue.
type Queue struct {
qsc *QueueServiceClient
Name string
Metadata map[string]string
AproxMessageCount uint64
}
func (q *Queue) buildPath() string {
return fmt.Sprintf("/%s", q.Name)
}
func (q *Queue) buildPathMessages() string {
return fmt.Sprintf("%s/messages", q.buildPath())
}
// QueueServiceOptions includes options for some queue service operations
type QueueServiceOptions struct {
Timeout uint
RequestID string `header:"x-ms-client-request-id"`
}
// Create operation creates a queue under the given account.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Queue4
func (q *Queue) Create(options *QueueServiceOptions) error {
params := url.Values{}
headers := q.qsc.client.getStandardHeaders()
headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata)
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusCreated})
}
// Delete operation permanently deletes the specified queue.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Queue3
func (q *Queue) Delete(options *QueueServiceOptions) error {
params := url.Values{}
headers := q.qsc.client.getStandardHeaders()
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusNoContent})
}
// Exists returns true if a queue with given name exists.
func (q *Queue) Exists() (bool, error) {
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}})
resp, err := q.qsc.client.exec(http.MethodGet, uri, q.qsc.client.getStandardHeaders(), nil, q.qsc.auth)
if resp != nil {
defer drainRespBody(resp)
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound {
return resp.StatusCode == http.StatusOK, nil
}
err = getErrorFromResponse(resp)
}
return false, err
}
// SetMetadata operation sets user-defined metadata on the specified queue.
// Metadata is associated with the queue as name-value pairs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata
func (q *Queue) SetMetadata(options *QueueServiceOptions) error {
params := url.Values{"comp": {"metadata"}}
headers := q.qsc.client.getStandardHeaders()
headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata)
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusNoContent})
}
// GetMetadata operation retrieves user-defined metadata and queue
// properties on the specified queue. Metadata is associated with
// the queue as name-value pairs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata
//
// Because of the way Golang's http client (and http.Header in particular)
// canonicalizes header names, the returned metadata names will always
// be all lower case.
func (q *Queue) GetMetadata(options *QueueServiceOptions) error {
params := url.Values{"comp": {"metadata"}}
headers := q.qsc.client.getStandardHeaders()
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
return err
}
aproxMessagesStr := resp.Header.Get(http.CanonicalHeaderKey(approximateMessagesCountHeader))
if aproxMessagesStr != "" {
aproxMessages, err := strconv.ParseUint(aproxMessagesStr, 10, 64)
if err != nil {
return err
}
q.AproxMessageCount = aproxMessages
}
q.Metadata = getMetadataFromHeaders(resp.Header)
return nil
}
// GetMessageReference returns a message object with the specified text.
func (q *Queue) GetMessageReference(text string) *Message {
return &Message{
Queue: q,
Text: text,
}
}
// GetMessagesOptions is the set of options that can be specified for the Get
// Messages operation. A zero struct does not use any preferences for the
// request.
type GetMessagesOptions struct {
Timeout uint
NumOfMessages int
VisibilityTimeout int
RequestID string `header:"x-ms-client-request-id"`
}
type messages struct {
XMLName xml.Name `xml:"QueueMessagesList"`
Messages []Message `xml:"QueueMessage"`
}
// GetMessages operation retrieves one or more messages from the front of the
// queue.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Messages
func (q *Queue) GetMessages(options *GetMessagesOptions) ([]Message, error) {
query := url.Values{}
headers := q.qsc.client.getStandardHeaders()
if options != nil {
if options.NumOfMessages != 0 {
query.Set("numofmessages", strconv.Itoa(options.NumOfMessages))
}
if options.VisibilityTimeout != 0 {
query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
}
query = addTimeout(query, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)
resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
if err != nil {
return []Message{}, err
}
defer resp.Body.Close()
var out messages
err = xmlUnmarshal(resp.Body, &out)
if err != nil {
return []Message{}, err
}
for i := range out.Messages {
out.Messages[i].Queue = q
}
return out.Messages, err
}
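// Example (illustrative sketch, hypothetical helpers): fetch up to 10 messages,
// process each one, and delete it using the pop receipt captured by
// GetMessages. Assumes the *Queue was obtained from a configured
// QueueServiceClient; handle is caller-supplied.
func exampleProcessMessages(q *Queue, handle func(Message) error) error {
	msgs, err := q.GetMessages(&GetMessagesOptions{
		NumOfMessages:     10, // fetch up to 10 messages per call
		VisibilityTimeout: 60, // hide them from other consumers for 60 seconds
	})
	if err != nil {
		return err
	}
	for i := range msgs {
		if err := handle(msgs[i]); err != nil {
			return err
		}
		// Delete uses the PopReceipt set on the message by GetMessages.
		if err := msgs[i].Delete(nil); err != nil {
			return err
		}
	}
	return nil
}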
// PeekMessagesOptions is the set of options that can be specified for the Peek
// Messages operation. A zero struct does not use any preferences for the
// request.
type PeekMessagesOptions struct {
Timeout uint
NumOfMessages int
RequestID string `header:"x-ms-client-request-id"`
}
// PeekMessages retrieves one or more messages from the front of the queue, but
// does not alter the visibility of the message.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Peek-Messages
func (q *Queue) PeekMessages(options *PeekMessagesOptions) ([]Message, error) {
query := url.Values{"peekonly": {"true"}} // Required for peek operation
headers := q.qsc.client.getStandardHeaders()
if options != nil {
if options.NumOfMessages != 0 {
query.Set("numofmessages", strconv.Itoa(options.NumOfMessages))
}
query = addTimeout(query, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)
resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
if err != nil {
return []Message{}, err
}
defer resp.Body.Close()
var out messages
err = xmlUnmarshal(resp.Body, &out)
if err != nil {
return []Message{}, err
}
for i := range out.Messages {
out.Messages[i].Queue = q
}
return out.Messages, err
}
// ClearMessages operation deletes all messages from the specified queue.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Clear-Messages
func (q *Queue) ClearMessages(options *QueueServiceOptions) error {
params := url.Values{}
headers := q.qsc.client.getStandardHeaders()
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), params)
resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusNoContent})
}
// SetPermissions sets up queue permissions
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-acl
func (q *Queue) SetPermissions(permissions QueuePermissions, options *SetQueuePermissionOptions) error {
body, length, err := generateQueueACLpayload(permissions.AccessPolicies)
if err != nil {
return err
}
params := url.Values{
"comp": {"acl"},
}
headers := q.qsc.client.getStandardHeaders()
headers["Content-Length"] = strconv.Itoa(length)
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, body, q.qsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusNoContent})
}
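// Example (illustrative sketch, hypothetical helper): install a stored access
// policy that can read and process messages for the next 24 hours. Assumes the
// *Queue was obtained from a configured QueueServiceClient; the policy ID is
// arbitrary.
func exampleSetReadProcessPolicy(q *Queue) error {
	perms := QueuePermissions{
		AccessPolicies: []QueueAccessPolicy{{
			ID:         "read-process-policy",
			StartTime:  time.Now().UTC(),
			ExpiryTime: time.Now().UTC().Add(24 * time.Hour),
			CanRead:    true,
			CanProcess: true,
		}},
	}
	return q.SetPermissions(perms, nil)
}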
func generateQueueACLpayload(policies []QueueAccessPolicy) (io.Reader, int, error) {
sil := SignedIdentifiers{
SignedIdentifiers: []SignedIdentifier{},
}
for _, qapd := range policies {
permission := qapd.generateQueuePermissions()
signedIdentifier := convertAccessPolicyToXMLStructs(qapd.ID, qapd.StartTime, qapd.ExpiryTime, permission)
sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
}
return xmlMarshal(sil)
}
func (qapd *QueueAccessPolicy) generateQueuePermissions() (permissions string) {
// generate the permissions string (raup).
// still want the end user API to have bool flags.
permissions = ""
if qapd.CanRead {
permissions += "r"
}
if qapd.CanAdd {
permissions += "a"
}
if qapd.CanUpdate {
permissions += "u"
}
if qapd.CanProcess {
permissions += "p"
}
return permissions
}
// GetQueuePermissionOptions includes options for a get queue permissions operation
type GetQueuePermissionOptions struct {
Timeout uint
RequestID string `header:"x-ms-client-request-id"`
}
// GetPermissions gets the queue permissions as per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-acl
// If timeout is 0 then it will not be passed to Azure
func (q *Queue) GetPermissions(options *GetQueuePermissionOptions) (*QueuePermissions, error) {
params := url.Values{
"comp": {"acl"},
}
headers := q.qsc.client.getStandardHeaders()
if options != nil {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var ap AccessPolicy
err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList)
if err != nil {
return nil, err
}
return buildQueueAccessPolicy(ap, &resp.Header), nil
}
func buildQueueAccessPolicy(ap AccessPolicy, headers *http.Header) *QueuePermissions {
permissions := QueuePermissions{
AccessPolicies: []QueueAccessPolicy{},
}
for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
qapd := QueueAccessPolicy{
ID: policy.ID,
StartTime: policy.AccessPolicy.StartTime,
ExpiryTime: policy.AccessPolicy.ExpiryTime,
}
qapd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
qapd.CanAdd = updatePermissions(policy.AccessPolicy.Permission, "a")
qapd.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u")
qapd.CanProcess = updatePermissions(policy.AccessPolicy.Permission, "p")
permissions.AccessPolicies = append(permissions.AccessPolicies, qapd)
}
return &permissions
}

View File

@ -1,146 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"errors"
"fmt"
"net/url"
"strings"
"time"
)
// QueueSASOptions are options to construct a queue SAS
// URI.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
type QueueSASOptions struct {
QueueSASPermissions
SASOptions
}
// QueueSASPermissions includes the available permissions for
// a queue SAS URI.
type QueueSASPermissions struct {
Read bool
Add bool
Update bool
Process bool
}
func (q QueueSASPermissions) buildString() string {
permissions := ""
if q.Read {
permissions += "r"
}
if q.Add {
permissions += "a"
}
if q.Update {
permissions += "u"
}
if q.Process {
permissions += "p"
}
return permissions
}
// GetSASURI creates a URL to the specified queue which contains the Shared
// Access Signature with specified permissions and expiration time.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
func (q *Queue) GetSASURI(options QueueSASOptions) (string, error) {
canonicalizedResource, err := q.qsc.client.buildCanonicalizedResource(q.buildPath(), q.qsc.auth, true)
if err != nil {
return "", err
}
// "The canonicalizedresouce portion of the string is a canonical path to the signed resource.
// It must include the service name (blob, table, queue or file) for version 2015-02-21 or
// later, the storage account name, and the resource name, and must be URL-decoded.
// -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
// We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
canonicalizedResource, err = url.QueryUnescape(canonicalizedResource)
if err != nil {
return "", err
}
signedStart := ""
if options.Start != (time.Time{}) {
signedStart = options.Start.UTC().Format(time.RFC3339)
}
signedExpiry := options.Expiry.UTC().Format(time.RFC3339)
protocols := "https,http"
if options.UseHTTPS {
protocols = "https"
}
permissions := options.QueueSASPermissions.buildString()
stringToSign, err := queueSASStringToSign(q.qsc.client.apiVersion, canonicalizedResource, signedStart, signedExpiry, options.IP, permissions, protocols, options.Identifier)
if err != nil {
return "", err
}
sig := q.qsc.client.computeHmac256(stringToSign)
sasParams := url.Values{
"sv": {q.qsc.client.apiVersion},
"se": {signedExpiry},
"sp": {permissions},
"sig": {sig},
}
if q.qsc.client.apiVersion >= "2015-04-05" {
sasParams.Add("spr", protocols)
addQueryParameter(sasParams, "sip", options.IP)
}
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), nil)
sasURL, err := url.Parse(uri)
if err != nil {
return "", err
}
sasURL.RawQuery = sasParams.Encode()
return sasURL.String(), nil
}
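// Example (illustrative sketch, hypothetical helper): build a read+process SAS
// URI for a queue that is valid for one hour over HTTPS. Only the Expiry and
// UseHTTPS fields of the embedded SASOptions are set here; the rest keep their
// zero values. Assumes the *Queue comes from a client authenticated with an
// account key.
func exampleQueueSAS(q *Queue) (string, error) {
	opts := QueueSASOptions{}
	opts.Read = true
	opts.Process = true
	opts.Expiry = time.Now().UTC().Add(time.Hour)
	opts.UseHTTPS = true
	return q.GetSASURI(opts)
}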
func queueSASStringToSign(signedVersion, canonicalizedResource, signedStart, signedExpiry, signedIP, signedPermissions, protocols, signedIdentifier string) (string, error) {
if signedVersion >= "2015-02-21" {
canonicalizedResource = "/queue" + canonicalizedResource
}
// https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
if signedVersion >= "2015-04-05" {
return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s",
signedPermissions,
signedStart,
signedExpiry,
canonicalizedResource,
signedIdentifier,
signedIP,
protocols,
signedVersion), nil
}
// reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
if signedVersion >= "2013-08-15" {
return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion), nil
}
return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
}

View File

@ -1,42 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// QueueServiceClient contains operations for Microsoft Azure Queue Storage
// Service.
type QueueServiceClient struct {
client Client
auth authentication
}
// GetServiceProperties gets the properties of your storage account's queue service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties
func (q *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) {
return q.client.getServiceProperties(queueServiceName, q.auth)
}
// SetServiceProperties sets the properties of your storage account's queue service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties
func (q *QueueServiceClient) SetServiceProperties(props ServiceProperties) error {
return q.client.setServiceProperties(props, queueServiceName, q.auth)
}
// GetQueueReference returns a Queue object for the specified queue name.
func (q *QueueServiceClient) GetQueueReference(name string) *Queue {
return &Queue{
qsc: q,
Name: name,
}
}
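A minimal sketch of reading the queue service's analytics settings with this client (NewBasicClient is an assumed constructor from the same package; ServiceProperties appears later in this diff; the credentials are placeholders):
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	client, err := storage.NewBasicClient("myaccount", "ZHVtbXlrZXk=") // placeholder key
	if err != nil {
		log.Fatal(err)
	}
	qsc := client.GetQueueService()

	// GetServiceProperties issues GET ?restype=service&comp=properties against
	// the queue endpoint and unmarshals the XML response.
	props, err := qsc.GetServiceProperties()
	if err != nil {
		log.Fatal(err)
	}
	if props.Logging != nil {
		fmt.Printf("logging %s: read=%t write=%t delete=%t\n",
			props.Logging.Version, props.Logging.Read, props.Logging.Write, props.Logging.Delete)
	}
}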

View File

@ -1,216 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"fmt"
"net/http"
"net/url"
"strconv"
)
// Share represents an Azure file share.
type Share struct {
fsc *FileServiceClient
Name string `xml:"Name"`
Properties ShareProperties `xml:"Properties"`
Metadata map[string]string
}
// ShareProperties contains various properties of a share.
type ShareProperties struct {
LastModified string `xml:"Last-Modified"`
Etag string `xml:"Etag"`
Quota int `xml:"Quota"`
}
// builds the complete path for this share object.
func (s *Share) buildPath() string {
return fmt.Sprintf("/%s", s.Name)
}
// Create this share under the associated account.
// If a share with the same name already exists, the operation fails.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share
func (s *Share) Create(options *FileRequestOptions) error {
extraheaders := map[string]string{}
if s.Properties.Quota > 0 {
extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota)
}
params := prepareOptions(options)
headers, err := s.fsc.createResource(s.buildPath(), resourceShare, params, mergeMDIntoExtraHeaders(s.Metadata, extraheaders), []int{http.StatusCreated})
if err != nil {
return err
}
s.updateEtagAndLastModified(headers)
return nil
}
// CreateIfNotExists creates this share under the associated account if
// it does not exist. Returns true if the share is newly created or false if
// the share already exists.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share
func (s *Share) CreateIfNotExists(options *FileRequestOptions) (bool, error) {
extraheaders := map[string]string{}
if s.Properties.Quota > 0 {
extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota)
}
params := prepareOptions(options)
resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, params, extraheaders)
if resp != nil {
defer drainRespBody(resp)
if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict {
if resp.StatusCode == http.StatusCreated {
s.updateEtagAndLastModified(resp.Header)
return true, nil
}
return false, s.FetchAttributes(nil)
}
}
return false, err
}
// Delete marks this share for deletion. The share along with any files
// and directories contained within it are later deleted during garbage
// collection. If the share does not exist, the operation fails.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share
func (s *Share) Delete(options *FileRequestOptions) error {
return s.fsc.deleteResource(s.buildPath(), resourceShare, options)
}
// DeleteIfExists operation marks this share for deletion if it exists.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share
func (s *Share) DeleteIfExists(options *FileRequestOptions) (bool, error) {
resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare, options)
if resp != nil {
defer drainRespBody(resp)
if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound {
return resp.StatusCode == http.StatusAccepted, nil
}
}
return false, err
}
// Exists returns true if this share already exists
// on the storage account, otherwise returns false.
func (s *Share) Exists() (bool, error) {
exists, headers, err := s.fsc.resourceExists(s.buildPath(), resourceShare)
if exists {
s.updateEtagAndLastModified(headers)
s.updateQuota(headers)
}
return exists, err
}
// FetchAttributes retrieves metadata and properties for this share.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-share-properties
func (s *Share) FetchAttributes(options *FileRequestOptions) error {
params := prepareOptions(options)
headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, params, http.MethodHead)
if err != nil {
return err
}
s.updateEtagAndLastModified(headers)
s.updateQuota(headers)
s.Metadata = getMetadataFromHeaders(headers)
return nil
}
// GetRootDirectoryReference returns a Directory object at the root of this share.
func (s *Share) GetRootDirectoryReference() *Directory {
return &Directory{
fsc: s.fsc,
share: s,
}
}
// ServiceClient returns the FileServiceClient associated with this share.
func (s *Share) ServiceClient() *FileServiceClient {
return s.fsc
}
// SetMetadata replaces the metadata for this share.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetShareMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-share-metadata
func (s *Share) SetMetadata(options *FileRequestOptions) error {
headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil), options)
if err != nil {
return err
}
s.updateEtagAndLastModified(headers)
return nil
}
// SetProperties sets system properties for this share.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetShareProperties. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Share-Properties
func (s *Share) SetProperties(options *FileRequestOptions) error {
extraheaders := map[string]string{}
if s.Properties.Quota > 0 {
if s.Properties.Quota > 5120 {
return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota)
}
extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota)
}
headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, extraheaders, options)
if err != nil {
return err
}
s.updateEtagAndLastModified(headers)
return nil
}
// updates Etag and last modified date
func (s *Share) updateEtagAndLastModified(headers http.Header) {
s.Properties.Etag = headers.Get("Etag")
s.Properties.LastModified = headers.Get("Last-Modified")
}
// updates quota value
func (s *Share) updateQuota(headers http.Header) {
quota, err := strconv.Atoi(headers.Get("x-ms-share-quota"))
if err == nil {
s.Properties.Quota = quota
}
}
// URL gets the canonical URL to this share. This method does not create a publicly accessible
// URL if the share is private and this method does not check if the share exists.
func (s *Share) URL() string {
return s.fsc.client.getEndpoint(fileServiceName, s.buildPath(), url.Values{})
}
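A minimal sketch of creating a share with a quota through this type. GetFileService and GetShareReference are assumed helpers on the same legacy client and are not part of this diff; the credentials are placeholders.
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	client, err := storage.NewBasicClient("myaccount", "ZHVtbXlrZXk=") // placeholder key
	if err != nil {
		log.Fatal(err)
	}
	fsc := client.GetFileService()
	share := fsc.GetShareReference("backups")

	// Quota is sent as the x-ms-share-quota header; SetProperties enforces 1..5120.
	share.Properties.Quota = 100
	created, err := share.CreateIfNotExists(nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("newly created: %t, etag: %s", created, share.Properties.Etag)
}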

View File

@ -1,61 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"strings"
"time"
)
// AccessPolicyDetailsXML has specifics about an access policy
// annotated with XML details.
type AccessPolicyDetailsXML struct {
StartTime time.Time `xml:"Start"`
ExpiryTime time.Time `xml:"Expiry"`
Permission string `xml:"Permission"`
}
// SignedIdentifier is a wrapper for a specific policy
type SignedIdentifier struct {
ID string `xml:"Id"`
AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"`
}
// SignedIdentifiers part of the response from GetPermissions call.
type SignedIdentifiers struct {
SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"`
}
// AccessPolicy is the response type from the GetPermissions call.
type AccessPolicy struct {
SignedIdentifiersList SignedIdentifiers `xml:"SignedIdentifiers"`
}
// convertAccessPolicyToXMLStructs converts between AccessPolicyDetails which is a struct better for API usage to the
// AccessPolicy struct which will get converted to XML.
func convertAccessPolicyToXMLStructs(id string, startTime time.Time, expiryTime time.Time, permissions string) SignedIdentifier {
return SignedIdentifier{
ID: id,
AccessPolicy: AccessPolicyDetailsXML{
StartTime: startTime.UTC().Round(time.Second),
ExpiryTime: expiryTime.UTC().Round(time.Second),
Permission: permissions,
},
}
}
func updatePermissions(permissions, permission string) bool {
return strings.Contains(permissions, permission)
}

View File

@ -1,150 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"net/http"
"net/url"
"strconv"
)
// ServiceProperties represents the storage account service properties
type ServiceProperties struct {
Logging *Logging
HourMetrics *Metrics
MinuteMetrics *Metrics
Cors *Cors
DeleteRetentionPolicy *RetentionPolicy // blob storage only
StaticWebsite *StaticWebsite // blob storage only
}
// Logging represents the Azure Analytics Logging settings
type Logging struct {
Version string
Delete bool
Read bool
Write bool
RetentionPolicy *RetentionPolicy
}
// RetentionPolicy indicates if retention is enabled and for how many days
type RetentionPolicy struct {
Enabled bool
Days *int
}
// Metrics provide request statistics.
type Metrics struct {
Version string
Enabled bool
IncludeAPIs *bool
RetentionPolicy *RetentionPolicy
}
// Cors includes all the CORS rules
type Cors struct {
CorsRule []CorsRule
}
// CorsRule includes all settings for a Cors rule
type CorsRule struct {
AllowedOrigins string
AllowedMethods string
MaxAgeInSeconds int
ExposedHeaders string
AllowedHeaders string
}
// StaticWebsite - The properties that enable an account to host a static website
type StaticWebsite struct {
// Enabled - Indicates whether this account is hosting a static website
Enabled bool
// IndexDocument - The default name of the index page under each directory
IndexDocument *string
// ErrorDocument404Path - The absolute path of the custom 404 page
ErrorDocument404Path *string
}
func (c Client) getServiceProperties(service string, auth authentication) (*ServiceProperties, error) {
query := url.Values{
"restype": {"service"},
"comp": {"properties"},
}
uri := c.getEndpoint(service, "", query)
headers := c.getStandardHeaders()
resp, err := c.exec(http.MethodGet, uri, headers, nil, auth)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
return nil, err
}
var out ServiceProperties
err = xmlUnmarshal(resp.Body, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func (c Client) setServiceProperties(props ServiceProperties, service string, auth authentication) error {
query := url.Values{
"restype": {"service"},
"comp": {"properties"},
}
uri := c.getEndpoint(service, "", query)
// Ideally, StorageServiceProperties would be the output struct
// This is to avoid golint stuttering, while generating the correct XML
type StorageServiceProperties struct {
Logging *Logging
HourMetrics *Metrics
MinuteMetrics *Metrics
Cors *Cors
DeleteRetentionPolicy *RetentionPolicy
StaticWebsite *StaticWebsite
}
input := StorageServiceProperties{
Logging: props.Logging,
HourMetrics: props.HourMetrics,
MinuteMetrics: props.MinuteMetrics,
Cors: props.Cors,
}
// only set these fields for blob storage else it's invalid XML
if service == blobServiceName {
input.DeleteRetentionPolicy = props.DeleteRetentionPolicy
input.StaticWebsite = props.StaticWebsite
}
body, length, err := xmlMarshal(input)
if err != nil {
return err
}
headers := c.getStandardHeaders()
headers["Content-Length"] = strconv.Itoa(length)
resp, err := c.exec(http.MethodPut, uri, headers, body, auth)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusAccepted})
}
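A minimal sketch of setting the blob-only fields through the public service wrappers. GetBlobService and its SetServiceProperties method are assumptions about the same package, mirroring the queue and table wrappers shown in this diff; the credentials are placeholders.
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	client, err := storage.NewBasicClient("myaccount", "ZHVtbXlrZXk=") // placeholder key
	if err != nil {
		log.Fatal(err)
	}
	bsc := client.GetBlobService()

	days := 7
	index := "index.html"
	props := storage.ServiceProperties{
		// DeleteRetentionPolicy and StaticWebsite are only serialized for the
		// blob service; for other services they would produce invalid XML.
		DeleteRetentionPolicy: &storage.RetentionPolicy{Enabled: true, Days: &days},
		StaticWebsite:         &storage.StaticWebsite{Enabled: true, IndexDocument: &index},
	}
	if err := bsc.SetServiceProperties(props); err != nil {
		log.Fatal(err)
	}
}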

View File

@ -1,423 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
const (
tablesURIPath = "/Tables"
nextTableQueryParameter = "NextTableName"
headerNextPartitionKey = "x-ms-continuation-NextPartitionKey"
headerNextRowKey = "x-ms-continuation-NextRowKey"
nextPartitionKeyQueryParameter = "NextPartitionKey"
nextRowKeyQueryParameter = "NextRowKey"
)
// TableAccessPolicy are used for SETTING table policies
type TableAccessPolicy struct {
ID string
StartTime time.Time
ExpiryTime time.Time
CanRead bool
CanAppend bool
CanUpdate bool
CanDelete bool
}
// Table represents an Azure table.
type Table struct {
tsc *TableServiceClient
Name string `json:"TableName"`
OdataEditLink string `json:"odata.editLink"`
OdataID string `json:"odata.id"`
OdataMetadata string `json:"odata.metadata"`
OdataType string `json:"odata.type"`
}
// EntityQueryResult contains the response from
// ExecuteQuery and ExecuteQueryNextResults functions.
type EntityQueryResult struct {
OdataMetadata string `json:"odata.metadata"`
Entities []*Entity `json:"value"`
QueryNextLink
table *Table
}
type continuationToken struct {
NextPartitionKey string
NextRowKey string
}
func (t *Table) buildPath() string {
return fmt.Sprintf("/%s", t.Name)
}
func (t *Table) buildSpecificPath() string {
return fmt.Sprintf("%s('%s')", tablesURIPath, t.Name)
}
// Get gets the referenced table.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities
func (t *Table) Get(timeout uint, ml MetadataLevel) error {
if ml == EmptyPayload {
return errEmptyPayload
}
query := url.Values{
"timeout": {strconv.FormatUint(uint64(timeout), 10)},
}
headers := t.tsc.client.getStandardHeaders()
headers[headerAccept] = string(ml)
uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), query)
resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
if err != nil {
return err
}
defer resp.Body.Close()
if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
return err
}
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
err = json.Unmarshal(respBody, t)
if err != nil {
return err
}
return nil
}
// Create creates the referenced table.
// This function fails if the name is not compliant
// with the specification or the tables already exists.
// ml determines the level of detail of metadata in the operation response,
// or no data at all.
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/create-table
func (t *Table) Create(timeout uint, ml MetadataLevel, options *TableOptions) error {
uri := t.tsc.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{
"timeout": {strconv.FormatUint(uint64(timeout), 10)},
})
type createTableRequest struct {
TableName string `json:"TableName"`
}
req := createTableRequest{TableName: t.Name}
buf := new(bytes.Buffer)
if err := json.NewEncoder(buf).Encode(req); err != nil {
return err
}
headers := t.tsc.client.getStandardHeaders()
headers = addReturnContentHeaders(headers, ml)
headers = addBodyRelatedHeaders(headers, buf.Len())
headers = options.addToHeaders(headers)
resp, err := t.tsc.client.exec(http.MethodPost, uri, headers, buf, t.tsc.auth)
if err != nil {
return err
}
defer resp.Body.Close()
if ml == EmptyPayload {
if err := checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
return err
}
} else {
if err := checkRespCode(resp, []int{http.StatusCreated}); err != nil {
return err
}
}
if ml != EmptyPayload {
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
err = json.Unmarshal(data, t)
if err != nil {
return err
}
}
return nil
}
// Delete deletes the referenced table.
// This function fails if the table is not present.
// Be advised: Delete deletes all the entries that may be present.
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/delete-table
func (t *Table) Delete(timeout uint, options *TableOptions) error {
uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), url.Values{
"timeout": {strconv.Itoa(int(timeout))},
})
headers := t.tsc.client.getStandardHeaders()
headers = addReturnContentHeaders(headers, EmptyPayload)
headers = options.addToHeaders(headers)
resp, err := t.tsc.client.exec(http.MethodDelete, uri, headers, nil, t.tsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusNoContent})
}
// QueryOptions includes options for a query entities operation.
// Top, filter and select are OData query options.
type QueryOptions struct {
Top uint
Filter string
Select []string
RequestID string
}
func (options *QueryOptions) getParameters() (url.Values, map[string]string) {
query := url.Values{}
headers := map[string]string{}
if options != nil {
if options.Top > 0 {
query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10))
}
if options.Filter != "" {
query.Add(OdataFilter, options.Filter)
}
if len(options.Select) > 0 {
query.Add(OdataSelect, strings.Join(options.Select, ","))
}
headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
}
return query, headers
}
// QueryEntities returns the entities in the table.
// You can use query options defined by the OData Protocol specification.
//
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
func (t *Table) QueryEntities(timeout uint, ml MetadataLevel, options *QueryOptions) (*EntityQueryResult, error) {
if ml == EmptyPayload {
return nil, errEmptyPayload
}
query, headers := options.getParameters()
query = addTimeout(query, timeout)
uri := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), query)
return t.queryEntities(uri, headers, ml)
}
// NextResults returns the next page of results
// from a QueryEntities or NextResults operation.
//
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination
func (eqr *EntityQueryResult) NextResults(options *TableOptions) (*EntityQueryResult, error) {
if eqr == nil {
return nil, errNilPreviousResult
}
if eqr.NextLink == nil {
return nil, errNilNextLink
}
headers := options.addToHeaders(map[string]string{})
return eqr.table.queryEntities(*eqr.NextLink, headers, eqr.ml)
}
// SetPermissions sets up table ACL permissions
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/Set-Table-ACL
func (t *Table) SetPermissions(tap []TableAccessPolicy, timeout uint, options *TableOptions) error {
params := url.Values{"comp": {"acl"},
"timeout": {strconv.Itoa(int(timeout))},
}
uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params)
headers := t.tsc.client.getStandardHeaders()
headers = options.addToHeaders(headers)
body, length, err := generateTableACLPayload(tap)
if err != nil {
return err
}
headers["Content-Length"] = strconv.Itoa(length)
resp, err := t.tsc.client.exec(http.MethodPut, uri, headers, body, t.tsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusNoContent})
}
func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) {
sil := SignedIdentifiers{
SignedIdentifiers: []SignedIdentifier{},
}
for _, tap := range policies {
permission := generateTablePermissions(&tap)
signedIdentifier := convertAccessPolicyToXMLStructs(tap.ID, tap.StartTime, tap.ExpiryTime, permission)
sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
}
return xmlMarshal(sil)
}
// GetPermissions gets the table ACL permissions
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/get-table-acl
func (t *Table) GetPermissions(timeout int, options *TableOptions) ([]TableAccessPolicy, error) {
params := url.Values{"comp": {"acl"},
"timeout": {strconv.Itoa(int(timeout))},
}
uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params)
headers := t.tsc.client.getStandardHeaders()
headers = options.addToHeaders(headers)
resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
return nil, err
}
var ap AccessPolicy
err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList)
if err != nil {
return nil, err
}
return updateTableAccessPolicy(ap), nil
}
func (t *Table) queryEntities(uri string, headers map[string]string, ml MetadataLevel) (*EntityQueryResult, error) {
headers = mergeHeaders(headers, t.tsc.client.getStandardHeaders())
if ml != EmptyPayload {
headers[headerAccept] = string(ml)
}
resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
return nil, err
}
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
var entities EntityQueryResult
err = json.Unmarshal(data, &entities)
if err != nil {
return nil, err
}
for i := range entities.Entities {
entities.Entities[i].Table = t
}
entities.table = t
contToken := extractContinuationTokenFromHeaders(resp.Header)
if contToken == nil {
entities.NextLink = nil
} else {
originalURI, err := url.Parse(uri)
if err != nil {
return nil, err
}
v := originalURI.Query()
if contToken.NextPartitionKey != "" {
v.Set(nextPartitionKeyQueryParameter, contToken.NextPartitionKey)
}
if contToken.NextRowKey != "" {
v.Set(nextRowKeyQueryParameter, contToken.NextRowKey)
}
newURI := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), v)
entities.NextLink = &newURI
entities.ml = ml
}
return &entities, nil
}
func extractContinuationTokenFromHeaders(h http.Header) *continuationToken {
ct := continuationToken{
NextPartitionKey: h.Get(headerNextPartitionKey),
NextRowKey: h.Get(headerNextRowKey),
}
if ct.NextPartitionKey != "" || ct.NextRowKey != "" {
return &ct
}
return nil
}
func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy {
taps := []TableAccessPolicy{}
for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
tap := TableAccessPolicy{
ID: policy.ID,
StartTime: policy.AccessPolicy.StartTime,
ExpiryTime: policy.AccessPolicy.ExpiryTime,
}
tap.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
tap.CanAppend = updatePermissions(policy.AccessPolicy.Permission, "a")
tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u")
tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")
taps = append(taps, tap)
}
return taps
}
func generateTablePermissions(tap *TableAccessPolicy) (permissions string) {
// generate the permissions string (raud).
// still want the end user API to have bool flags.
permissions = ""
if tap.CanRead {
permissions += "r"
}
if tap.CanAppend {
permissions += "a"
}
if tap.CanUpdate {
permissions += "u"
}
if tap.CanDelete {
permissions += "d"
}
return permissions
}
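A minimal sketch of creating a table and attaching a stored access policy with the types above (account name and key are placeholders; GetTableService and GetTableReference come from the table service client later in this diff):
package main

import (
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	client, err := storage.NewBasicClient("myaccount", "ZHVtbXlrZXk=") // placeholder key
	if err != nil {
		log.Fatal(err)
	}
	tsc := client.GetTableService()
	table := tsc.GetTableReference("metrics")

	// Create with a 30 second timeout and no response payload.
	if err := table.Create(30, storage.EmptyPayload, nil); err != nil {
		log.Fatal(err)
	}

	// A stored access policy ("read-only") that a SAS token can reference
	// instead of carrying the permissions inline.
	policy := storage.TableAccessPolicy{
		ID:         "read-only",
		StartTime:  time.Now(),
		ExpiryTime: time.Now().Add(24 * time.Hour),
		CanRead:    true,
	}
	if err := table.SetPermissions([]storage.TableAccessPolicy{policy}, 30, nil); err != nil {
		log.Fatal(err)
	}
}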

View File

@ -1,325 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"mime/multipart"
"net/http"
"net/textproto"
"sort"
"strings"
)
// Operation type. Insert, Delete, Replace etc.
type Operation int
// consts for batch operations.
const (
InsertOp = Operation(1)
DeleteOp = Operation(2)
ReplaceOp = Operation(3)
MergeOp = Operation(4)
InsertOrReplaceOp = Operation(5)
InsertOrMergeOp = Operation(6)
)
// BatchEntity is used for tracking Entities to operate on and
// whether operations (replace/merge etc) should be forced.
// Wrapper for regular Entity with additional data specific for the entity.
type BatchEntity struct {
*Entity
Force bool
Op Operation
}
// TableBatch stores all the entities that will be operated on during a batch process.
// Entities can be inserted, replaced or deleted.
type TableBatch struct {
BatchEntitySlice []BatchEntity
// reference to table we're operating on.
Table *Table
}
// defaultChangesetHeaders for changeSets
var defaultChangesetHeaders = map[string]string{
"Accept": "application/json;odata=minimalmetadata",
"Content-Type": "application/json",
"Prefer": "return-no-content",
}
// NewBatch returns a new TableBatch for populating.
func (t *Table) NewBatch() *TableBatch {
return &TableBatch{
Table: t,
}
}
// InsertEntity adds an entity in preparation for a batch insert.
func (t *TableBatch) InsertEntity(entity *Entity) {
be := BatchEntity{Entity: entity, Force: false, Op: InsertOp}
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
}
// InsertOrReplaceEntity adds an entity in preparation for a batch insert or replace.
func (t *TableBatch) InsertOrReplaceEntity(entity *Entity, force bool) {
be := BatchEntity{Entity: entity, Force: force, Op: InsertOrReplaceOp}
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
}
// InsertOrReplaceEntityByForce adds an entity in preparation for a batch insert or replace. Forces regardless of ETag
func (t *TableBatch) InsertOrReplaceEntityByForce(entity *Entity) {
t.InsertOrReplaceEntity(entity, true)
}
// InsertOrMergeEntity adds an entity in preparation for a batch insert or merge.
func (t *TableBatch) InsertOrMergeEntity(entity *Entity, force bool) {
be := BatchEntity{Entity: entity, Force: force, Op: InsertOrMergeOp}
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
}
// InsertOrMergeEntityByForce adds an entity in preparation for a batch insert or merge. Forces regardless of ETag
func (t *TableBatch) InsertOrMergeEntityByForce(entity *Entity) {
t.InsertOrMergeEntity(entity, true)
}
// ReplaceEntity adds an entity in preparation for a batch replace.
func (t *TableBatch) ReplaceEntity(entity *Entity) {
be := BatchEntity{Entity: entity, Force: false, Op: ReplaceOp}
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
}
// DeleteEntity adds an entity in preparation for a batch delete
func (t *TableBatch) DeleteEntity(entity *Entity, force bool) {
be := BatchEntity{Entity: entity, Force: force, Op: DeleteOp}
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
}
// DeleteEntityByForce adds an entity in preparation for a batch delete. Forces regardless of ETag
func (t *TableBatch) DeleteEntityByForce(entity *Entity, force bool) {
t.DeleteEntity(entity, true)
}
// MergeEntity adds an entity in preparation for a batch merge
func (t *TableBatch) MergeEntity(entity *Entity) {
be := BatchEntity{Entity: entity, Force: false, Op: MergeOp}
t.BatchEntitySlice = append(t.BatchEntitySlice, be)
}
// ExecuteBatch executes many table operations in one request to Azure.
// The operations can be combinations of Insert, Delete, Replace and Merge.
// It creates the inner changeset body (one part per operation) and then the outer
// request packet that encompasses the changesets.
// As per document https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/performing-entity-group-transactions
func (t *TableBatch) ExecuteBatch() error {
id, err := newUUID()
if err != nil {
return err
}
changesetBoundary := fmt.Sprintf("changeset_%s", id.String())
uri := t.Table.tsc.client.getEndpoint(tableServiceName, "$batch", nil)
changesetBody, err := t.generateChangesetBody(changesetBoundary)
if err != nil {
return err
}
id, err = newUUID()
if err != nil {
return err
}
boundary := fmt.Sprintf("batch_%s", id.String())
body, err := generateBody(changesetBody, changesetBoundary, boundary)
if err != nil {
return err
}
headers := t.Table.tsc.client.getStandardHeaders()
headers[headerContentType] = fmt.Sprintf("multipart/mixed; boundary=%s", boundary)
resp, err := t.Table.tsc.client.execBatchOperationJSON(http.MethodPost, uri, headers, bytes.NewReader(body.Bytes()), t.Table.tsc.auth)
if err != nil {
return err
}
defer drainRespBody(resp.resp)
if err = checkRespCode(resp.resp, []int{http.StatusAccepted}); err != nil {
// check which batch failed.
operationFailedMessage := t.getFailedOperation(resp.odata.Err.Message.Value)
requestID, date, version := getDebugHeaders(resp.resp.Header)
return AzureStorageServiceError{
StatusCode: resp.resp.StatusCode,
Code: resp.odata.Err.Code,
RequestID: requestID,
Date: date,
APIVersion: version,
Message: operationFailedMessage,
}
}
return nil
}
// getFailedOperation parses the original Azure error string and determines which operation failed
// and generates appropriate message.
func (t *TableBatch) getFailedOperation(errorMessage string) string {
// errorMessage consists of "number:string"; we just need the number.
sp := strings.Split(errorMessage, ":")
if len(sp) > 1 {
msg := fmt.Sprintf("Element %s in the batch returned an unexpected response code.\n%s", sp[0], errorMessage)
return msg
}
// can't parse the message; just return the original message to the client
return errorMessage
}
// generateBody generates the complete body for the batch request.
func generateBody(changeSetBody *bytes.Buffer, changesetBoundary string, boundary string) (*bytes.Buffer, error) {
body := new(bytes.Buffer)
writer := multipart.NewWriter(body)
writer.SetBoundary(boundary)
h := make(textproto.MIMEHeader)
h.Set(headerContentType, fmt.Sprintf("multipart/mixed; boundary=%s\r\n", changesetBoundary))
batchWriter, err := writer.CreatePart(h)
if err != nil {
return nil, err
}
batchWriter.Write(changeSetBody.Bytes())
writer.Close()
return body, nil
}
// generateChangesetBody generates the individual changesets for the various operations within the batch request.
// There is a changeset for Insert, Delete, Merge etc.
func (t *TableBatch) generateChangesetBody(changesetBoundary string) (*bytes.Buffer, error) {
body := new(bytes.Buffer)
writer := multipart.NewWriter(body)
writer.SetBoundary(changesetBoundary)
for _, be := range t.BatchEntitySlice {
t.generateEntitySubset(&be, writer)
}
writer.Close()
return body, nil
}
// generateVerb generates the HTTP request VERB required for each changeset.
func generateVerb(op Operation) (string, error) {
switch op {
case InsertOp:
return http.MethodPost, nil
case DeleteOp:
return http.MethodDelete, nil
case ReplaceOp, InsertOrReplaceOp:
return http.MethodPut, nil
case MergeOp, InsertOrMergeOp:
return "MERGE", nil
default:
return "", errors.New("Unable to detect operation")
}
}
// generateQueryPath generates the query path for within the changesets
// For inserts it will just be a table query path (table name)
// but for other operations (modifying an existing entity) then
// the partition/row keys need to be generated.
func (t *TableBatch) generateQueryPath(op Operation, entity *Entity) string {
if op == InsertOp {
return entity.Table.buildPath()
}
return entity.buildPath()
}
// generateGenericOperationHeaders generates common headers for a given operation.
func generateGenericOperationHeaders(be *BatchEntity) map[string]string {
retval := map[string]string{}
for k, v := range defaultChangesetHeaders {
retval[k] = v
}
if be.Op == DeleteOp || be.Op == ReplaceOp || be.Op == MergeOp {
if be.Force || be.Entity.OdataEtag == "" {
retval["If-Match"] = "*"
} else {
retval["If-Match"] = be.Entity.OdataEtag
}
}
return retval
}
// generateEntitySubset generates body payload for particular batch entity
func (t *TableBatch) generateEntitySubset(batchEntity *BatchEntity, writer *multipart.Writer) error {
h := make(textproto.MIMEHeader)
h.Set(headerContentType, "application/http")
h.Set(headerContentTransferEncoding, "binary")
verb, err := generateVerb(batchEntity.Op)
if err != nil {
return err
}
genericOpHeadersMap := generateGenericOperationHeaders(batchEntity)
queryPath := t.generateQueryPath(batchEntity.Op, batchEntity.Entity)
uri := t.Table.tsc.client.getEndpoint(tableServiceName, queryPath, nil)
operationWriter, err := writer.CreatePart(h)
if err != nil {
return err
}
urlAndVerb := fmt.Sprintf("%s %s HTTP/1.1\r\n", verb, uri)
operationWriter.Write([]byte(urlAndVerb))
writeHeaders(genericOpHeadersMap, &operationWriter)
operationWriter.Write([]byte("\r\n")) // additional \r\n is needed per changeset separating the "headers" and the body.
// delete operation doesn't need a body.
if batchEntity.Op != DeleteOp {
//var e Entity = batchEntity.Entity
body, err := json.Marshal(batchEntity.Entity)
if err != nil {
return err
}
operationWriter.Write(body)
}
return nil
}
func writeHeaders(h map[string]string, writer *io.Writer) {
// This way it is guaranteed the headers will be written in a sorted order
var keys []string
for k := range h {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
(*writer).Write([]byte(fmt.Sprintf("%s: %s\r\n", k, h[k])))
}
}
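A minimal sketch of driving ExecuteBatch. GetEntityReference and the Entity.Properties map are assumptions about the entity type, which is not included in this diff; the credentials are placeholders.
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	client, err := storage.NewBasicClient("myaccount", "ZHVtbXlrZXk=") // placeholder key
	if err != nil {
		log.Fatal(err)
	}
	tsc := client.GetTableService()
	table := tsc.GetTableReference("metrics")

	// All entities in a single batch must share the same partition key.
	a := table.GetEntityReference("p1", "row1")
	a.Properties = map[string]interface{}{"value": 1}
	b := table.GetEntityReference("p1", "row2")
	b.Properties = map[string]interface{}{"value": 2}

	batch := table.NewBatch()
	batch.InsertOrReplaceEntityByForce(a)
	batch.InsertOrMergeEntityByForce(b)

	// One multipart/mixed POST to $batch; a failure reports which element broke.
	if err := batch.ExecuteBatch(); err != nil {
		log.Fatal(err)
	}
}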

View File

@ -1,204 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strconv"
)
const (
headerAccept = "Accept"
headerEtag = "Etag"
headerPrefer = "Prefer"
headerXmsContinuation = "x-ms-Continuation-NextTableName"
)
// TableServiceClient contains operations for Microsoft Azure Table Storage
// Service.
type TableServiceClient struct {
client Client
auth authentication
}
// TableOptions includes options for some table operations
type TableOptions struct {
RequestID string
}
func (options *TableOptions) addToHeaders(h map[string]string) map[string]string {
if options != nil {
h = addToHeaders(h, "x-ms-client-request-id", options.RequestID)
}
return h
}
// QueryNextLink includes information for getting the next page of
// results in query operations
type QueryNextLink struct {
NextLink *string
ml MetadataLevel
}
// GetServiceProperties gets the properties of your storage account's table service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-service-properties
func (t *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) {
return t.client.getServiceProperties(tableServiceName, t.auth)
}
// SetServiceProperties sets the properties of your storage account's table service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-table-service-properties
func (t *TableServiceClient) SetServiceProperties(props ServiceProperties) error {
return t.client.setServiceProperties(props, tableServiceName, t.auth)
}
// GetTableReference returns a Table object for the specified table name.
func (t *TableServiceClient) GetTableReference(name string) *Table {
return &Table{
tsc: t,
Name: name,
}
}
// QueryTablesOptions includes options for some table operations
type QueryTablesOptions struct {
Top uint
Filter string
RequestID string
}
func (options *QueryTablesOptions) getParameters() (url.Values, map[string]string) {
query := url.Values{}
headers := map[string]string{}
if options != nil {
if options.Top > 0 {
query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10))
}
if options.Filter != "" {
query.Add(OdataFilter, options.Filter)
}
headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
}
return query, headers
}
// QueryTables returns the tables in the storage account.
// You can use query options defined by the OData Protocol specification.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-tables
func (t *TableServiceClient) QueryTables(ml MetadataLevel, options *QueryTablesOptions) (*TableQueryResult, error) {
query, headers := options.getParameters()
uri := t.client.getEndpoint(tableServiceName, tablesURIPath, query)
return t.queryTables(uri, headers, ml)
}
// NextResults returns the next page of results
// from a QueryTables or a NextResults operation.
//
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-tables
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination
func (tqr *TableQueryResult) NextResults(options *TableOptions) (*TableQueryResult, error) {
if tqr == nil {
return nil, errNilPreviousResult
}
if tqr.NextLink == nil {
return nil, errNilNextLink
}
headers := options.addToHeaders(map[string]string{})
return tqr.tsc.queryTables(*tqr.NextLink, headers, tqr.ml)
}
// TableQueryResult contains the response from
// QueryTables and QueryTablesNextResults functions.
type TableQueryResult struct {
OdataMetadata string `json:"odata.metadata"`
Tables []Table `json:"value"`
QueryNextLink
tsc *TableServiceClient
}
func (t *TableServiceClient) queryTables(uri string, headers map[string]string, ml MetadataLevel) (*TableQueryResult, error) {
if ml == EmptyPayload {
return nil, errEmptyPayload
}
headers = mergeHeaders(headers, t.client.getStandardHeaders())
headers[headerAccept] = string(ml)
resp, err := t.client.exec(http.MethodGet, uri, headers, nil, t.auth)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
return nil, err
}
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
var out TableQueryResult
err = json.Unmarshal(respBody, &out)
if err != nil {
return nil, err
}
for i := range out.Tables {
out.Tables[i].tsc = t
}
out.tsc = t
nextLink := resp.Header.Get(http.CanonicalHeaderKey(headerXmsContinuation))
if nextLink == "" {
out.NextLink = nil
} else {
originalURI, err := url.Parse(uri)
if err != nil {
return nil, err
}
v := originalURI.Query()
v.Set(nextTableQueryParameter, nextLink)
newURI := t.client.getEndpoint(tableServiceName, tablesURIPath, v)
out.NextLink = &newURI
out.ml = ml
}
return &out, nil
}
func addBodyRelatedHeaders(h map[string]string, length int) map[string]string {
h[headerContentType] = "application/json"
h[headerContentLength] = fmt.Sprintf("%v", length)
h[headerAcceptCharset] = "UTF-8"
return h
}
func addReturnContentHeaders(h map[string]string, ml MetadataLevel) map[string]string {
if ml != EmptyPayload {
h[headerPrefer] = "return-content"
h[headerAccept] = string(ml)
} else {
h[headerPrefer] = "return-no-content"
// From API version 2015-12-11 onwards, Accept header is required
h[headerAccept] = string(NoMetadata)
}
return h
}
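A minimal sketch of paging through QueryTables using the continuation handling above (MinimalMetadata is one of the package's MetadataLevel constants, assumed here since it is not shown in this diff; the credentials are placeholders):
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	client, err := storage.NewBasicClient("myaccount", "ZHVtbXlrZXk=") // placeholder key
	if err != nil {
		log.Fatal(err)
	}
	tsc := client.GetTableService()

	// Fetch 100 tables per page and follow x-ms-Continuation-NextTableName.
	result, err := tsc.QueryTables(storage.MinimalMetadata, &storage.QueryTablesOptions{Top: 100})
	if err != nil {
		log.Fatal(err)
	}
	for {
		for _, t := range result.Tables {
			fmt.Println(t.Name)
		}
		if result.NextLink == nil {
			break
		}
		if result, err = result.NextResults(nil); err != nil {
			log.Fatal(err)
		}
	}
}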

View File

@ -1,260 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"reflect"
"strconv"
"strings"
"time"
uuid "github.com/satori/go.uuid"
)
var (
fixedTime = time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6))
accountSASOptions = AccountSASTokenOptions{
Services: Services{
Blob: true,
},
ResourceTypes: ResourceTypes{
Service: true,
Container: true,
Object: true,
},
Permissions: Permissions{
Read: true,
Write: true,
Delete: true,
List: true,
Add: true,
Create: true,
Update: true,
Process: true,
},
Expiry: fixedTime,
UseHTTPS: true,
}
)
func (c Client) computeHmac256(message string) string {
h := hmac.New(sha256.New, c.accountKey)
h.Write([]byte(message))
return base64.StdEncoding.EncodeToString(h.Sum(nil))
}
func currentTimeRfc1123Formatted() string {
return timeRfc1123Formatted(time.Now().UTC())
}
func timeRfc1123Formatted(t time.Time) string {
return t.Format(http.TimeFormat)
}
func timeRFC3339Formatted(t time.Time) string {
return t.Format("2006-01-02T15:04:05.0000000Z")
}
func mergeParams(v1, v2 url.Values) url.Values {
out := url.Values{}
for k, v := range v1 {
out[k] = v
}
for k, v := range v2 {
vals, ok := out[k]
if ok {
vals = append(vals, v...)
out[k] = vals
} else {
out[k] = v
}
}
return out
}
func prepareBlockListRequest(blocks []Block) string {
s := `<?xml version="1.0" encoding="utf-8"?><BlockList>`
for _, v := range blocks {
s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status)
}
s += `</BlockList>`
return s
}
func xmlUnmarshal(body io.Reader, v interface{}) error {
data, err := ioutil.ReadAll(body)
if err != nil {
return err
}
return xml.Unmarshal(data, v)
}
func xmlMarshal(v interface{}) (io.Reader, int, error) {
b, err := xml.Marshal(v)
if err != nil {
return nil, 0, err
}
return bytes.NewReader(b), len(b), nil
}
func headersFromStruct(v interface{}) map[string]string {
headers := make(map[string]string)
value := reflect.ValueOf(v)
for i := 0; i < value.NumField(); i++ {
key := value.Type().Field(i).Tag.Get("header")
if key != "" {
reflectedValue := reflect.Indirect(value.Field(i))
var val string
if reflectedValue.IsValid() {
switch reflectedValue.Type() {
case reflect.TypeOf(fixedTime):
val = timeRfc1123Formatted(reflectedValue.Interface().(time.Time))
case reflect.TypeOf(uint64(0)), reflect.TypeOf(uint(0)):
val = strconv.FormatUint(reflectedValue.Uint(), 10)
case reflect.TypeOf(int(0)):
val = strconv.FormatInt(reflectedValue.Int(), 10)
default:
val = reflectedValue.String()
}
}
if val != "" {
headers[key] = val
}
}
}
return headers
}
// merges extraHeaders into headers and returns headers
func mergeHeaders(headers, extraHeaders map[string]string) map[string]string {
for k, v := range extraHeaders {
headers[k] = v
}
return headers
}
func addToHeaders(h map[string]string, key, value string) map[string]string {
if value != "" {
h[key] = value
}
return h
}
func addTimeToHeaders(h map[string]string, key string, value *time.Time) map[string]string {
if value != nil {
h = addToHeaders(h, key, timeRfc1123Formatted(*value))
}
return h
}
func addTimeout(params url.Values, timeout uint) url.Values {
if timeout > 0 {
params.Add("timeout", fmt.Sprintf("%v", timeout))
}
return params
}
func addSnapshot(params url.Values, snapshot *time.Time) url.Values {
if snapshot != nil {
params.Add("snapshot", timeRFC3339Formatted(*snapshot))
}
return params
}
func getTimeFromHeaders(h http.Header, key string) (*time.Time, error) {
var out time.Time
var err error
outStr := h.Get(key)
if outStr != "" {
out, err = time.Parse(time.RFC1123, outStr)
if err != nil {
return nil, err
}
}
return &out, nil
}
// TimeRFC1123 is an alias for time.Time needed for custom Unmarshalling
type TimeRFC1123 time.Time
// UnmarshalXML is a custom unmarshaller that overrides the default time unmarshal which uses a different time layout.
func (t *TimeRFC1123) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
var value string
d.DecodeElement(&value, &start)
parse, err := time.Parse(time.RFC1123, value)
if err != nil {
return err
}
*t = TimeRFC1123(parse)
return nil
}
// MarshalXML marshals using time.RFC1123.
func (t *TimeRFC1123) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
return e.EncodeElement(time.Time(*t).Format(time.RFC1123), start)
}
// returns a map of custom metadata values from the specified HTTP header
func getMetadataFromHeaders(header http.Header) map[string]string {
metadata := make(map[string]string)
for k, v := range header {
// Can't trust CanonicalHeaderKey() to munge case
// reliably. "_" is allowed in identifiers:
// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
// http://tools.ietf.org/html/rfc7230#section-3.2
// ...but "_" is considered invalid by
// CanonicalMIMEHeaderKey in
// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
// so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl".
k = strings.ToLower(k)
if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
continue
}
// metadata["lol"] = content of the last X-Ms-Meta-Lol header
k = k[len(userDefinedMetadataHeaderPrefix):]
metadata[k] = v[len(v)-1]
}
if len(metadata) == 0 {
return nil
}
return metadata
}
// newUUID returns a new uuid using RFC 4122 algorithm.
func newUUID() (uuid.UUID, error) {
u := [16]byte{}
// Set all bits to randomly (or pseudo-randomly) chosen values.
_, err := rand.Read(u[:])
if err != nil {
return uuid.UUID{}, err
}
u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) // u.setVariant(ReservedRFC4122)
u[6] = (u[6] & 0xF) | (uuid.V4 << 4) // u.setVersion(V4)
return uuid.FromBytes(u[:])
}
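As a small illustration of why the TimeRFC1123 alias above exists: Azure XML payloads carry Last-Modified in RFC 1123 form, which the default time.Time unmarshaller rejects. The wrapping struct below is invented for the example.
package main

import (
	"encoding/xml"
	"fmt"
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// props is a throwaway type for this example only.
type props struct {
	XMLName      xml.Name            `xml:"Properties"`
	LastModified storage.TimeRFC1123 `xml:"Last-Modified"`
}

func main() {
	raw := `<Properties><Last-Modified>Mon, 02 Jan 2006 15:04:05 MST</Last-Modified></Properties>`
	var p props
	if err := xml.Unmarshal([]byte(raw), &p); err != nil {
		log.Fatal(err)
	}
	// The custom UnmarshalXML parsed the RFC 1123 string into a time.Time.
	fmt.Println(time.Time(p.LastModified).Format(time.RFC3339))
}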

View File

@ -18,4 +18,4 @@ package version
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// Number contains the semantic version of this SDK.
const Number = "v36.2.0"
const Number = "v40.3.0"

View File

@ -8,5 +8,5 @@ require (
github.com/Azure/go-autorest/autorest/mocks v0.3.0
github.com/Azure/go-autorest/tracing v0.5.0
github.com/dgrijalva/jwt-go v3.2.0+incompatible
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413
)

View File

@ -19,5 +19,10 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumC
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

View File

@ -24,6 +24,7 @@ import (
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"net/http"
@ -248,7 +249,7 @@ func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalTo
"sub": spt.inner.ClientID,
"jti": base64.URLEncoding.EncodeToString(jti),
"nbf": time.Now().Unix(),
"exp": time.Now().Add(time.Hour * 24).Unix(),
"exp": time.Now().Add(24 * time.Hour).Unix(),
}
signedString, err := token.SignedString(secret.PrivateKey)
@ -972,6 +973,10 @@ func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http
delay := time.Duration(0)
for attempt < maxAttempts {
if resp != nil && resp.Body != nil {
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}
resp, err = sender.Do(req)
// we want to retry if err is not nil or the status code is in the list of retry codes
if err == nil && !responseHasStatusCode(resp, retries...) {

View File

@ -171,20 +171,21 @@ func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator {
removeRequestBody(&rCopy)
resp, err := bacb.sender.Do(&rCopy)
if err == nil && resp.StatusCode == 401 {
defer resp.Body.Close()
if hasBearerChallenge(resp) {
bc, err := newBearerChallenge(resp)
if err != nil {
return r, err
}
DrainResponseBody(resp)
if resp.StatusCode == 401 && hasBearerChallenge(resp.Header) {
bc, err := newBearerChallenge(resp.Header)
if err != nil {
return r, err
}
if bacb.callback != nil {
ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"])
if err != nil {
return r, err
}
if bacb.callback != nil {
ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"])
if err != nil {
return r, err
}
return Prepare(r, ba.WithAuthorization())
}
return Prepare(r, ba.WithAuthorization())
}
}
}
@ -194,8 +195,8 @@ func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator {
}
// returns true if the HTTP response contains a bearer challenge
func hasBearerChallenge(resp *http.Response) bool {
authHeader := resp.Header.Get(bearerChallengeHeader)
func hasBearerChallenge(header http.Header) bool {
authHeader := header.Get(bearerChallengeHeader)
if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 {
return false
}
@ -206,8 +207,8 @@ type bearerChallenge struct {
values map[string]string
}
func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) {
challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader))
func newBearerChallenge(header http.Header) (bc bearerChallenge, err error) {
challenge := strings.TrimSpace(header.Get(bearerChallengeHeader))
trimmedChallenge := challenge[len(bearer)+1:]
// challenge is a set of key=value pairs that are comma delimited

View File

@ -0,0 +1,69 @@
package autorest
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"fmt"
"net/http"
"strings"
)
// SASTokenAuthorizer implements an authorization for SAS Token Authentication
// this can be used for interaction with Blob Storage Endpoints
type SASTokenAuthorizer struct {
sasToken string
}
// NewSASTokenAuthorizer creates a SASTokenAuthorizer using the given credentials
func NewSASTokenAuthorizer(sasToken string) (*SASTokenAuthorizer, error) {
if strings.TrimSpace(sasToken) == "" {
return nil, fmt.Errorf("sasToken cannot be empty")
}
token := sasToken
if strings.HasPrefix(sasToken, "?") {
token = strings.TrimPrefix(sasToken, "?")
}
return &SASTokenAuthorizer{
sasToken: token,
}, nil
}
// WithAuthorization returns a PrepareDecorator that adds a shared access signature token to the
// URI's query parameters. This can be used for the Blob, Queue, and File Services.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/delegate-access-with-shared-access-signature
func (sas *SASTokenAuthorizer) WithAuthorization() PrepareDecorator {
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
r, err := p.Prepare(r)
if err != nil {
return r, err
}
if r.URL.RawQuery != "" {
// When retrying the request we need to ensure the sasToken isn't appended a second time
if !strings.Contains(r.URL.RawQuery, sas.sasToken) {
r.URL.RawQuery = fmt.Sprintf("%s&%s", r.URL.RawQuery, sas.sasToken)
}
} else {
r.URL.RawQuery = sas.sasToken
}
return Prepare(r)
})
}
}
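A minimal sketch of wiring the new SAS authorizer into a request with autorest.Prepare (the token value and URL are fabricated placeholders):
package main

import (
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// A leading "?" is stripped by the constructor; the token itself is a placeholder.
	auth, err := autorest.NewSASTokenAuthorizer("?sv=2018-11-09&ss=b&sig=PLACEHOLDER")
	if err != nil {
		log.Fatal(err)
	}

	req, err := http.NewRequest(http.MethodGet, "https://myaccount.blob.core.windows.net/states/terraform.tfstate", nil)
	if err != nil {
		log.Fatal(err)
	}
	req, err = autorest.Prepare(req, auth.WithAuthorization())
	if err != nil {
		log.Fatal(err)
	}
	// The SAS token is now appended to the query string (and not duplicated on retries).
	log.Println(req.URL.String())
}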

View File

@ -0,0 +1,318 @@
package autorest
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"fmt"
"net/http"
"net/url"
"sort"
"strings"
"time"
)
// SharedKeyType defines the enumeration for the various shared key types.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key for details on the shared key types.
type SharedKeyType string
const (
// SharedKey is used to authorize against blobs, files and queues services.
SharedKey SharedKeyType = "sharedKey"
// SharedKeyForAccount is used to authorize against the account.
SharedKeyForAccount SharedKeyType = "sharedKeyAccount"
// SharedKeyForTable is used to authorize against the table service.
SharedKeyForTable SharedKeyType = "sharedKeyTable"
// SharedKeyLite is used to authorize against blobs, files and queues services. It's provided for
// backwards compatibility with API versions before 2009-09-19. Prefer SharedKey instead.
SharedKeyLite SharedKeyType = "sharedKeyLite"
// SharedKeyLiteForTable is used to authorize against the table service. It's provided for
// backwards compatibility with older table API versions. Prefer SharedKeyForTable instead.
SharedKeyLiteForTable SharedKeyType = "sharedKeyLiteTable"
)
const (
headerAccept = "Accept"
headerAcceptCharset = "Accept-Charset"
headerContentEncoding = "Content-Encoding"
headerContentLength = "Content-Length"
headerContentMD5 = "Content-MD5"
headerContentLanguage = "Content-Language"
headerIfModifiedSince = "If-Modified-Since"
headerIfMatch = "If-Match"
headerIfNoneMatch = "If-None-Match"
headerIfUnmodifiedSince = "If-Unmodified-Since"
headerDate = "Date"
headerXMSDate = "X-Ms-Date"
headerXMSVersion = "x-ms-version"
headerRange = "Range"
)
const storageEmulatorAccountName = "devstoreaccount1"
// SharedKeyAuthorizer implements an authorization for Shared Key
// this can be used for interaction with Blob, File and Queue Storage Endpoints
type SharedKeyAuthorizer struct {
accountName string
accountKey []byte
keyType SharedKeyType
}
// NewSharedKeyAuthorizer creates a SharedKeyAuthorizer using the provided credentials and shared key type.
func NewSharedKeyAuthorizer(accountName, accountKey string, keyType SharedKeyType) (*SharedKeyAuthorizer, error) {
key, err := base64.StdEncoding.DecodeString(accountKey)
if err != nil {
return nil, fmt.Errorf("malformed storage account key: %v", err)
}
return &SharedKeyAuthorizer{
accountName: accountName,
accountKey: key,
keyType: keyType,
}, nil
}
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
// value is "<SharedKeyType> " followed by the computed key.
// This can be used for the Blob, Queue, and File Services
//
// from: https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key
// You may use Shared Key authorization to authorize a request made against the
// 2009-09-19 version and later of the Blob and Queue services,
// and version 2014-02-14 and later of the File services.
func (sk *SharedKeyAuthorizer) WithAuthorization() PrepareDecorator {
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
r, err := p.Prepare(r)
if err != nil {
return r, err
}
sk, err := buildSharedKey(sk.accountName, sk.accountKey, r, sk.keyType)
if err != nil {
return r, err
}
return Prepare(r, WithHeader(headerAuthorization, sk))
})
}
}
func buildSharedKey(accName string, accKey []byte, req *http.Request, keyType SharedKeyType) (string, error) {
canRes, err := buildCanonicalizedResource(accName, req.URL.String(), keyType)
if err != nil {
return "", err
}
if req.Header == nil {
req.Header = http.Header{}
}
// ensure date is set
if req.Header.Get(headerDate) == "" && req.Header.Get(headerXMSDate) == "" {
date := time.Now().UTC().Format(http.TimeFormat)
req.Header.Set(headerXMSDate, date)
}
if keyType == SharedKeyForAccount {
// ensure a content length is set if appropriate
if req.Header.Get(headerContentLength) == "" {
req.Header.Set("Content-Length", fmt.Sprintf("%d", int(req.ContentLength)))
}
}
canString, err := buildCanonicalizedString(req.Method, req.Header, canRes, keyType)
if err != nil {
return "", err
}
return createAuthorizationHeader(accName, accKey, canString, keyType), nil
}
func buildCanonicalizedResource(accountName, uri string, keyType SharedKeyType) (string, error) {
errMsg := "buildCanonicalizedResource error: %s"
u, err := url.Parse(uri)
if err != nil {
return "", fmt.Errorf(errMsg, err.Error())
}
cr := bytes.NewBufferString("")
if accountName != storageEmulatorAccountName {
cr.WriteString("/")
cr.WriteString(getCanonicalizedAccountName(accountName))
if keyType == SharedKeyForAccount {
cr.WriteString("/")
}
}
if len(u.Path) > 0 {
// Any portion of the CanonicalizedResource string that is derived from
// the resource's URI should be encoded exactly as it is in the URI.
// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
cr.WriteString(u.EscapedPath())
}
params, err := url.ParseQuery(u.RawQuery)
if err != nil {
return "", fmt.Errorf(errMsg, err.Error())
}
// See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277
if keyType == SharedKey || keyType == SharedKeyForAccount {
if len(params) > 0 {
cr.WriteString("\n")
keys := []string{}
for key := range params {
keys = append(keys, key)
}
sort.Strings(keys)
completeParams := []string{}
for _, key := range keys {
if len(params[key]) > 1 {
sort.Strings(params[key])
}
completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")))
}
cr.WriteString(strings.Join(completeParams, "\n"))
}
} else {
// search for "comp" parameter, if exists then add it to canonicalizedresource
if v, ok := params["comp"]; ok {
cr.WriteString("?comp=" + v[0])
}
}
return string(cr.Bytes()), nil
}
func getCanonicalizedAccountName(accountName string) string {
// since we may be trying to access a secondary storage account, we need to
// remove the -secondary part of the storage name
return strings.TrimSuffix(accountName, "-secondary")
}
func buildCanonicalizedString(verb string, headers http.Header, canonicalizedResource string, keyType SharedKeyType) (string, error) {
contentLength := headers.Get(headerContentLength)
if contentLength == "0" {
contentLength = ""
}
date := headers.Get(headerDate)
if v := headers.Get(headerXMSDate); v != "" {
if keyType == SharedKey || keyType == SharedKeyForAccount || keyType == SharedKeyLite {
date = ""
} else {
date = v
}
}
var canString string
switch keyType {
case SharedKey, SharedKeyForAccount:
canString = strings.Join([]string{
verb,
headers.Get(headerContentEncoding),
headers.Get(headerContentLanguage),
contentLength,
headers.Get(headerContentMD5),
headers.Get(headerContentType),
date,
headers.Get(headerIfModifiedSince),
headers.Get(headerIfMatch),
headers.Get(headerIfNoneMatch),
headers.Get(headerIfUnmodifiedSince),
headers.Get(headerRange),
buildCanonicalizedHeader(headers),
canonicalizedResource,
}, "\n")
case SharedKeyForTable:
canString = strings.Join([]string{
verb,
headers.Get(headerContentMD5),
headers.Get(headerContentType),
date,
canonicalizedResource,
}, "\n")
case SharedKeyLite:
canString = strings.Join([]string{
verb,
headers.Get(headerContentMD5),
headers.Get(headerContentType),
date,
buildCanonicalizedHeader(headers),
canonicalizedResource,
}, "\n")
case SharedKeyLiteForTable:
canString = strings.Join([]string{
date,
canonicalizedResource,
}, "\n")
default:
return "", fmt.Errorf("key type '%s' is not supported", keyType)
}
return canString, nil
}
func buildCanonicalizedHeader(headers http.Header) string {
cm := make(map[string]string)
for k := range headers {
headerName := strings.TrimSpace(strings.ToLower(k))
if strings.HasPrefix(headerName, "x-ms-") {
cm[headerName] = headers.Get(k)
}
}
if len(cm) == 0 {
return ""
}
keys := []string{}
for key := range cm {
keys = append(keys, key)
}
sort.Strings(keys)
ch := bytes.NewBufferString("")
for _, key := range keys {
ch.WriteString(key)
ch.WriteRune(':')
ch.WriteString(cm[key])
ch.WriteRune('\n')
}
return strings.TrimSuffix(string(ch.Bytes()), "\n")
}
func createAuthorizationHeader(accountName string, accountKey []byte, canonicalizedString string, keyType SharedKeyType) string {
h := hmac.New(sha256.New, accountKey)
h.Write([]byte(canonicalizedString))
signature := base64.StdEncoding.EncodeToString(h.Sum(nil))
var key string
switch keyType {
case SharedKey, SharedKeyForAccount, SharedKeyForTable:
key = "SharedKey"
case SharedKeyLite, SharedKeyLiteForTable:
key = "SharedKeyLite"
}
return fmt.Sprintf("%s %s:%s", key, getCanonicalizedAccountName(accountName), signature)
}
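
As a rough usage sketch (the storage account name, key and request URL below are placeholders), the authorizer's own WithAuthorization decorator can be applied when preparing a request:

```go
package main

import (
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// placeholders: substitute a real storage account name and base64-encoded key
	accountName := "storageaccount1"
	accountKey := "aGVsbG8gd29ybGQ="

	authorizer, err := autorest.NewSharedKeyAuthorizer(accountName, accountKey, autorest.SharedKeyLite)
	if err != nil {
		log.Fatalf("Error building Shared Key Authorizer: %+v", err)
	}

	req, err := http.NewRequest(http.MethodGet, "https://storageaccount1.blob.core.windows.net/mycontainer?restype=container&comp=list", nil)
	if err != nil {
		log.Fatalf("Error building request: %+v", err)
	}

	// the decorator canonicalizes the request and adds an
	// "Authorization: SharedKeyLite <account>:<signature>" header
	if _, err := autorest.Prepare(req, authorizer.WithAuthorization()); err != nil {
		log.Fatalf("Error preparing request: %+v", err)
	}
}
```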

View File

@ -258,7 +258,17 @@ func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) {
if err != nil {
return nil, err
}
return sender.Do(req)
resp, err := sender.Do(req)
if err == nil && resp.Body != nil {
// copy the body and close it so callers don't have to
defer resp.Body.Close()
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return resp, err
}
resp.Body = ioutil.NopCloser(bytes.NewReader(b))
}
return resp, err
}
type pollingTracker interface {

View File

@ -17,6 +17,7 @@ package azure
// limitations under the License.
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
@ -143,7 +144,7 @@ type RequestError struct {
autorest.DetailedError
// The error returned by the Azure service.
ServiceError *ServiceError `json:"error"`
ServiceError *ServiceError `json:"error" xml:"Error"`
// The request id (from the x-ms-request-id-header) of the request.
RequestID string
@ -285,26 +286,34 @@ func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
var e RequestError
defer resp.Body.Close()
encodedAs := autorest.EncodedAsJSON
if strings.Contains(resp.Header.Get("Content-Type"), "xml") {
encodedAs = autorest.EncodedAsXML
}
// Copy and replace the Body in case it does not contain an error object.
// This will leave the Body available to the caller.
b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e)
b, decodeErr := autorest.CopyAndDecode(encodedAs, resp.Body, &e)
resp.Body = ioutil.NopCloser(&b)
if decodeErr != nil {
return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr)
}
if e.ServiceError == nil {
// Check if error is unwrapped ServiceError
if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil {
decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes()))
if err := decoder.Decode(&e.ServiceError); err != nil {
return err
}
}
if e.ServiceError.Message == "" {
// if we're here it means the returned error wasn't OData v4 compliant.
// try to unmarshal the body as raw JSON in hopes of getting something.
// try to unmarshal the body in hopes of getting something.
rawBody := map[string]interface{}{}
if err := json.Unmarshal(b.Bytes(), &rawBody); err != nil {
decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes()))
if err := decoder.Decode(&rawBody); err != nil {
return err
}
e.ServiceError = &ServiceError{
Code: "Unknown",
Message: "Unknown service error",

View File

@ -3,8 +3,9 @@ module github.com/Azure/go-autorest/autorest/azure/cli
go 1.12
require (
github.com/Azure/go-autorest/autorest/adal v0.5.0
github.com/Azure/go-autorest/autorest/date v0.1.0
github.com/Azure/go-autorest/autorest v0.9.0
github.com/Azure/go-autorest/autorest/adal v0.6.0
github.com/Azure/go-autorest/autorest/date v0.2.0
github.com/dimchansky/utfbom v1.1.0
github.com/mitchellh/go-homedir v1.1.0
)

View File

@ -1,9 +1,21 @@
github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.6.0 h1:UCTq22yE3RPgbU/8u4scfnnzuCW6pwQ9n+uBtV78ouo=
github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=

View File

@ -1,4 +1,6 @@
package storage
// +build modhack
package cli
// Copyright 2017 Microsoft Corporation
//
@ -14,25 +16,9 @@ package storage
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"net/url"
"time"
)
// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of
// the resultant binary.
// SASOptions includes options used by SAS URIs for different
// services and resources.
type SASOptions struct {
APIVersion string
Start time.Time
Expiry time.Time
IP string
UseHTTPS bool
Identifier string
}
func addQueryParameter(query url.Values, key, value string) url.Values {
if value != "" {
query.Add(key, value)
}
return query
}
// Necessary for safely adding multi-module repo.
// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
import _ "github.com/Azure/go-autorest/autorest"

View File

@ -47,11 +47,15 @@ func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator {
if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration {
return resp, err
}
var re RequestError
err = autorest.Respond(
resp,
autorest.ByUnmarshallingJSON(&re),
)
if strings.Contains(r.Header.Get("Content-Type"), "xml") {
// XML errors (e.g. Storage Data Plane) only return the inner object
err = autorest.Respond(resp, autorest.ByUnmarshallingXML(&re.ServiceError))
} else {
err = autorest.Respond(resp, autorest.ByUnmarshallingJSON(&re))
}
if err != nil {
return resp, err
}

View File

@ -179,6 +179,11 @@ type Client struct {
// Set to true to skip attempted registration of resource providers (false by default).
SkipResourceProviderRegistration bool
// SendDecorators can be used to override the default chain of SendDecorators.
// This can be used to specify things like a custom retry SendDecorator.
// Set this to an empty slice to use no SendDecorators.
SendDecorators []SendDecorator
}
// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed
@ -298,3 +303,21 @@ func (c Client) ByInspecting() RespondDecorator {
}
return c.ResponseInspector
}
// Send sends the provided http.Request using the client's Sender or the default sender.
// It returns the http.Response and possible error. It also accepts a, possibly empty,
// default set of SendDecorators used when sending the request.
// SendDecorators have the following precedence:
// 1. In a request's context via WithSendDecorators()
// 2. Specified on the client in SendDecorators
// 3. The default values specified in this method
func (c Client) Send(req *http.Request, decorators ...SendDecorator) (*http.Response, error) {
if c.SendDecorators != nil {
decorators = c.SendDecorators
}
inCtx := req.Context().Value(ctxSendDecorators{})
if sd, ok := inCtx.([]SendDecorator); ok {
decorators = sd
}
return SendWithSender(c, req, decorators...)
}
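
As a hedged illustration of the new SendDecorators field (the user agent, status codes and URL are arbitrary placeholders, not taken from this changeset), a caller can replace the default retry chain for a single client before sending requests:

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	client := autorest.NewClientWithUserAgent("example-user-agent")

	// override the default chain: retry transient 5xx responses up to three
	// times with a one second exponential back-off
	client.SendDecorators = []autorest.SendDecorator{
		autorest.DoRetryForStatusCodes(3, 1*time.Second,
			http.StatusInternalServerError,
			http.StatusBadGateway,
			http.StatusServiceUnavailable),
	}

	req, err := http.NewRequest(http.MethodGet, "https://example.com/", nil)
	if err != nil {
		log.Fatalf("Error building request: %+v", err)
	}

	resp, err := client.Send(req)
	if err != nil {
		log.Fatalf("Error sending request: %+v", err)
	}
	defer resp.Body.Close()
}
```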

View File

@ -3,9 +3,9 @@ module github.com/Azure/go-autorest/autorest
go 1.12
require (
github.com/Azure/go-autorest/autorest/adal v0.5.0
github.com/Azure/go-autorest/autorest/mocks v0.2.0
github.com/Azure/go-autorest/autorest/adal v0.8.2
github.com/Azure/go-autorest/autorest/mocks v0.3.0
github.com/Azure/go-autorest/logger v0.1.0
github.com/Azure/go-autorest/tracing v0.5.0
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413
)

View File

@ -1,11 +1,18 @@
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.2 h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0=
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
@ -14,5 +21,10 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumC
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

View File

@ -243,6 +243,7 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
if err != nil {
return resp, err
}
DrainResponseBody(resp)
resp, err = s.Do(rr.Request())
if err == nil {
return resp, err
@ -256,6 +257,12 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
}
}
// Count429AsRetry indicates that a 429 response should be included as a retry attempt.
var Count429AsRetry = true
// Max429Delay is the maximum duration to wait between retries on a 429 if no Retry-After header was received.
var Max429Delay time.Duration
// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified
// number of attempts, exponentially backing off between requests using the supplied backoff
// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request.
@ -263,7 +270,7 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (*http.Response, error) {
return doRetryForStatusCodesImpl(s, r, false, attempts, backoff, 0, codes...)
return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, 0, codes...)
})
}
}
@ -275,7 +282,7 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se
func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (*http.Response, error) {
return doRetryForStatusCodesImpl(s, r, true, attempts, backoff, cap, codes...)
return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, cap, codes...)
})
}
}
@ -283,11 +290,12 @@ func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, code
func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) {
rr := NewRetriableRequest(r)
// Increment to add the first call (attempts denotes number of retries)
for attempt := 0; attempt < attempts+1; {
for attempt, delayCount := 0, 0; attempt < attempts+1; {
err = rr.Prepare()
if err != nil {
return
}
DrainResponseBody(resp)
resp, err = s.Do(rr.Request())
// we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication
// resp and err will both have a value, so in this case we don't want to retry as it will never succeed.
@ -295,7 +303,12 @@ func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempt
return resp, err
}
delayed := DelayWithRetryAfter(resp, r.Context().Done())
if !delayed && !DelayForBackoffWithCap(backoff, cap, attempt, r.Context().Done()) {
// if this was a 429 set the delay cap as specified.
// applicable only in the absence of a retry-after header.
if resp != nil && resp.StatusCode == http.StatusTooManyRequests {
cap = Max429Delay
}
if !delayed && !DelayForBackoffWithCap(backoff, cap, delayCount, r.Context().Done()) {
return resp, r.Context().Err()
}
// when count429 == false don't count a 429 against the number
@ -303,6 +316,9 @@ func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempt
if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) {
attempt++
}
// delay count is tracked separately from attempts to
// ensure that 429 participates in exponential back-off
delayCount++
}
return resp, err
}
@ -347,6 +363,7 @@ func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator {
if err != nil {
return resp, err
}
DrainResponseBody(resp)
resp, err = s.Do(rr.Request())
if err == nil {
return resp, err

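
The Count429AsRetry and Max429Delay variables introduced above are package-level knobs; a sketch of tuning them once at start-up, before any clients issue requests (the values shown are arbitrary):

```go
package main

import (
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// don't count 429 (Too Many Requests) responses against the retry budget
	autorest.Count429AsRetry = false

	// cap the wait between retries on a 429 when no Retry-After header is returned
	autorest.Max429Delay = 5 * time.Minute
}
```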
View File

@ -20,6 +20,7 @@ import (
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
@ -140,18 +141,18 @@ func MapToValues(m map[string]interface{}) url.Values {
return v
}
// AsStringSlice method converts interface{} to []string. This expects a
//that the parameter passed to be a slice or array of a type that has the underlying
//type a string.
// AsStringSlice method converts interface{} to []string.
// s must be of type slice or array or an error is returned.
// Each element of s will be converted to its string representation.
func AsStringSlice(s interface{}) ([]string, error) {
v := reflect.ValueOf(s)
if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
return nil, NewError("autorest", "AsStringSlice", "the value's type is not an array.")
return nil, NewError("autorest", "AsStringSlice", "the value's type is not a slice or array.")
}
stringSlice := make([]string, 0, v.Len())
for i := 0; i < v.Len(); i++ {
stringSlice = append(stringSlice, v.Index(i).String())
stringSlice = append(stringSlice, fmt.Sprintf("%v", v.Index(i)))
}
return stringSlice, nil
}
@ -226,3 +227,13 @@ func IsTemporaryNetworkError(err error) bool {
}
return false
}
// DrainResponseBody reads the response body then closes it.
func DrainResponseBody(resp *http.Response) error {
if resp != nil && resp.Body != nil {
_, err := io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
return err
}
return nil
}

View File

@ -19,7 +19,7 @@ import (
"runtime"
)
const number = "v13.0.2"
const number = "v14.0.0"
var (
userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",

201
vendor/github.com/tombuildsstuff/giovanni/LICENSE generated vendored Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,46 @@
## Blob Storage Blobs SDK for API version 2018-11-09
This package allows you to interact with the Blobs API within Azure Blob Storage
### Supported Authorizers
* Azure Active Directory (for the Resource Endpoint `https://storage.azure.com`)
* SharedKeyLite (Blob, File & Queue)
### Example Usage
```go
package main
import (
"context"
"fmt"
"time"
"github.com/Azure/go-autorest/autorest"
"github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs"
)
func Example() error {
accountName := "storageaccount1"
storageAccountKey := "ABC123...."
containerName := "mycontainer"
fileName := "example-large-file.iso"
storageAuth, err := autorest.NewSharedKeyAuthorizer(accountName, storageAccountKey, autorest.SharedKeyLite)
if err != nil {
return fmt.Errorf("Error building SharedKeyLite Authorizer: %+v", err)
}
blobClient := blobs.New()
blobClient.Client.Authorizer = storageAuth
ctx := context.TODO()
copyInput := blobs.CopyInput{
CopySource: "http://releases.ubuntu.com/18.04.2/ubuntu-18.04.2-desktop-amd64.iso",
}
refreshInterval := 5 * time.Second
if err := blobClient.CopyAndWait(ctx, accountName, containerName, fileName, copyInput, refreshInterval); err != nil {
return fmt.Errorf("Error copying: %s", err)
}
return nil
}
```

View File

@ -0,0 +1,180 @@
package blobs
import (
"context"
"fmt"
"net/http"
"strconv"
"strings"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/tombuildsstuff/giovanni/storage/internal/endpoints"
)
type AppendBlockInput struct {
// A number indicating the byte offset to compare.
// Append Block will succeed only if the append position is equal to this number.
// If it is not, the request will fail with an AppendPositionConditionNotMet
// error (HTTP status code 412 Precondition Failed)
BlobConditionAppendPosition *int64
// The max length in bytes permitted for the append blob.
// If the Append Block operation would cause the blob to exceed that limit or if the blob size
// is already greater than the value specified in this header, the request will fail with
// an MaxBlobSizeConditionNotMet error (HTTP status code 412 Precondition Failed).
BlobConditionMaxSize *int64
// The Bytes which should be appended to the end of this Append Blob.
// This can either be nil, which creates an empty blob, or a byte array
Content *[]byte
// An MD5 hash of the block content.
// This hash is used to verify the integrity of the block during transport.
// When this header is specified, the storage service compares the hash of the content
// that has arrived with this header value.
//
// Note that this MD5 hash is not stored with the blob.
// If the two hashes do not match, the operation will fail with error code 400 (Bad Request).
ContentMD5 *string
// Required if the blob has an active lease.
// To perform this operation on a blob with an active lease, specify the valid lease ID for this header.
LeaseID *string
}
type AppendBlockResult struct {
autorest.Response
BlobAppendOffset string
BlobCommittedBlockCount int64
ContentMD5 string
ETag string
LastModified string
}
// AppendBlock commits a new block of data to the end of an existing append blob.
func (client Client) AppendBlock(ctx context.Context, accountName, containerName, blobName string, input AppendBlockInput) (result AppendBlockResult, err error) {
if accountName == "" {
return result, validation.NewError("blobs.Client", "AppendBlock", "`accountName` cannot be an empty string.")
}
if containerName == "" {
return result, validation.NewError("blobs.Client", "AppendBlock", "`containerName` cannot be an empty string.")
}
if strings.ToLower(containerName) != containerName {
return result, validation.NewError("blobs.Client", "AppendBlock", "`containerName` must be a lower-cased string.")
}
if blobName == "" {
return result, validation.NewError("blobs.Client", "AppendBlock", "`blobName` cannot be an empty string.")
}
if input.Content != nil && len(*input.Content) > (4*1024*1024) {
return result, validation.NewError("files.Client", "PutByteRange", "`input.Content` must be at most 4MB.")
}
req, err := client.AppendBlockPreparer(ctx, accountName, containerName, blobName, input)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "AppendBlock", nil, "Failure preparing request")
return
}
resp, err := client.AppendBlockSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "blobs.Client", "AppendBlock", resp, "Failure sending request")
return
}
result, err = client.AppendBlockResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "AppendBlock", resp, "Failure responding to request")
return
}
return
}
// AppendBlockPreparer prepares the AppendBlock request.
func (client Client) AppendBlockPreparer(ctx context.Context, accountName, containerName, blobName string, input AppendBlockInput) (*http.Request, error) {
pathParameters := map[string]interface{}{
"containerName": autorest.Encode("path", containerName),
"blobName": autorest.Encode("path", blobName),
}
queryParameters := map[string]interface{}{
"comp": autorest.Encode("query", "appendblock"),
}
headers := map[string]interface{}{
"x-ms-version": APIVersion,
}
if input.BlobConditionAppendPosition != nil {
headers["x-ms-blob-condition-appendpos"] = *input.BlobConditionAppendPosition
}
if input.BlobConditionMaxSize != nil {
headers["x-ms-blob-condition-maxsize"] = *input.BlobConditionMaxSize
}
if input.ContentMD5 != nil {
headers["x-ms-blob-content-md5"] = *input.ContentMD5
}
if input.LeaseID != nil {
headers["x-ms-lease-id"] = *input.LeaseID
}
if input.Content != nil {
headers["Content-Length"] = int(len(*input.Content))
}
decorators := []autorest.PrepareDecorator{
autorest.AsPut(),
autorest.WithBaseURL(endpoints.GetBlobEndpoint(client.BaseURI, accountName)),
autorest.WithPathParameters("/{containerName}/{blobName}", pathParameters),
autorest.WithQueryParameters(queryParameters),
autorest.WithHeaders(headers),
}
if input.Content != nil {
decorators = append(decorators, autorest.WithBytes(input.Content))
}
preparer := autorest.CreatePreparer(decorators...)
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// AppendBlockSender sends the AppendBlock request. The method will close the
// http.Response Body if it receives an error.
func (client Client) AppendBlockSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// AppendBlockResponder handles the response to the AppendBlock request. The method always
// closes the http.Response Body.
func (client Client) AppendBlockResponder(resp *http.Response) (result AppendBlockResult, err error) {
if resp != nil && resp.Header != nil {
result.BlobAppendOffset = resp.Header.Get("x-ms-blob-append-offset")
result.ContentMD5 = resp.Header.Get("Content-MD5")
result.ETag = resp.Header.Get("ETag")
result.LastModified = resp.Header.Get("Last-Modified")
if v := resp.Header.Get("x-ms-blob-committed-block-count"); v != "" {
i, innerErr := strconv.Atoi(v)
if innerErr != nil {
err = fmt.Errorf("Error parsing %q as an integer: %s", v, innerErr)
return
}
result.BlobCommittedBlockCount = int64(i)
}
}
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusCreated),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
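
Tying the above together, a minimal sketch of appending a block to an append blob; the account, key, container and blob names are placeholders, and the blob is assumed to already exist as an append blob:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/go-autorest/autorest"
	"github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs"
)

func main() {
	// placeholders: substitute real values; the append blob must already exist
	accountName := "storageaccount1"
	accountKey := "aGVsbG8gd29ybGQ="
	containerName := "mycontainer"
	blobName := "example.log"

	storageAuth, err := autorest.NewSharedKeyAuthorizer(accountName, accountKey, autorest.SharedKeyLite)
	if err != nil {
		log.Fatalf("Error building Authorizer: %+v", err)
	}

	client := blobs.New()
	client.Client.Authorizer = storageAuth

	content := []byte("appended line\n")
	input := blobs.AppendBlockInput{
		Content: &content, // each block must be at most 4MB
	}
	if _, err := client.AppendBlock(context.TODO(), accountName, containerName, blobName, input); err != nil {
		log.Fatalf("Error appending block: %+v", err)
	}
}
```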

View File

@ -0,0 +1,25 @@
package blobs
import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
)
// Client is the base client for Blob Storage Blobs.
type Client struct {
autorest.Client
BaseURI string
}
// New creates an instance of the Client client.
func New() Client {
return NewWithEnvironment(azure.PublicCloud)
}
// NewWithEnvironment creates an instance of the Client client configured for the given Azure environment.
func NewWithEnvironment(environment azure.Environment) Client {
return Client{
Client: autorest.NewClientWithUserAgent(UserAgent()),
BaseURI: environment.StorageEndpointSuffix,
}
}

View File

@ -0,0 +1,235 @@
package blobs
import (
"context"
"net/http"
"strings"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/tombuildsstuff/giovanni/storage/internal/endpoints"
"github.com/tombuildsstuff/giovanni/storage/internal/metadata"
)
type CopyInput struct {
// Specifies the name of the source blob or file.
// Beginning with version 2012-02-12, this value may be a URL of up to 2 KB in length that specifies a blob.
// The value should be URL-encoded as it would appear in a request URI.
// A source blob in the same storage account can be authenticated via Shared Key.
// However, if the source is a blob in another account,
// the source blob must either be public or must be authenticated via a shared access signature.
// If the source blob is public, no authentication is required to perform the copy operation.
//
// Beginning with version 2015-02-21, the source object may be a file in the Azure File service.
// If the source object is a file that is to be copied to a blob, then the source file must be authenticated
// using a shared access signature, whether it resides in the same account or in a different account.
//
// Only storage accounts created on or after June 7th, 2012 allow the Copy Blob operation to
// copy from another storage account.
CopySource string
// The ID of the Lease
// Required if the destination blob has an active lease.
// The lease ID specified for this header must match the lease ID of the destination blob.
// If the request does not include the lease ID or it is not valid,
// the operation fails with status code 412 (Precondition Failed).
//
// If this header is specified and the destination blob does not currently have an active lease,
// the operation will also fail with status code 412 (Precondition Failed).
LeaseID *string
// The ID of the Lease on the Source Blob
// Specify to perform the Copy Blob operation only if the lease ID matches the active lease ID of the source blob.
SourceLeaseID *string
// For page blobs on a premium account only. Specifies the tier to be set on the target blob
AccessTier *AccessTier
// A user-defined name-value pair associated with the blob.
// If no name-value pairs are specified, the operation will copy the metadata from the source blob or
// file to the destination blob.
// If one or more name-value pairs are specified, the destination blob is created with the specified metadata,
// and metadata is not copied from the source blob or file.
MetaData map[string]string
// An ETag value.
// Specify an ETag value for this conditional header to copy the blob only if the specified
// ETag value matches the ETag value for an existing destination blob.
// If the ETag for the destination blob does not match the ETag specified for If-Match,
// the Blob service returns status code 412 (Precondition Failed).
IfMatch *string
// An ETag value, or the wildcard character (*).
// Specify an ETag value for this conditional header to copy the blob only if the specified
// ETag value does not match the ETag value for the destination blob.
// Specify the wildcard character (*) to perform the operation only if the destination blob does not exist.
// If the specified condition isn't met, the Blob service returns status code 412 (Precondition Failed).
IfNoneMatch *string
// A DateTime value.
// Specify this conditional header to copy the blob only if the destination blob
// has been modified since the specified date/time.
// If the destination blob has not been modified, the Blob service returns status code 412 (Precondition Failed).
IfModifiedSince *string
// A DateTime value.
// Specify this conditional header to copy the blob only if the destination blob
// has not been modified since the specified date/time.
// If the destination blob has been modified, the Blob service returns status code 412 (Precondition Failed).
IfUnmodifiedSince *string
// An ETag value.
// Specify this conditional header to copy the source blob only if its ETag matches the value specified.
// If the ETag values do not match, the Blob service returns status code 412 (Precondition Failed).
// This cannot be specified if the source is an Azure File.
SourceIfMatch *string
// An ETag value.
// Specify this conditional header to copy the blob only if its ETag does not match the value specified.
// If the values are identical, the Blob service returns status code 412 (Precondition Failed).
// This cannot be specified if the source is an Azure File.
SourceIfNoneMatch *string
// A DateTime value.
// Specify this conditional header to copy the blob only if the source blob has been modified
// since the specified date/time.
// If the source blob has not been modified, the Blob service returns status code 412 (Precondition Failed).
// This cannot be specified if the source is an Azure File.
SourceIfModifiedSince *string
// A DateTime value.
// Specify this conditional header to copy the blob only if the source blob has not been modified
// since the specified date/time.
// If the source blob has been modified, the Blob service returns status code 412 (Precondition Failed).
// This header cannot be specified if the source is an Azure File.
SourceIfUnmodifiedSince *string
}
type CopyResult struct {
autorest.Response
CopyID string
CopyStatus string
}
// Copy copies a blob to a destination within the storage account asynchronously.
func (client Client) Copy(ctx context.Context, accountName, containerName, blobName string, input CopyInput) (result CopyResult, err error) {
if accountName == "" {
return result, validation.NewError("blobs.Client", "Copy", "`accountName` cannot be an empty string.")
}
if containerName == "" {
return result, validation.NewError("blobs.Client", "Copy", "`containerName` cannot be an empty string.")
}
if strings.ToLower(containerName) != containerName {
return result, validation.NewError("blobs.Client", "Copy", "`containerName` must be a lower-cased string.")
}
if blobName == "" {
return result, validation.NewError("blobs.Client", "Copy", "`blobName` cannot be an empty string.")
}
if input.CopySource == "" {
return result, validation.NewError("blobs.Client", "Copy", "`input.CopySource` cannot be an empty string.")
}
req, err := client.CopyPreparer(ctx, accountName, containerName, blobName, input)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "Copy", nil, "Failure preparing request")
return
}
resp, err := client.CopySender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "blobs.Client", "Copy", resp, "Failure sending request")
return
}
result, err = client.CopyResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "Copy", resp, "Failure responding to request")
return
}
return
}
// CopyPreparer prepares the Copy request.
func (client Client) CopyPreparer(ctx context.Context, accountName, containerName, blobName string, input CopyInput) (*http.Request, error) {
pathParameters := map[string]interface{}{
"containerName": autorest.Encode("path", containerName),
"blobName": autorest.Encode("path", blobName),
}
headers := map[string]interface{}{
"x-ms-version": APIVersion,
"x-ms-copy-source": autorest.Encode("header", input.CopySource),
}
if input.LeaseID != nil {
headers["x-ms-lease-id"] = *input.LeaseID
}
if input.SourceLeaseID != nil {
headers["x-ms-source-lease-id"] = *input.SourceLeaseID
}
if input.AccessTier != nil {
headers["x-ms-access-tier"] = string(*input.AccessTier)
}
if input.IfMatch != nil {
headers["If-Match"] = *input.IfMatch
}
if input.IfNoneMatch != nil {
headers["If-None-Match"] = *input.IfNoneMatch
}
if input.IfUnmodifiedSince != nil {
headers["If-Unmodified-Since"] = *input.IfUnmodifiedSince
}
if input.IfModifiedSince != nil {
headers["If-Modified-Since"] = *input.IfModifiedSince
}
if input.SourceIfMatch != nil {
headers["x-ms-source-if-match"] = *input.SourceIfMatch
}
if input.SourceIfNoneMatch != nil {
headers["x-ms-source-if-none-match"] = *input.SourceIfNoneMatch
}
if input.SourceIfModifiedSince != nil {
headers["x-ms-source-if-modified-since"] = *input.SourceIfModifiedSince
}
if input.SourceIfUnmodifiedSince != nil {
headers["x-ms-source-if-unmodified-since"] = *input.SourceIfUnmodifiedSince
}
headers = metadata.SetIntoHeaders(headers, input.MetaData)
preparer := autorest.CreatePreparer(
autorest.AsPut(),
autorest.WithBaseURL(endpoints.GetBlobEndpoint(client.BaseURI, accountName)),
autorest.WithPathParameters("/{containerName}/{blobName}", pathParameters),
autorest.WithHeaders(headers))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CopySender sends the Copy request. The method will close the
// http.Response Body if it receives an error.
func (client Client) CopySender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// CopyResponder handles the response to the Copy request. The method always
// closes the http.Response Body.
func (client Client) CopyResponder(resp *http.Response) (result CopyResult, err error) {
if resp != nil && resp.Header != nil {
result.CopyID = resp.Header.Get("x-ms-copy-id")
}
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusAccepted),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}

View File

@ -0,0 +1,110 @@
package blobs
import (
"context"
"net/http"
"strings"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/tombuildsstuff/giovanni/storage/internal/endpoints"
)
type AbortCopyInput struct {
// The Copy ID which should be aborted
CopyID string
// The ID of the Lease
// This must be specified if a Lease is present on the Blob, else a 403 is returned
LeaseID *string
}
// AbortCopy aborts a pending Copy Blob operation, and leaves a destination blob with zero length and full metadata.
func (client Client) AbortCopy(ctx context.Context, accountName, containerName, blobName string, input AbortCopyInput) (result autorest.Response, err error) {
if accountName == "" {
return result, validation.NewError("blobs.Client", "AbortCopy", "`accountName` cannot be an empty string.")
}
if containerName == "" {
return result, validation.NewError("blobs.Client", "AbortCopy", "`containerName` cannot be an empty string.")
}
if strings.ToLower(containerName) != containerName {
return result, validation.NewError("blobs.Client", "AbortCopy", "`containerName` must be a lower-cased string.")
}
if blobName == "" {
return result, validation.NewError("blobs.Client", "AbortCopy", "`blobName` cannot be an empty string.")
}
if input.CopyID == "" {
return result, validation.NewError("blobs.Client", "AbortCopy", "`input.CopyID` cannot be an empty string.")
}
req, err := client.AbortCopyPreparer(ctx, accountName, containerName, blobName, input)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "AbortCopy", nil, "Failure preparing request")
return
}
resp, err := client.AbortCopySender(req)
if err != nil {
result = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "blobs.Client", "AbortCopy", resp, "Failure sending request")
return
}
result, err = client.AbortCopyResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "AbortCopy", resp, "Failure responding to request")
return
}
return
}
// AbortCopyPreparer prepares the AbortCopy request.
func (client Client) AbortCopyPreparer(ctx context.Context, accountName, containerName, blobName string, input AbortCopyInput) (*http.Request, error) {
pathParameters := map[string]interface{}{
"containerName": autorest.Encode("path", containerName),
"blobName": autorest.Encode("path", blobName),
}
queryParameters := map[string]interface{}{
"comp": autorest.Encode("query", "copy"),
"copyid": autorest.Encode("query", input.CopyID),
}
headers := map[string]interface{}{
"x-ms-version": APIVersion,
"x-ms-copy-action": "abort",
}
if input.LeaseID != nil {
headers["x-ms-lease-id"] = *input.LeaseID
}
preparer := autorest.CreatePreparer(
autorest.AsPut(),
autorest.WithBaseURL(endpoints.GetBlobEndpoint(client.BaseURI, accountName)),
autorest.WithPathParameters("/{containerName}/{blobName}", pathParameters),
autorest.WithQueryParameters(queryParameters),
autorest.WithHeaders(headers))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// AbortCopySender sends the AbortCopy request. The method will close the
// http.Response Body if it receives an error.
func (client Client) AbortCopySender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// AbortCopyResponder handles the response to the AbortCopy request. The method always
// closes the http.Response Body.
func (client Client) AbortCopyResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusNoContent),
autorest.ByClosing())
result = autorest.Response{Response: resp}
return
}

View File

@ -0,0 +1,41 @@
package blobs
import (
"context"
"fmt"
"time"
)
// CopyAndWait copies a blob to a destination within the storage account and waits for it to finish copying.
func (client Client) CopyAndWait(ctx context.Context, accountName, containerName, blobName string, input CopyInput, pollingInterval time.Duration) error {
if _, err := client.Copy(ctx, accountName, containerName, blobName, input); err != nil {
return fmt.Errorf("Error copying: %s", err)
}
for {
getInput := GetPropertiesInput{
LeaseID: input.LeaseID,
}
getResult, err := client.GetProperties(ctx, accountName, containerName, blobName, getInput)
if err != nil {
return fmt.Errorf("Error retrieving Blob Properties: %s", err)
}
switch getResult.CopyStatus {
case Aborted:
return fmt.Errorf("Copy was aborted: %s", getResult.CopyStatusDescription)
case Failed:
return fmt.Errorf("Copy failed: %s", getResult.CopyStatusDescription)
case Success:
return nil
case Pending:
time.Sleep(pollingInterval)
continue
default:
return fmt.Errorf("Unexpected copy status %q waiting for the copy to complete", getResult.CopyStatus)
}
}
}

View File

@ -0,0 +1,105 @@
package blobs
import (
"context"
"net/http"
"strings"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/tombuildsstuff/giovanni/storage/internal/endpoints"
)
type DeleteInput struct {
// Should any Snapshots for this Blob also be deleted?
// If the Blob has Snapshots and this is set to False a 409 Conflict will be returned
DeleteSnapshots bool
// The ID of the Lease
// This must be specified if a Lease is present on the Blob, else a 403 is returned
LeaseID *string
}
// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
func (client Client) Delete(ctx context.Context, accountName, containerName, blobName string, input DeleteInput) (result autorest.Response, err error) {
if accountName == "" {
return result, validation.NewError("blobs.Client", "Delete", "`accountName` cannot be an empty string.")
}
if containerName == "" {
return result, validation.NewError("blobs.Client", "Delete", "`containerName` cannot be an empty string.")
}
if strings.ToLower(containerName) != containerName {
return result, validation.NewError("blobs.Client", "Delete", "`containerName` must be a lower-cased string.")
}
if blobName == "" {
return result, validation.NewError("blobs.Client", "Delete", "`blobName` cannot be an empty string.")
}
req, err := client.DeletePreparer(ctx, accountName, containerName, blobName, input)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "Delete", nil, "Failure preparing request")
return
}
resp, err := client.DeleteSender(req)
if err != nil {
result = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "blobs.Client", "Delete", resp, "Failure sending request")
return
}
result, err = client.DeleteResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "Delete", resp, "Failure responding to request")
return
}
return
}
// DeletePreparer prepares the Delete request.
func (client Client) DeletePreparer(ctx context.Context, accountName, containerName, blobName string, input DeleteInput) (*http.Request, error) {
pathParameters := map[string]interface{}{
"containerName": autorest.Encode("path", containerName),
"blobName": autorest.Encode("path", blobName),
}
headers := map[string]interface{}{
"x-ms-version": APIVersion,
}
if input.LeaseID != nil {
headers["x-ms-lease-id"] = *input.LeaseID
}
if input.DeleteSnapshots {
headers["x-ms-delete-snapshots"] = "include"
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(endpoints.GetBlobEndpoint(client.BaseURI, accountName)),
autorest.WithPathParameters("/{containerName}/{blobName}", pathParameters),
autorest.WithHeaders(headers))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client Client) DeleteSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client Client) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusAccepted),
autorest.ByClosing())
result = autorest.Response{Response: resp}
return
}

View File

@ -0,0 +1,108 @@
package blobs
import (
"context"
"net/http"
"strings"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/tombuildsstuff/giovanni/storage/internal/endpoints"
)
type DeleteSnapshotInput struct {
// The ID of the Lease
// This must be specified if a Lease is present on the Blob, else a 403 is returned
LeaseID *string
// The DateTime of the Snapshot which should be marked for Deletion
SnapshotDateTime string
}
// DeleteSnapshot marks a single Snapshot of a Blob for Deletion based on its DateTime; the Snapshot will be deleted during the next Garbage Collection cycle.
func (client Client) DeleteSnapshot(ctx context.Context, accountName, containerName, blobName string, input DeleteSnapshotInput) (result autorest.Response, err error) {
if accountName == "" {
return result, validation.NewError("blobs.Client", "DeleteSnapshot", "`accountName` cannot be an empty string.")
}
if containerName == "" {
return result, validation.NewError("blobs.Client", "DeleteSnapshot", "`containerName` cannot be an empty string.")
}
if strings.ToLower(containerName) != containerName {
return result, validation.NewError("blobs.Client", "DeleteSnapshot", "`containerName` must be a lower-cased string.")
}
if blobName == "" {
return result, validation.NewError("blobs.Client", "DeleteSnapshot", "`blobName` cannot be an empty string.")
}
if input.SnapshotDateTime == "" {
return result, validation.NewError("blobs.Client", "DeleteSnapshot", "`input.SnapshotDateTime` cannot be an empty string.")
}
req, err := client.DeleteSnapshotPreparer(ctx, accountName, containerName, blobName, input)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "DeleteSnapshot", nil, "Failure preparing request")
return
}
resp, err := client.DeleteSnapshotSender(req)
if err != nil {
result = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "blobs.Client", "DeleteSnapshot", resp, "Failure sending request")
return
}
result, err = client.DeleteSnapshotResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "DeleteSnapshot", resp, "Failure responding to request")
return
}
return
}
// DeleteSnapshotPreparer prepares the DeleteSnapshot request.
func (client Client) DeleteSnapshotPreparer(ctx context.Context, accountName, containerName, blobName string, input DeleteSnapshotInput) (*http.Request, error) {
pathParameters := map[string]interface{}{
"containerName": autorest.Encode("path", containerName),
"blobName": autorest.Encode("path", blobName),
}
queryParameters := map[string]interface{}{
"snapshot": autorest.Encode("query", input.SnapshotDateTime),
}
headers := map[string]interface{}{
"x-ms-version": APIVersion,
}
if input.LeaseID != nil {
headers["x-ms-lease-id"] = *input.LeaseID
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(endpoints.GetBlobEndpoint(client.BaseURI, accountName)),
autorest.WithPathParameters("/{containerName}/{blobName}", pathParameters),
autorest.WithQueryParameters(queryParameters),
autorest.WithHeaders(headers))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSnapshotSender sends the DeleteSnapshot request. The method will close the
// http.Response Body if it receives an error.
func (client Client) DeleteSnapshotSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// DeleteSnapshotResponder handles the response to the DeleteSnapshot request. The method always
// closes the http.Response Body.
func (client Client) DeleteSnapshotResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusAccepted),
autorest.ByClosing())
result = autorest.Response{Response: resp}
return
}

View File

@ -0,0 +1,99 @@
package blobs
import (
"context"
"net/http"
"strings"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/tombuildsstuff/giovanni/storage/internal/endpoints"
)
type DeleteSnapshotsInput struct {
// The ID of the Lease
// This must be specified if a Lease is present on the Blob, else a 403 is returned
LeaseID *string
}
// DeleteSnapshots marks all Snapshots of a Blob for Deletion; they will be deleted during the next Garbage Collection cycle.
func (client Client) DeleteSnapshots(ctx context.Context, accountName, containerName, blobName string, input DeleteSnapshotsInput) (result autorest.Response, err error) {
if accountName == "" {
return result, validation.NewError("blobs.Client", "DeleteSnapshots", "`accountName` cannot be an empty string.")
}
if containerName == "" {
return result, validation.NewError("blobs.Client", "DeleteSnapshots", "`containerName` cannot be an empty string.")
}
if strings.ToLower(containerName) != containerName {
return result, validation.NewError("blobs.Client", "DeleteSnapshots", "`containerName` must be a lower-cased string.")
}
if blobName == "" {
return result, validation.NewError("blobs.Client", "DeleteSnapshots", "`blobName` cannot be an empty string.")
}
req, err := client.DeleteSnapshotsPreparer(ctx, accountName, containerName, blobName, input)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "DeleteSnapshots", nil, "Failure preparing request")
return
}
resp, err := client.DeleteSnapshotsSender(req)
if err != nil {
result = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "blobs.Client", "DeleteSnapshots", resp, "Failure sending request")
return
}
result, err = client.DeleteSnapshotsResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "DeleteSnapshots", resp, "Failure responding to request")
return
}
return
}
// DeleteSnapshotsPreparer prepares the DeleteSnapshots request.
func (client Client) DeleteSnapshotsPreparer(ctx context.Context, accountName, containerName, blobName string, input DeleteSnapshotsInput) (*http.Request, error) {
pathParameters := map[string]interface{}{
"containerName": autorest.Encode("path", containerName),
"blobName": autorest.Encode("path", blobName),
}
headers := map[string]interface{}{
"x-ms-version": APIVersion,
// only delete the snapshots but leave the blob as-is
"x-ms-delete-snapshots": "only",
}
if input.LeaseID != nil {
headers["x-ms-lease-id"] = *input.LeaseID
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(endpoints.GetBlobEndpoint(client.BaseURI, accountName)),
autorest.WithPathParameters("/{containerName}/{blobName}", pathParameters),
autorest.WithHeaders(headers))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSnapshotsSender sends the DeleteSnapshots request. The method will close the
// http.Response Body if it receives an error.
func (client Client) DeleteSnapshotsSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// DeleteSnapshotsResponder handles the response to the DeleteSnapshots request. The method always
// closes the http.Response Body.
func (client Client) DeleteSnapshotsResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusAccepted),
autorest.ByClosing())
result = autorest.Response{Response: resp}
return
}

View File

@ -0,0 +1,116 @@
package blobs
import (
"context"
"fmt"
"net/http"
"strings"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/tombuildsstuff/giovanni/storage/internal/endpoints"
)
type GetInput struct {
LeaseID *string
StartByte *int64
EndByte *int64
}
type GetResult struct {
autorest.Response
Contents []byte
}
// Get reads or downloads a blob from the system, including its metadata and properties.
func (client Client) Get(ctx context.Context, accountName, containerName, blobName string, input GetInput) (result GetResult, err error) {
if accountName == "" {
return result, validation.NewError("blobs.Client", "Get", "`accountName` cannot be an empty string.")
}
if containerName == "" {
return result, validation.NewError("blobs.Client", "Get", "`containerName` cannot be an empty string.")
}
if strings.ToLower(containerName) != containerName {
return result, validation.NewError("blobs.Client", "Get", "`containerName` must be a lower-cased string.")
}
if blobName == "" {
return result, validation.NewError("blobs.Client", "Get", "`blobName` cannot be an empty string.")
}
if input.LeaseID != nil && *input.LeaseID == "" {
return result, validation.NewError("blobs.Client", "Get", "`input.LeaseID` should either be specified or nil, not an empty string.")
}
if (input.StartByte != nil && input.EndByte == nil) || (input.StartByte == nil && input.EndByte != nil) {
return result, validation.NewError("blobs.Client", "Get", "`input.StartByte` and `input.EndByte` must both be specified, or both be nil.")
}
req, err := client.GetPreparer(ctx, accountName, containerName, blobName, input)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "blobs.Client", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "Get", resp, "Failure responding to request")
return
}
return
}
// GetPreparer prepares the Get request.
func (client Client) GetPreparer(ctx context.Context, accountName, containerName, blobName string, input GetInput) (*http.Request, error) {
pathParameters := map[string]interface{}{
"containerName": autorest.Encode("path", containerName),
"blobName": autorest.Encode("path", blobName),
}
headers := map[string]interface{}{
"x-ms-version": APIVersion,
}
if input.StartByte != nil && input.EndByte != nil {
headers["x-ms-range"] = fmt.Sprintf("bytes=%d-%d", *input.StartByte, *input.EndByte)
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(endpoints.GetBlobEndpoint(client.BaseURI, accountName)),
autorest.WithPathParameters("/{containerName}/{blobName}", pathParameters),
autorest.WithHeaders(headers))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client Client) GetSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client Client) GetResponder(resp *http.Response) (result GetResult, err error) {
if resp != nil {
result.Contents = make([]byte, resp.ContentLength)
}
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusPartialContent),
autorest.ByUnmarshallingBytes(&result.Contents),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}

View File

@ -0,0 +1,140 @@
package blobs
import (
"context"
"fmt"
"net/http"
"strconv"
"strings"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/tombuildsstuff/giovanni/storage/internal/endpoints"
)
type GetBlockListInput struct {
BlockListType BlockListType
LeaseID *string
}
type GetBlockListResult struct {
autorest.Response
// The size of the blob in bytes
ContentLength *int64
// The Content Type of the blob
ContentType string
// The ETag associated with this blob
ETag string
// A list of blocks which have been committed
CommittedBlocks CommittedBlocks `xml:"CommittedBlocks,omitempty"`
// A list of blocks which have not yet been committed
UncommittedBlocks UncommittedBlocks `xml:"UncommittedBlocks,omitempty"`
}
// GetBlockList retrieves the list of blocks that have been uploaded as part of a block blob.
func (client Client) GetBlockList(ctx context.Context, accountName, containerName, blobName string, input GetBlockListInput) (result GetBlockListResult, err error) {
if accountName == "" {
return result, validation.NewError("blobs.Client", "GetBlockList", "`accountName` cannot be an empty string.")
}
if containerName == "" {
return result, validation.NewError("blobs.Client", "GetBlockList", "`containerName` cannot be an empty string.")
}
if strings.ToLower(containerName) != containerName {
return result, validation.NewError("blobs.Client", "GetBlockList", "`containerName` must be a lower-cased string.")
}
if blobName == "" {
return result, validation.NewError("blobs.Client", "GetBlockList", "`blobName` cannot be an empty string.")
}
req, err := client.GetBlockListPreparer(ctx, accountName, containerName, blobName, input)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "GetBlockList", nil, "Failure preparing request")
return
}
resp, err := client.GetBlockListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "blobs.Client", "GetBlockList", resp, "Failure sending request")
return
}
result, err = client.GetBlockListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "GetBlockList", resp, "Failure responding to request")
return
}
return
}
// GetBlockListPreparer prepares the GetBlockList request.
func (client Client) GetBlockListPreparer(ctx context.Context, accountName, containerName, blobName string, input GetBlockListInput) (*http.Request, error) {
pathParameters := map[string]interface{}{
"containerName": autorest.Encode("path", containerName),
"blobName": autorest.Encode("path", blobName),
}
queryParameters := map[string]interface{}{
"blocklisttype": autorest.Encode("query", string(input.BlockListType)),
"comp": autorest.Encode("query", "blocklist"),
}
headers := map[string]interface{}{
"x-ms-version": APIVersion,
}
if input.LeaseID != nil {
headers["x-ms-lease-id"] = *input.LeaseID
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(endpoints.GetBlobEndpoint(client.BaseURI, accountName)),
autorest.WithPathParameters("/{containerName}/{blobName}", pathParameters),
autorest.WithHeaders(headers),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetBlockListSender sends the GetBlockList request. The method will close the
// http.Response Body if it receives an error.
func (client Client) GetBlockListSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// GetBlockListResponder handles the response to the GetBlockList request. The method always
// closes the http.Response Body.
func (client Client) GetBlockListResponder(resp *http.Response) (result GetBlockListResult, err error) {
if resp != nil && resp.Header != nil {
result.ContentType = resp.Header.Get("Content-Type")
result.ETag = resp.Header.Get("ETag")
if v := resp.Header.Get("x-ms-blob-content-length"); v != "" {
i, innerErr := strconv.Atoi(v)
if innerErr != nil {
err = fmt.Errorf("Error parsing %q as an integer: %s", v, innerErr)
return
}
i64 := int64(i)
result.ContentLength = &i64
}
}
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingXML(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}

View File

@ -0,0 +1,152 @@
package blobs
import (
"context"
"fmt"
"net/http"
"strconv"
"strings"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/tombuildsstuff/giovanni/storage/internal/endpoints"
)
type GetPageRangesInput struct {
LeaseID *string
StartByte *int64
EndByte *int64
}
type GetPageRangesResult struct {
autorest.Response
// The size of the blob in bytes
ContentLength *int64
// The Content Type of the blob
ContentType string
// The ETag associated with this blob
ETag string
PageRanges []PageRange `xml:"PageRange"`
}
type PageRange struct {
// The start byte offset for this range, inclusive
Start int64 `xml:"Start"`
// The end byte offset for this range, inclusive
End int64 `xml:"End"`
}
// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob.
func (client Client) GetPageRanges(ctx context.Context, accountName, containerName, blobName string, input GetPageRangesInput) (result GetPageRangesResult, err error) {
if accountName == "" {
return result, validation.NewError("blobs.Client", "GetPageRanges", "`accountName` cannot be an empty string.")
}
if containerName == "" {
return result, validation.NewError("blobs.Client", "GetPageRanges", "`containerName` cannot be an empty string.")
}
if strings.ToLower(containerName) != containerName {
return result, validation.NewError("blobs.Client", "GetPageRanges", "`containerName` must be a lower-cased string.")
}
if blobName == "" {
return result, validation.NewError("blobs.Client", "GetPageRanges", "`blobName` cannot be an empty string.")
}
if (input.StartByte != nil && input.EndByte == nil) || (input.StartByte == nil && input.EndByte != nil) {
return result, validation.NewError("blobs.Client", "GetPageRanges", "`input.StartByte` and `input.EndByte` must both be specified, or both be nil.")
}
req, err := client.GetPageRangesPreparer(ctx, accountName, containerName, blobName, input)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "GetPageRanges", nil, "Failure preparing request")
return
}
resp, err := client.GetPageRangesSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "blobs.Client", "GetPageRanges", resp, "Failure sending request")
return
}
result, err = client.GetPageRangesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "GetPageRanges", resp, "Failure responding to request")
return
}
return
}
// GetPageRangesPreparer prepares the GetPageRanges request.
func (client Client) GetPageRangesPreparer(ctx context.Context, accountName, containerName, blobName string, input GetPageRangesInput) (*http.Request, error) {
pathParameters := map[string]interface{}{
"containerName": autorest.Encode("path", containerName),
"blobName": autorest.Encode("path", blobName),
}
queryParameters := map[string]interface{}{
"comp": autorest.Encode("query", "pagelist"),
}
headers := map[string]interface{}{
"x-ms-version": APIVersion,
}
if input.LeaseID != nil {
headers["x-ms-lease-id"] = *input.LeaseID
}
if input.StartByte != nil && input.EndByte != nil {
headers["x-ms-range"] = fmt.Sprintf("bytes=%d-%d", *input.StartByte, *input.EndByte)
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(endpoints.GetBlobEndpoint(client.BaseURI, accountName)),
autorest.WithPathParameters("/{containerName}/{blobName}", pathParameters),
autorest.WithHeaders(headers),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetPageRangesSender sends the GetPageRanges request. The method will close the
// http.Response Body if it receives an error.
func (client Client) GetPageRangesSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// GetPageRangesResponder handles the response to the GetPageRanges request. The method always
// closes the http.Response Body.
func (client Client) GetPageRangesResponder(resp *http.Response) (result GetPageRangesResult, err error) {
if resp != nil && resp.Header != nil {
result.ContentType = resp.Header.Get("Content-Type")
result.ETag = resp.Header.Get("ETag")
if v := resp.Header.Get("x-ms-blob-content-length"); v != "" {
i, innerErr := strconv.Atoi(v)
if innerErr != nil {
err = fmt.Errorf("Error parsing %q as an integer: %s", v, innerErr)
return
}
i64 := int64(i)
result.ContentLength = &i64
}
}
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingXML(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}

View File

@ -0,0 +1,120 @@
package blobs
import (
"context"
"net/http"
"strings"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/tombuildsstuff/giovanni/storage/internal/endpoints"
)
type IncrementalCopyBlobInput struct {
CopySource string
IfModifiedSince *string
IfUnmodifiedSince *string
IfMatch *string
IfNoneMatch *string
}
// IncrementalCopyBlob copies a snapshot of the source page blob to a destination page blob.
// The snapshot is copied such that only the differential changes since the previously copied
// snapshot are transferred to the destination.
// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
func (client Client) IncrementalCopyBlob(ctx context.Context, accountName, containerName, blobName string, input IncrementalCopyBlobInput) (result autorest.Response, err error) {
if accountName == "" {
return result, validation.NewError("blobs.Client", "IncrementalCopyBlob", "`accountName` cannot be an empty string.")
}
if containerName == "" {
return result, validation.NewError("blobs.Client", "IncrementalCopyBlob", "`containerName` cannot be an empty string.")
}
if strings.ToLower(containerName) != containerName {
return result, validation.NewError("blobs.Client", "IncrementalCopyBlob", "`containerName` must be a lower-cased string.")
}
if blobName == "" {
return result, validation.NewError("blobs.Client", "IncrementalCopyBlob", "`blobName` cannot be an empty string.")
}
if input.CopySource == "" {
return result, validation.NewError("blobs.Client", "IncrementalCopyBlob", "`input.CopySource` cannot be an empty string.")
}
req, err := client.IncrementalCopyBlobPreparer(ctx, accountName, containerName, blobName, input)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "IncrementalCopyBlob", nil, "Failure preparing request")
return
}
resp, err := client.IncrementalCopyBlobSender(req)
if err != nil {
result = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "blobs.Client", "IncrementalCopyBlob", resp, "Failure sending request")
return
}
result, err = client.IncrementalCopyBlobResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "IncrementalCopyBlob", resp, "Failure responding to request")
return
}
return
}
// IncrementalCopyBlobPreparer prepares the IncrementalCopyBlob request.
func (client Client) IncrementalCopyBlobPreparer(ctx context.Context, accountName, containerName, blobName string, input IncrementalCopyBlobInput) (*http.Request, error) {
pathParameters := map[string]interface{}{
"containerName": autorest.Encode("path", containerName),
"blobName": autorest.Encode("path", blobName),
}
queryParameters := map[string]interface{}{
"comp": autorest.Encode("query", "incrementalcopy"),
}
headers := map[string]interface{}{
"x-ms-version": APIVersion,
"x-ms-copy-source": input.CopySource,
}
if input.IfModifiedSince != nil {
headers["If-Modified-Since"] = *input.IfModifiedSince
}
if input.IfUnmodifiedSince != nil {
headers["If-Unmodified-Since"] = *input.IfUnmodifiedSince
}
if input.IfMatch != nil {
headers["If-Match"] = *input.IfMatch
}
if input.IfNoneMatch != nil {
headers["If-None-Match"] = *input.IfNoneMatch
}
preparer := autorest.CreatePreparer(
autorest.AsPut(),
autorest.WithBaseURL(endpoints.GetBlobEndpoint(client.BaseURI, accountName)),
autorest.WithPathParameters("/{containerName}/{blobName}", pathParameters),
autorest.WithQueryParameters(queryParameters),
autorest.WithHeaders(headers))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// IncrementalCopyBlobSender sends the IncrementalCopyBlob request. The method will close the
// http.Response Body if it receives an error.
func (client Client) IncrementalCopyBlobSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// IncrementalCopyBlobResponder handles the response to the IncrementalCopyBlob request. The method always
// closes the http.Response Body.
func (client Client) IncrementalCopyBlobResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusAccepted),
autorest.ByClosing())
result = autorest.Response{Response: resp}
return
}

View File

@ -0,0 +1,135 @@
package blobs
import (
"context"
"net/http"
"strings"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/tombuildsstuff/giovanni/storage/internal/endpoints"
)
type AcquireLeaseInput struct {
// The ID of the existing Lease, if leased
LeaseID *string
// Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires.
// A non-infinite lease can be between 15 and 60 seconds
LeaseDuration int
// The Proposed new ID for the Lease
ProposedLeaseID *string
}
type AcquireLeaseResult struct {
autorest.Response
LeaseID string
}
// AcquireLease establishes and manages a lock on a blob for write and delete operations.
func (client Client) AcquireLease(ctx context.Context, accountName, containerName, blobName string, input AcquireLeaseInput) (result AcquireLeaseResult, err error) {
if accountName == "" {
return result, validation.NewError("blobs.Client", "AcquireLease", "`accountName` cannot be an empty string.")
}
if containerName == "" {
return result, validation.NewError("blobs.Client", "AcquireLease", "`containerName` cannot be an empty string.")
}
if strings.ToLower(containerName) != containerName {
return result, validation.NewError("blobs.Client", "AcquireLease", "`containerName` must be a lower-cased string.")
}
if blobName == "" {
return result, validation.NewError("blobs.Client", "AcquireLease", "`blobName` cannot be an empty string.")
}
if input.LeaseID != nil && *input.LeaseID == "" {
return result, validation.NewError("blobs.Client", "AcquireLease", "`input.LeaseID` cannot be an empty string, if specified.")
}
if input.ProposedLeaseID != nil && *input.ProposedLeaseID == "" {
return result, validation.NewError("blobs.Client", "AcquireLease", "`input.ProposedLeaseID` cannot be an empty string, if specified.")
}
// An infinite lease duration is -1 seconds. A non-infinite lease can be between 15 and 60 seconds
if input.LeaseDuration != -1 && (input.LeaseDuration < 15 || input.LeaseDuration > 60) {
return result, validation.NewError("blobs.Client", "AcquireLease", "`input.LeaseDuration` must be -1 (infinite), or between 15 and 60 seconds.")
}
req, err := client.AcquireLeasePreparer(ctx, accountName, containerName, blobName, input)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "AcquireLease", nil, "Failure preparing request")
return
}
resp, err := client.AcquireLeaseSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "blobs.Client", "AcquireLease", resp, "Failure sending request")
return
}
result, err = client.AcquireLeaseResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "AcquireLease", resp, "Failure responding to request")
return
}
return
}
// AcquireLeasePreparer prepares the AcquireLease request.
func (client Client) AcquireLeasePreparer(ctx context.Context, accountName, containerName, blobName string, input AcquireLeaseInput) (*http.Request, error) {
pathParameters := map[string]interface{}{
"containerName": autorest.Encode("path", containerName),
"blobName": autorest.Encode("path", blobName),
}
queryParameters := map[string]interface{}{
"comp": autorest.Encode("query", "lease"),
}
headers := map[string]interface{}{
"x-ms-version": APIVersion,
"x-ms-lease-action": "acquire",
"x-ms-lease-duration": input.LeaseDuration,
}
if input.LeaseID != nil {
headers["x-ms-lease-id"] = *input.LeaseID
}
if input.ProposedLeaseID != nil {
headers["x-ms-proposed-lease-id"] = *input.ProposedLeaseID
}
preparer := autorest.CreatePreparer(
autorest.AsPut(),
autorest.WithBaseURL(endpoints.GetBlobEndpoint(client.BaseURI, accountName)),
autorest.WithPathParameters("/{containerName}/{blobName}", pathParameters),
autorest.WithHeaders(headers),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// AcquireLeaseSender sends the AcquireLease request. The method will close the
// http.Response Body if it receives an error.
func (client Client) AcquireLeaseSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// AcquireLeaseResponder handles the response to the AcquireLease request. The method always
// closes the http.Response Body.
func (client Client) AcquireLeaseResponder(resp *http.Response) (result AcquireLeaseResult, err error) {
if resp != nil && resp.Header != nil {
result.LeaseID = resp.Header.Get("x-ms-lease-id")
}
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusCreated),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}

View File

@ -0,0 +1,124 @@
package blobs
import (
"context"
"net/http"
"strings"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/tombuildsstuff/giovanni/storage/internal/endpoints"
)
type BreakLeaseInput struct {
// For a break operation, proposed duration the lease should continue
// before it is broken, in seconds, between 0 and 60.
// This break period is only used if it is shorter than the time remaining on the lease.
// If longer, the time remaining on the lease is used.
// A new lease will not be available before the break period has expired,
// but the lease may be held for longer than the break period.
// If this header does not appear with a break operation, a fixed-duration lease breaks
// after the remaining lease period elapses, and an infinite lease breaks immediately.
BreakPeriod *int
LeaseID string
}
type BreakLeaseResponse struct {
autorest.Response
// Approximate time remaining in the lease period, in seconds.
// If the break is immediate, 0 is returned.
LeaseTime int
}
// BreakLease breaks an existing lock on a blob using the LeaseID.
func (client Client) BreakLease(ctx context.Context, accountName, containerName, blobName string, input BreakLeaseInput) (result autorest.Response, err error) {
if accountName == "" {
return result, validation.NewError("blobs.Client", "BreakLease", "`accountName` cannot be an empty string.")
}
if containerName == "" {
return result, validation.NewError("blobs.Client", "BreakLease", "`containerName` cannot be an empty string.")
}
if strings.ToLower(containerName) != containerName {
return result, validation.NewError("blobs.Client", "BreakLease", "`containerName` must be a lower-cased string.")
}
if blobName == "" {
return result, validation.NewError("blobs.Client", "BreakLease", "`blobName` cannot be an empty string.")
}
if input.LeaseID == "" {
return result, validation.NewError("blobs.Client", "BreakLease", "`input.LeaseID` cannot be an empty string.")
}
req, err := client.BreakLeasePreparer(ctx, accountName, containerName, blobName, input)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "BreakLease", nil, "Failure preparing request")
return
}
resp, err := client.BreakLeaseSender(req)
if err != nil {
result = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "blobs.Client", "BreakLease", resp, "Failure sending request")
return
}
result, err = client.BreakLeaseResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "BreakLease", resp, "Failure responding to request")
return
}
return
}
// BreakLeasePreparer prepares the BreakLease request.
func (client Client) BreakLeasePreparer(ctx context.Context, accountName, containerName, blobName string, input BreakLeaseInput) (*http.Request, error) {
pathParameters := map[string]interface{}{
"containerName": autorest.Encode("path", containerName),
"blobName": autorest.Encode("path", blobName),
}
queryParameters := map[string]interface{}{
"comp": autorest.Encode("query", "lease"),
}
headers := map[string]interface{}{
"x-ms-version": APIVersion,
"x-ms-lease-action": "break",
"x-ms-lease-id": input.LeaseID,
}
if input.BreakPeriod != nil {
headers["x-ms-lease-break-period"] = *input.BreakPeriod
}
preparer := autorest.CreatePreparer(
autorest.AsPut(),
autorest.WithBaseURL(endpoints.GetBlobEndpoint(client.BaseURI, accountName)),
autorest.WithPathParameters("/{containerName}/{blobName}", pathParameters),
autorest.WithHeaders(headers),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// BreakLeaseSender sends the BreakLease request. The method will close the
// http.Response Body if it receives an error.
func (client Client) BreakLeaseSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// BreakLeaseResponder handles the response to the BreakLease request. The method always
// closes the http.Response Body.
func (client Client) BreakLeaseResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusAccepted),
autorest.ByClosing())
result = autorest.Response{Response: resp}
return
}

View File

@ -0,0 +1,117 @@
package blobs
import (
"context"
"net/http"
"strings"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/tombuildsstuff/giovanni/storage/internal/endpoints"
)
type ChangeLeaseInput struct {
ExistingLeaseID string
ProposedLeaseID string
}
type ChangeLeaseResponse struct {
autorest.Response
LeaseID string
}
// ChangeLease changes the Lease ID of an existing lock on a blob to the Proposed Lease ID.
func (client Client) ChangeLease(ctx context.Context, accountName, containerName, blobName string, input ChangeLeaseInput) (result ChangeLeaseResponse, err error) {
if accountName == "" {
return result, validation.NewError("blobs.Client", "ChangeLease", "`accountName` cannot be an empty string.")
}
if containerName == "" {
return result, validation.NewError("blobs.Client", "ChangeLease", "`containerName` cannot be an empty string.")
}
if strings.ToLower(containerName) != containerName {
return result, validation.NewError("blobs.Client", "ChangeLease", "`containerName` must be a lower-cased string.")
}
if blobName == "" {
return result, validation.NewError("blobs.Client", "ChangeLease", "`blobName` cannot be an empty string.")
}
if input.ExistingLeaseID == "" {
return result, validation.NewError("blobs.Client", "ChangeLease", "`input.ExistingLeaseID` cannot be an empty string.")
}
if input.ProposedLeaseID == "" {
return result, validation.NewError("blobs.Client", "ChangeLease", "`input.ProposedLeaseID` cannot be an empty string.")
}
req, err := client.ChangeLeasePreparer(ctx, accountName, containerName, blobName, input)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "ChangeLease", nil, "Failure preparing request")
return
}
resp, err := client.ChangeLeaseSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "blobs.Client", "ChangeLease", resp, "Failure sending request")
return
}
result, err = client.ChangeLeaseResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "ChangeLease", resp, "Failure responding to request")
return
}
return
}
// ChangeLeasePreparer prepares the ChangeLease request.
func (client Client) ChangeLeasePreparer(ctx context.Context, accountName, containerName, blobName string, input ChangeLeaseInput) (*http.Request, error) {
pathParameters := map[string]interface{}{
"containerName": autorest.Encode("path", containerName),
"blobName": autorest.Encode("path", blobName),
}
queryParameters := map[string]interface{}{
"comp": autorest.Encode("query", "lease"),
}
headers := map[string]interface{}{
"x-ms-version": APIVersion,
"x-ms-lease-action": "change",
"x-ms-lease-id": input.ExistingLeaseID,
"x-ms-proposed-lease-id": input.ProposedLeaseID,
}
preparer := autorest.CreatePreparer(
autorest.AsPut(),
autorest.WithBaseURL(endpoints.GetBlobEndpoint(client.BaseURI, accountName)),
autorest.WithPathParameters("/{containerName}/{blobName}", pathParameters),
autorest.WithHeaders(headers),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ChangeLeaseSender sends the ChangeLease request. The method will close the
// http.Response Body if it receives an error.
func (client Client) ChangeLeaseSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ChangeLeaseResponder handles the response to the ChangeLease request. The method always
// closes the http.Response Body.
func (client Client) ChangeLeaseResponder(resp *http.Response) (result ChangeLeaseResponse, err error) {
if resp != nil && resp.Header != nil {
result.LeaseID = resp.Header.Get("x-ms-lease-id")
}
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}

View File

@ -0,0 +1,98 @@
package blobs
import (
"context"
"net/http"
"strings"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/tombuildsstuff/giovanni/storage/internal/endpoints"
)
// ReleaseLease releases a lock based on the Lease ID.
func (client Client) ReleaseLease(ctx context.Context, accountName, containerName, blobName, leaseID string) (result autorest.Response, err error) {
if accountName == "" {
return result, validation.NewError("blobs.Client", "ReleaseLease", "`accountName` cannot be an empty string.")
}
if containerName == "" {
return result, validation.NewError("blobs.Client", "ReleaseLease", "`containerName` cannot be an empty string.")
}
if strings.ToLower(containerName) != containerName {
return result, validation.NewError("blobs.Client", "ReleaseLease", "`containerName` must be a lower-cased string.")
}
if blobName == "" {
return result, validation.NewError("blobs.Client", "ReleaseLease", "`blobName` cannot be an empty string.")
}
if leaseID == "" {
return result, validation.NewError("blobs.Client", "ReleaseLease", "`leaseID` cannot be an empty string.")
}
req, err := client.ReleaseLeasePreparer(ctx, accountName, containerName, blobName, leaseID)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "ReleaseLease", nil, "Failure preparing request")
return
}
resp, err := client.ReleaseLeaseSender(req)
if err != nil {
result = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "blobs.Client", "ReleaseLease", resp, "Failure sending request")
return
}
result, err = client.ReleaseLeaseResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "ReleaseLease", resp, "Failure responding to request")
return
}
return
}
// ReleaseLeasePreparer prepares the ReleaseLease request.
func (client Client) ReleaseLeasePreparer(ctx context.Context, accountName, containerName, blobName, leaseID string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"containerName": autorest.Encode("path", containerName),
"blobName": autorest.Encode("path", blobName),
}
queryParameters := map[string]interface{}{
"comp": autorest.Encode("query", "lease"),
}
headers := map[string]interface{}{
"x-ms-version": APIVersion,
"x-ms-lease-action": "release",
"x-ms-lease-id": leaseID,
}
preparer := autorest.CreatePreparer(
autorest.AsPut(),
autorest.WithBaseURL(endpoints.GetBlobEndpoint(client.BaseURI, accountName)),
autorest.WithPathParameters("/{containerName}/{blobName}", pathParameters),
autorest.WithHeaders(headers),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ReleaseLeaseSender sends the ReleaseLease request. The method will close the
// http.Response Body if it receives an error.
func (client Client) ReleaseLeaseSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ReleaseLeaseResponder handles the response to the ReleaseLease request. The method always
// closes the http.Response Body.
func (client Client) ReleaseLeaseResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByClosing())
result = autorest.Response{Response: resp}
return
}

View File

@ -0,0 +1,97 @@
package blobs
import (
"context"
"net/http"
"strings"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/tombuildsstuff/giovanni/storage/internal/endpoints"
)
func (client Client) RenewLease(ctx context.Context, accountName, containerName, blobName, leaseID string) (result autorest.Response, err error) {
if accountName == "" {
return result, validation.NewError("blobs.Client", "RenewLease", "`accountName` cannot be an empty string.")
}
if containerName == "" {
return result, validation.NewError("blobs.Client", "RenewLease", "`containerName` cannot be an empty string.")
}
if strings.ToLower(containerName) != containerName {
return result, validation.NewError("blobs.Client", "RenewLease", "`containerName` must be a lower-cased string.")
}
if blobName == "" {
return result, validation.NewError("blobs.Client", "RenewLease", "`blobName` cannot be an empty string.")
}
if leaseID == "" {
return result, validation.NewError("blobs.Client", "RenewLease", "`leaseID` cannot be an empty string.")
}
req, err := client.RenewLeasePreparer(ctx, accountName, containerName, blobName, leaseID)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "RenewLease", nil, "Failure preparing request")
return
}
resp, err := client.RenewLeaseSender(req)
if err != nil {
result = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "blobs.Client", "RenewLease", resp, "Failure sending request")
return
}
result, err = client.RenewLeaseResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "blobs.Client", "RenewLease", resp, "Failure responding to request")
return
}
return
}
// RenewLeasePreparer prepares the RenewLease request.
func (client Client) RenewLeasePreparer(ctx context.Context, accountName, containerName, blobName, leaseID string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"containerName": autorest.Encode("path", containerName),
"blobName": autorest.Encode("path", blobName),
}
queryParameters := map[string]interface{}{
"comp": autorest.Encode("query", "lease"),
}
headers := map[string]interface{}{
"x-ms-version": APIVersion,
"x-ms-lease-action": "renew",
"x-ms-lease-id": leaseID,
}
preparer := autorest.CreatePreparer(
autorest.AsPut(),
autorest.WithBaseURL(endpoints.GetBlobEndpoint(client.BaseURI, accountName)),
autorest.WithPathParameters("/{containerName}/{blobName}", pathParameters),
autorest.WithHeaders(headers),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// RenewLeaseSender sends the RenewLease request. The method will close the
// http.Response Body if it receives an error.
func (client Client) RenewLeaseSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// RenewLeaseResponder handles the response to the RenewLease request. The method always
// closes the http.Response Body.
func (client Client) RenewLeaseResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByClosing())
result = autorest.Response{Response: resp}
return
}

Some files were not shown because too many files have changed in this diff