diff --git a/openstack/client.go b/openstack/client.go
index 09f11dbd6..90e917331 100644
--- a/openstack/client.go
+++ b/openstack/client.go
@@ -505,3 +505,9 @@ func NewDCSServiceV1(client *golangsdk.ProviderClient, eo golangsdk.EndpointOpts
sc, err := initClientOpts(client, eo, "dcs")
return sc, err
}
+
+// NewOBSService creates a ServiceClient that may be used to access the Object Storage Service.
+func NewOBSService(client *golangsdk.ProviderClient, eo golangsdk.EndpointOpts) (*golangsdk.ServiceClient, error) {
+	// "object" is the catalog service type used to resolve the OBS endpoint.
+	sc, err := initClientOpts(client, eo, "object")
+	return sc, err
+}
diff --git a/openstack/obs/auth.go b/openstack/obs/auth.go
new file mode 100644
index 000000000..c1d980306
--- /dev/null
+++ b/openstack/obs/auth.go
@@ -0,0 +1,301 @@
+package obs
+
+import (
+ "fmt"
+ "net/url"
+ "sort"
+ "strings"
+ "time"
+)
+
+// doAuthTemporary builds a pre-signed (temporary) request URL valid for
+// `expires` seconds.  Depending on obsClient.conf.signature it emits either a
+// V4 query-string signature (X-Amz-* parameters) or a V2 signature
+// (AWSAccessKeyId/Expires/Signature parameters).  When no AK/SK is configured
+// the unsigned URL is returned as-is.
+func (obsClient ObsClient) doAuthTemporary(method, bucketName, objectKey string, params map[string]string,
+	headers map[string][]string, expires int64) (requestUrl string, err error) {
+
+	isV4 := obsClient.conf.signature == SignatureV4
+
+	requestUrl, canonicalizedUrl := obsClient.conf.formatUrls(bucketName, objectKey, params)
+	parsedRequestUrl, err := url.Parse(requestUrl)
+	if err != nil {
+		return "", err
+	}
+	encodeHeaders(headers)
+
+	hostName := parsedRequestUrl.Host
+
+	// prepareHeaders fills in Host/Date and reports whether signing must be
+	// skipped because no credentials are configured.
+	skipAuth := obsClient.prepareHeaders(headers, hostName, isV4)
+
+	if !skipAuth {
+		if isV4 {
+			// Date was normalized to RFC1123 by prepareHeaders; the parse
+			// error is discarded, which would silently sign with the zero
+			// time -- NOTE(review): confirm this cannot occur after
+			// prepareHeaders has run.
+			date, _ := time.Parse(RFC1123_FORMAT, headers[HEADER_DATE_CAMEL][0])
+			shortDate := date.Format(SHORT_DATE_FORMAT)
+			longDate := date.Format(LONG_DATE_FORMAT)
+
+			signedHeaders, _headers := getSignedHeaders(headers)
+
+			credential, scope := getCredential(obsClient.conf.securityProvider.ak, obsClient.conf.region, shortDate)
+			params[PARAM_ALGORITHM_AMZ_CAMEL] = V4_HASH_PREFIX
+			params[PARAM_CREDENTIAL_AMZ_CAMEL] = credential
+			params[PARAM_DATE_AMZ_CAMEL] = longDate
+			params[PARAM_EXPIRES_AMZ_CAMEL] = Int64ToString(expires)
+			params[PARAM_SIGNEDHEADERS_AMZ_CAMEL] = strings.Join(signedHeaders, ";")
+
+			// Re-build the URL so the canonical request includes the signing
+			// parameters added above.
+			requestUrl, canonicalizedUrl = obsClient.conf.formatUrls(bucketName, objectKey, params)
+			parsedRequestUrl, _ = url.Parse(requestUrl)
+			stringToSign := getV4StringToSign(method, canonicalizedUrl, parsedRequestUrl.RawQuery, scope, longDate, UNSIGNED_PAYLOAD, signedHeaders, _headers)
+			signature := getSignature(stringToSign, obsClient.conf.securityProvider.sk, obsClient.conf.region, shortDate)
+
+			requestUrl += fmt.Sprintf("&%s=%s", PARAM_SIGNATURE_AMZ_CAMEL, UrlEncode(signature, false))
+
+		} else {
+			// V2: the Date header is temporarily replaced with the absolute
+			// expiry time (epoch seconds) for signing, then restored below.
+			originDate := headers[HEADER_DATE_CAMEL][0]
+			date, _ := time.Parse(RFC1123_FORMAT, originDate)
+			expires += date.Unix()
+			headers[HEADER_DATE_CAMEL] = []string{Int64ToString(expires)}
+
+			stringToSign := getV2StringToSign(method, canonicalizedUrl, headers)
+			signature := UrlEncode(Base64Encode(HmacSha1([]byte(obsClient.conf.securityProvider.sk), []byte(stringToSign))), false)
+			if strings.Index(requestUrl, "?") < 0 {
+				requestUrl += "?"
+			} else {
+				requestUrl += "&"
+			}
+			headers[HEADER_DATE_CAMEL] = []string{originDate}
+			requestUrl += fmt.Sprintf("AWSAccessKeyId=%s&Expires=%d&Signature=%s", UrlEncode(obsClient.conf.securityProvider.ak, false),
+				expires, signature)
+		}
+	}
+	return
+}
+
+// prepareHeaders normalizes the Host and Date headers in place and reports
+// whether authorization must be skipped.  It returns true when no AK/SK is
+// configured (the request is then sent unsigned); otherwise it also attaches
+// the security-token header when a token is present.
+func (obsClient ObsClient) prepareHeaders(headers map[string][]string, hostName string, isV4 bool) bool {
+	headers[HEADER_HOST_CAMEL] = []string{hostName}
+	// A caller-supplied x-amz-date is promoted to the Date header when it is
+	// in the format expected by the active signature version; an unusable
+	// value is dropped from the map.
+	if date, ok := headers[HEADER_DATE_AMZ]; ok {
+		flag := false
+		if len(date) == 1 {
+			if isV4 {
+				if t, err := time.Parse(LONG_DATE_FORMAT, date[0]); err == nil {
+					headers[HEADER_DATE_CAMEL] = []string{FormatUtcToRfc1123(t)}
+					flag = true
+				}
+			} else {
+				// V2 only sanity-checks the "GMT" suffix instead of parsing.
+				if strings.HasSuffix(date[0], "GMT") {
+					headers[HEADER_DATE_CAMEL] = []string{date[0]}
+					flag = true
+				}
+			}
+		}
+		if !flag {
+			delete(headers, HEADER_DATE_AMZ)
+		}
+	}
+
+	// Default the Date header to "now" when the caller did not supply one.
+	if _, ok := headers[HEADER_DATE_CAMEL]; !ok {
+		headers[HEADER_DATE_CAMEL] = []string{FormatUtcToRfc1123(time.Now().UTC())}
+	}
+
+	if obsClient.conf.securityProvider == nil || obsClient.conf.securityProvider.ak == "" || obsClient.conf.securityProvider.sk == "" {
+		doLog(LEVEL_WARN, "No ak/sk provided, skip to construct authorization")
+		return true
+	}
+
+	if obsClient.conf.securityProvider.securityToken != "" {
+		headers[HEADER_STS_TOKEN_AMZ] = []string{obsClient.conf.securityProvider.securityToken}
+	}
+	return false
+}
+
+// doAuth signs a regular (header-based) request in place: it formats the
+// request URL, normalizes the headers and, unless no credentials are
+// configured, adds the V2 or V4 Authorization header.
+func (obsClient ObsClient) doAuth(method, bucketName, objectKey string, params map[string]string,
+	headers map[string][]string, hostName string) (requestUrl string, err error) {
+
+	requestUrl, canonicalizedUrl := obsClient.conf.formatUrls(bucketName, objectKey, params)
+	parsedRequestUrl, err := url.Parse(requestUrl)
+	if err != nil {
+		return "", err
+	}
+	encodeHeaders(headers)
+
+	// An explicit hostName (e.g. a custom endpoint) takes precedence over the
+	// host parsed from the formatted URL.
+	if hostName == "" {
+		hostName = parsedRequestUrl.Host
+	}
+
+	isV4 := obsClient.conf.signature == SignatureV4
+
+	skipAuth := obsClient.prepareHeaders(headers, hostName, isV4)
+
+	if !skipAuth {
+		if isV4 {
+			// Header-signed V4 requests always declare the empty-payload hash.
+			headers[HEADER_CONTENT_SHA256_AMZ] = []string{EMPTY_CONTENT_SHA256}
+			err = obsClient.v4Auth(method, canonicalizedUrl, parsedRequestUrl.RawQuery, headers)
+		} else {
+			err = obsClient.v2Auth(method, canonicalizedUrl, headers)
+		}
+	}
+	return
+}
+
+// encodeHeaders URL-encodes every header value in place.  The second argument
+// to UrlEncode presumably controls which reserved characters are kept --
+// TODO(review): confirm against UrlEncode's definition.
+func encodeHeaders(headers map[string][]string) {
+	for key, values := range headers {
+		for index, value := range values {
+			values[index] = UrlEncode(value, true)
+		}
+		headers[key] = values
+	}
+}
+
+// attachHeaders builds the canonicalized-headers section of the V2 string to
+// sign: only content-md5, content-type, date and x-amz-* headers participate,
+// lower-cased and sorted.  The interested_headers are always represented,
+// contributing an empty line when unset.  Headers whose name trims to the
+// empty string are deleted from the caller's map as a side effect.
+func attachHeaders(headers map[string][]string) string {
+	length := len(headers)
+	_headers := make(map[string][]string, length)
+	keys := make([]string, 0, length)
+
+	for key, value := range headers {
+		_key := strings.ToLower(strings.TrimSpace(key))
+		if _key != "" {
+			if _key == "content-md5" || _key == "content-type" || _key == "date" || strings.HasPrefix(_key, HEADER_PREFIX) {
+				keys = append(keys, _key)
+				_headers[_key] = value
+			}
+		} else {
+			delete(headers, key)
+		}
+	}
+
+	// Mandatory canonical positions: absent headers still occupy a line.
+	for _, interestedHeader := range interested_headers {
+		if _, ok := _headers[interestedHeader]; !ok {
+			_headers[interestedHeader] = []string{""}
+			keys = append(keys, interestedHeader)
+		}
+	}
+
+	sort.Strings(keys)
+
+	stringToSign := make([]string, 0, len(keys))
+	for _, key := range keys {
+		var value string
+		if strings.HasPrefix(key, HEADER_PREFIX) {
+			// x-amz-meta-* values are trimmed then comma-joined; other
+			// x-amz-* values are joined untrimmed.  Both emit "key:value".
+			if strings.HasPrefix(key, HEADER_PREFIX_META) {
+				for index, v := range _headers[key] {
+					value += strings.TrimSpace(v)
+					if index != len(_headers[key])-1 {
+						value += ","
+					}
+				}
+			} else {
+				value = strings.Join(_headers[key], ",")
+			}
+			value = fmt.Sprintf("%s:%s", key, value)
+		} else {
+			value = strings.Join(_headers[key], ",")
+		}
+		stringToSign = append(stringToSign, value)
+	}
+	return strings.Join(stringToSign, "\n")
+}
+
+// getV2StringToSign assembles the V2 string to sign:
+// METHOD\n<canonicalized headers>\n<canonicalized URL>.
+func getV2StringToSign(method, canonicalizedUrl string, headers map[string][]string) string {
+	stringToSign := strings.Join([]string{method, "\n", attachHeaders(headers), "\n", canonicalizedUrl}, "")
+	doLog(LEVEL_DEBUG, "The v2 auth stringToSign:\n%s", stringToSign)
+	return stringToSign
+}
+
+// v2Auth signs the request with HMAC-SHA1 (signature V2) and stores
+// "<prefix> AK:signature" in the Authorization header.  It always returns nil.
+func (obsClient ObsClient) v2Auth(method, canonicalizedUrl string, headers map[string][]string) error {
+	stringToSign := getV2StringToSign(method, canonicalizedUrl, headers)
+	signature := Base64Encode(HmacSha1([]byte(obsClient.conf.securityProvider.sk), []byte(stringToSign)))
+
+	headers[HEADER_AUTH_CAMEL] = []string{fmt.Sprintf("%s %s:%s", V2_HASH_PREFIX, obsClient.conf.securityProvider.ak, signature)}
+	return nil
+}
+
+// getScope returns the V4 credential scope: date/region/service/terminator.
+func getScope(region, shortDate string) string {
+	return fmt.Sprintf("%s/%s/%s/%s", shortDate, region, V4_SERVICE_NAME, V4_SERVICE_SUFFIX)
+}
+
+// getCredential returns the V4 credential string ("AK/scope") and the scope.
+func getCredential(ak, region, shortDate string) (string, string) {
+	scope := getScope(region, shortDate)
+	return fmt.Sprintf("%s/%s", ak, scope), scope
+}
+
+// getV4StringToSign builds the V4 canonical request
+// (method/URL/query/headers/signed-header-list/payload hash) and returns the
+// string to sign: ALGORITHM\nlongDate\nscope\nHEX(SHA256(canonicalRequest)).
+func getV4StringToSign(method, canonicalizedUrl, queryUrl, scope, longDate, payload string, signedHeaders []string, headers map[string][]string) string {
+
+	canonicalRequest := make([]string, 0, 10+len(signedHeaders)*4)
+	canonicalRequest = append(canonicalRequest, method)
+	canonicalRequest = append(canonicalRequest, "\n")
+	canonicalRequest = append(canonicalRequest, canonicalizedUrl)
+	canonicalRequest = append(canonicalRequest, "\n")
+	canonicalRequest = append(canonicalRequest, queryUrl)
+	canonicalRequest = append(canonicalRequest, "\n")
+
+	// One "name:value\n" entry per signed header value, in signed-header order.
+	for _, signedHeader := range signedHeaders {
+		values, _ := headers[signedHeader] // NOTE(review): the comma-ok form is redundant here (go vet flags it)
+		for _, value := range values {
+			canonicalRequest = append(canonicalRequest, signedHeader)
+			canonicalRequest = append(canonicalRequest, ":")
+			canonicalRequest = append(canonicalRequest, value)
+			canonicalRequest = append(canonicalRequest, "\n")
+		}
+	}
+	canonicalRequest = append(canonicalRequest, "\n")
+	canonicalRequest = append(canonicalRequest, strings.Join(signedHeaders, ";"))
+	canonicalRequest = append(canonicalRequest, "\n")
+	canonicalRequest = append(canonicalRequest, payload)
+
+	_canonicalRequest := strings.Join(canonicalRequest, "")
+	doLog(LEVEL_DEBUG, "The v4 auth canonicalRequest:\n%s", _canonicalRequest)
+
+	stringToSign := make([]string, 0, 7)
+	stringToSign = append(stringToSign, V4_HASH_PREFIX)
+	stringToSign = append(stringToSign, "\n")
+	stringToSign = append(stringToSign, longDate)
+	stringToSign = append(stringToSign, "\n")
+	stringToSign = append(stringToSign, scope)
+	stringToSign = append(stringToSign, "\n")
+	stringToSign = append(stringToSign, HexSha256([]byte(_canonicalRequest)))
+
+	_stringToSign := strings.Join(stringToSign, "")
+
+	doLog(LEVEL_DEBUG, "The v4 auth stringToSign:\n%s", _stringToSign)
+	return _stringToSign
+}
+
+// getSignedHeaders returns the sorted, lower-cased list of header names to be
+// signed, together with a matching lower-cased header map.  Headers whose
+// name trims to the empty string are deleted from the caller's map.
+func getSignedHeaders(headers map[string][]string) ([]string, map[string][]string) {
+	length := len(headers)
+	_headers := make(map[string][]string, length)
+	signedHeaders := make([]string, 0, length)
+	for key, value := range headers {
+		_key := strings.ToLower(strings.TrimSpace(key))
+		if _key != "" {
+			signedHeaders = append(signedHeaders, _key)
+			_headers[_key] = value
+		} else {
+			delete(headers, key)
+		}
+	}
+	sort.Strings(signedHeaders)
+	return signedHeaders, _headers
+}
+
+// getSignature derives the V4 signing key by chained HMAC-SHA256
+// (secret -> date -> region -> service -> terminator) and returns the
+// hex-encoded HMAC of stringToSign under that key.
+func getSignature(stringToSign, sk, region, shortDate string) string {
+	key := HmacSha256([]byte(V4_HASH_PRE+sk), []byte(shortDate))
+	key = HmacSha256(key, []byte(region))
+	key = HmacSha256(key, []byte(V4_SERVICE_NAME))
+	key = HmacSha256(key, []byte(V4_SERVICE_SUFFIX))
+	return Hex(HmacSha256(key, []byte(stringToSign)))
+}
+
+// v4Auth signs the request with signature V4 and stores the full
+// Credential/SignedHeaders/Signature triple in the Authorization header.
+// It always returns nil.
+func (obsClient ObsClient) v4Auth(method, canonicalizedUrl, queryUrl string, headers map[string][]string) error {
+	// Fall back to "now" when the Date header is missing or malformed.
+	t, err := time.Parse(RFC1123_FORMAT, headers[HEADER_DATE_CAMEL][0])
+	if err != nil {
+		t = time.Now().UTC()
+	}
+	shortDate := t.Format(SHORT_DATE_FORMAT)
+	longDate := t.Format(LONG_DATE_FORMAT)
+
+	signedHeaders, _headers := getSignedHeaders(headers)
+
+	credential, scope := getCredential(obsClient.conf.securityProvider.ak, obsClient.conf.region, shortDate)
+
+	stringToSign := getV4StringToSign(method, canonicalizedUrl, queryUrl, scope, longDate, EMPTY_CONTENT_SHA256, signedHeaders, _headers)
+
+	signature := getSignature(stringToSign, obsClient.conf.securityProvider.sk, obsClient.conf.region, shortDate)
+	headers[HEADER_AUTH_CAMEL] = []string{fmt.Sprintf("%s Credential=%s,SignedHeaders=%s,Signature=%s", V4_HASH_PREFIX, credential, strings.Join(signedHeaders, ";"), signature)}
+	return nil
+}
diff --git a/openstack/obs/client.go b/openstack/obs/client.go
new file mode 100644
index 000000000..a222e7510
--- /dev/null
+++ b/openstack/obs/client.go
@@ -0,0 +1,811 @@
+package obs
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "sort"
+ "strings"
+)
+
+// ObsClient is the entry point for all OBS bucket and object operations.
+type ObsClient struct {
+	conf       *config         // endpoint, credentials and signing configuration (shared pointer)
+	httpClient *http.Client    // client used to execute requests
+	transport  *http.Transport // kept so Close can release idle connections
+}
+
+// New creates an ObsClient for the given AK/SK and endpoint.  Optional
+// configurers may override defaults before the configuration is validated
+// and the HTTP transport is constructed.
+func New(ak, sk, endpoint string, configurers ...configurer) (*ObsClient, error) {
+	conf := &config{securityProvider: &securityProvider{ak: ak, sk: sk}, endpoint: endpoint}
+	// -1 marks "not set"; presumably replaced by a default inside
+	// initConfigWithDefault -- TODO(review): confirm.
+	conf.maxRetryCount = -1
+	for _, configurer := range configurers {
+		configurer(conf)
+	}
+
+	if err := conf.initConfigWithDefault(); err != nil {
+		return nil, err
+	}
+
+	transport, err := conf.getTransport()
+	if err != nil {
+		return nil, err
+	}
+
+	// Log a one-line summary of the effective configuration.
+	info := make([]string, 3)
+	info[0] = fmt.Sprintf("[OBS SDK Version=%s", obs_sdk_version)
+	info[1] = fmt.Sprintf("Endpoint=%s", conf.endpoint)
+	accessMode := "Virtual Hosting"
+	if conf.pathStyle {
+		accessMode = "Path"
+	}
+	info[2] = fmt.Sprintf("Access Mode=%s]", accessMode)
+	doLog(LEVEL_WARN, strings.Join(info, "];["))
+	doLog(LEVEL_DEBUG, "Create obsclient with config:\n%s\n", conf)
+	obsClient := &ObsClient{conf: conf, httpClient: &http.Client{Transport: transport, CheckRedirect: checkRedirectFunc}, transport: transport}
+	return obsClient, nil
+}
+
+// Refresh replaces the client's credentials (AK/SK and optional security
+// token); whitespace is trimmed from every value.  This works despite the
+// value receiver because conf is a shared pointer.
+func (obsClient ObsClient) Refresh(ak, sk, securityToken string) {
+	sp := &securityProvider{ak: strings.TrimSpace(ak), sk: strings.TrimSpace(sk), securityToken: strings.TrimSpace(securityToken)}
+	obsClient.conf.securityProvider = sp
+}
+
+// Close releases idle connections and flushes the log.
+// NOTE(review): because of the value receiver, the nil assignments below only
+// clear the local copy, not the caller's client -- confirm whether a pointer
+// receiver was intended.
+func (obsClient ObsClient) Close() {
+	obsClient.transport.CloseIdleConnections()
+	obsClient.transport = nil
+	obsClient.httpClient = nil
+	obsClient.conf = nil
+	SyncLog()
+}
+
+// GetEndpoint returns the configured service endpoint.
+func (obsClient ObsClient) GetEndpoint() string {
+	return obsClient.conf.endpoint
+}
+
+// ListBuckets lists the buckets owned by the requester.  A nil input is
+// tolerated: an empty query is valid for this operation.
+func (obsClient ObsClient) ListBuckets(input *ListBucketsInput) (output *ListBucketsOutput, err error) {
+	if input == nil {
+		input = &ListBucketsInput{}
+	}
+	output = &ListBucketsOutput{}
+	err = obsClient.doActionWithoutBucket("ListBuckets", HTTP_GET, input, output)
+	if err != nil {
+		output = nil // never return a partial output alongside an error
+	}
+	return
+}
+
+// CreateBucket creates the bucket described by input.
+func (obsClient ObsClient) CreateBucket(input *CreateBucketInput) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("CreateBucketInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("CreateBucket", HTTP_PUT, input.Bucket, input, output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// DeleteBucket deletes the named bucket.
+func (obsClient ObsClient) DeleteBucket(bucketName string) (output *BaseModel, err error) {
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("DeleteBucket", HTTP_DELETE, bucketName, defaultSerializable, output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketStoragePolicy sets the default storage policy of a bucket.
+func (obsClient ObsClient) SetBucketStoragePolicy(input *SetBucketStoragePolicyInput) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketStoragePolicyInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketStoragePolicy", HTTP_PUT, input.Bucket, input, output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketStoragePolicy retrieves the default storage policy of a bucket.
+func (obsClient ObsClient) GetBucketStoragePolicy(bucketName string) (output *GetBucketStoragePolicyOutput, err error) {
+	output = &GetBucketStoragePolicyOutput{}
+	err = obsClient.doActionWithBucket("GetBucketStoragePolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStoragePolicy), output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// ListObjects lists objects in a bucket; on success the bucket region header
+// is surfaced as output.Location.
+func (obsClient ObsClient) ListObjects(input *ListObjectsInput) (output *ListObjectsOutput, err error) {
+	if input == nil {
+		return nil, errors.New("ListObjectsInput is nil")
+	}
+	output = &ListObjectsOutput{}
+	err = obsClient.doActionWithBucket("ListObjects", HTTP_GET, input.Bucket, input, output)
+	if err != nil {
+		output = nil
+	} else {
+		if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
+			output.Location = location[0]
+		}
+	}
+	return
+}
+
+// ListVersions lists object versions in a bucket; on success the bucket
+// region header is surfaced as output.Location.
+func (obsClient ObsClient) ListVersions(input *ListVersionsInput) (output *ListVersionsOutput, err error) {
+	if input == nil {
+		return nil, errors.New("ListVersionsInput is nil")
+	}
+	output = &ListVersionsOutput{}
+	err = obsClient.doActionWithBucket("ListVersions", HTTP_GET, input.Bucket, input, output)
+	if err != nil {
+		output = nil
+	} else {
+		if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
+			output.Location = location[0]
+		}
+	}
+	return
+}
+
+// ListMultipartUploads lists in-progress multipart uploads in a bucket.
+func (obsClient ObsClient) ListMultipartUploads(input *ListMultipartUploadsInput) (output *ListMultipartUploadsOutput, err error) {
+	if input == nil {
+		return nil, errors.New("ListMultipartUploadsInput is nil")
+	}
+	output = &ListMultipartUploadsOutput{}
+	err = obsClient.doActionWithBucket("ListMultipartUploads", HTTP_GET, input.Bucket, input, output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketQuota sets the storage quota of a bucket.
+func (obsClient ObsClient) SetBucketQuota(input *SetBucketQuotaInput) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketQuotaInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketQuota", HTTP_PUT, input.Bucket, input, output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketQuota retrieves the storage quota of a bucket.
+func (obsClient ObsClient) GetBucketQuota(bucketName string) (output *GetBucketQuotaOutput, err error) {
+	output = &GetBucketQuotaOutput{}
+	err = obsClient.doActionWithBucket("GetBucketQuota", HTTP_GET, bucketName, newSubResourceSerial(SubResourceQuota), output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// HeadBucket checks whether the named bucket exists and is accessible.
+func (obsClient ObsClient) HeadBucket(bucketName string) (output *BaseModel, err error) {
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("HeadBucket", HTTP_HEAD, bucketName, defaultSerializable, output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketMetadata retrieves bucket metadata via a HEAD request.
+// NOTE(review): unlike its siblings this method dereferences input without a
+// nil check -- a nil input panics; confirm whether a guard should be added.
+func (obsClient ObsClient) GetBucketMetadata(input *GetBucketMetadataInput) (output *GetBucketMetadataOutput, err error) {
+	output = &GetBucketMetadataOutput{}
+	err = obsClient.doActionWithBucket("GetBucketMetadata", HTTP_HEAD, input.Bucket, input, output)
+	if err != nil {
+		output = nil
+	} else {
+		ParseGetBucketMetadataOutput(output)
+	}
+	return
+}
+
+// GetBucketStorageInfo retrieves the storage usage of a bucket.
+func (obsClient ObsClient) GetBucketStorageInfo(bucketName string) (output *GetBucketStorageInfoOutput, err error) {
+	output = &GetBucketStorageInfoOutput{}
+	err = obsClient.doActionWithBucket("GetBucketStorageInfo", HTTP_GET, bucketName, newSubResourceSerial(SubResourceStorageInfo), output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketLocation retrieves the region (location) of a bucket.
+func (obsClient ObsClient) GetBucketLocation(bucketName string) (output *GetBucketLocationOutput, err error) {
+	output = &GetBucketLocationOutput{}
+	err = obsClient.doActionWithBucket("GetBucketLocation", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLocation), output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketAcl sets the access control list of a bucket.
+func (obsClient ObsClient) SetBucketAcl(input *SetBucketAclInput) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketAclInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketAcl", HTTP_PUT, input.Bucket, input, output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketAcl retrieves the access control list of a bucket.
+func (obsClient ObsClient) GetBucketAcl(bucketName string) (output *GetBucketAclOutput, err error) {
+	output = &GetBucketAclOutput{}
+	err = obsClient.doActionWithBucket("GetBucketAcl", HTTP_GET, bucketName, newSubResourceSerial(SubResourceAcl), output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketPolicy sets the policy document of a bucket.
+// NOTE(review): the error message reads "SetBucketPolicy is nil" while every
+// sibling says "<Name>Input is nil" -- likely a typo to fix in a follow-up.
+func (obsClient ObsClient) SetBucketPolicy(input *SetBucketPolicyInput) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketPolicy is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketPolicy", HTTP_PUT, input.Bucket, input, output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketPolicy retrieves the policy document of a bucket.  It uses the V2
+// action helper, presumably because the policy body is raw JSON rather than
+// XML -- TODO(review): confirm against doActionWithBucketV2.
+func (obsClient ObsClient) GetBucketPolicy(bucketName string) (output *GetBucketPolicyOutput, err error) {
+	output = &GetBucketPolicyOutput{}
+	err = obsClient.doActionWithBucketV2("GetBucketPolicy", HTTP_GET, bucketName, newSubResourceSerial(SubResourcePolicy), output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// DeleteBucketPolicy removes the policy document of a bucket.
+func (obsClient ObsClient) DeleteBucketPolicy(bucketName string) (output *BaseModel, err error) {
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("DeleteBucketPolicy", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourcePolicy), output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketCors sets the CORS configuration of a bucket.
+func (obsClient ObsClient) SetBucketCors(input *SetBucketCorsInput) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketCorsInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketCors", HTTP_PUT, input.Bucket, input, output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketCors retrieves the CORS configuration of a bucket.
+func (obsClient ObsClient) GetBucketCors(bucketName string) (output *GetBucketCorsOutput, err error) {
+	output = &GetBucketCorsOutput{}
+	err = obsClient.doActionWithBucket("GetBucketCors", HTTP_GET, bucketName, newSubResourceSerial(SubResourceCors), output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// DeleteBucketCors removes the CORS configuration of a bucket.
+func (obsClient ObsClient) DeleteBucketCors(bucketName string) (output *BaseModel, err error) {
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("DeleteBucketCors", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceCors), output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketVersioning sets the versioning state of a bucket.
+func (obsClient ObsClient) SetBucketVersioning(input *SetBucketVersioningInput) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketVersioningInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketVersioning", HTTP_PUT, input.Bucket, input, output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketVersioning retrieves the versioning state of a bucket.
+func (obsClient ObsClient) GetBucketVersioning(bucketName string) (output *GetBucketVersioningOutput, err error) {
+	output = &GetBucketVersioningOutput{}
+	err = obsClient.doActionWithBucket("GetBucketVersioning", HTTP_GET, bucketName, newSubResourceSerial(SubResourceVersioning), output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketWebsiteConfiguration sets the static-website configuration of a bucket.
+func (obsClient ObsClient) SetBucketWebsiteConfiguration(input *SetBucketWebsiteConfigurationInput) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketWebsiteConfigurationInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketWebsiteConfiguration", HTTP_PUT, input.Bucket, input, output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketWebsiteConfiguration retrieves the static-website configuration of a bucket.
+func (obsClient ObsClient) GetBucketWebsiteConfiguration(bucketName string) (output *GetBucketWebsiteConfigurationOutput, err error) {
+	output = &GetBucketWebsiteConfigurationOutput{}
+	err = obsClient.doActionWithBucket("GetBucketWebsiteConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceWebsite), output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// DeleteBucketWebsiteConfiguration removes the static-website configuration of a bucket.
+func (obsClient ObsClient) DeleteBucketWebsiteConfiguration(bucketName string) (output *BaseModel, err error) {
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("DeleteBucketWebsiteConfiguration", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceWebsite), output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketLoggingConfiguration sets the access-logging configuration of a bucket.
+func (obsClient ObsClient) SetBucketLoggingConfiguration(input *SetBucketLoggingConfigurationInput) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketLoggingConfigurationInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketLoggingConfiguration", HTTP_PUT, input.Bucket, input, output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketLoggingConfiguration retrieves the access-logging configuration of a bucket.
+func (obsClient ObsClient) GetBucketLoggingConfiguration(bucketName string) (output *GetBucketLoggingConfigurationOutput, err error) {
+	output = &GetBucketLoggingConfigurationOutput{}
+	err = obsClient.doActionWithBucket("GetBucketLoggingConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLogging), output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketLifecycleConfiguration sets the lifecycle rules of a bucket.
+func (obsClient ObsClient) SetBucketLifecycleConfiguration(input *SetBucketLifecycleConfigurationInput) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketLifecycleConfigurationInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketLifecycleConfiguration", HTTP_PUT, input.Bucket, input, output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketLifecycleConfiguration retrieves the lifecycle rules of a bucket.
+func (obsClient ObsClient) GetBucketLifecycleConfiguration(bucketName string) (output *GetBucketLifecycleConfigurationOutput, err error) {
+	output = &GetBucketLifecycleConfigurationOutput{}
+	err = obsClient.doActionWithBucket("GetBucketLifecycleConfiguration", HTTP_GET, bucketName, newSubResourceSerial(SubResourceLifecycle), output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// DeleteBucketLifecycleConfiguration removes the lifecycle rules of a bucket.
+func (obsClient ObsClient) DeleteBucketLifecycleConfiguration(bucketName string) (output *BaseModel, err error) {
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("DeleteBucketLifecycleConfiguration", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceLifecycle), output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketTagging sets the tag set of a bucket.
+func (obsClient ObsClient) SetBucketTagging(input *SetBucketTaggingInput) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketTaggingInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketTagging", HTTP_PUT, input.Bucket, input, output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketTagging retrieves the tag set of a bucket.
+func (obsClient ObsClient) GetBucketTagging(bucketName string) (output *GetBucketTaggingOutput, err error) {
+	output = &GetBucketTaggingOutput{}
+	err = obsClient.doActionWithBucket("GetBucketTagging", HTTP_GET, bucketName, newSubResourceSerial(SubResourceTagging), output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// DeleteBucketTagging removes the tag set of a bucket.
+func (obsClient ObsClient) DeleteBucketTagging(bucketName string) (output *BaseModel, err error) {
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("DeleteBucketTagging", HTTP_DELETE, bucketName, newSubResourceSerial(SubResourceTagging), output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetBucketNotification sets the event-notification configuration of a bucket.
+func (obsClient ObsClient) SetBucketNotification(input *SetBucketNotificationInput) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetBucketNotificationInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucket("SetBucketNotification", HTTP_PUT, input.Bucket, input, output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetBucketNotification retrieves the event-notification configuration of a bucket.
+func (obsClient ObsClient) GetBucketNotification(bucketName string) (output *GetBucketNotificationOutput, err error) {
+	output = &GetBucketNotificationOutput{}
+	err = obsClient.doActionWithBucket("GetBucketNotification", HTTP_GET, bucketName, newSubResourceSerial(SubResourceNotification), output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// DeleteObject deletes an object (or a specific version, if set in input);
+// the parsed response reports the delete-marker details.
+func (obsClient ObsClient) DeleteObject(input *DeleteObjectInput) (output *DeleteObjectOutput, err error) {
+	if input == nil {
+		return nil, errors.New("DeleteObjectInput is nil")
+	}
+	output = &DeleteObjectOutput{}
+	err = obsClient.doActionWithBucketAndKey("DeleteObject", HTTP_DELETE, input.Bucket, input.Key, input, output)
+	if err != nil {
+		output = nil
+	} else {
+		ParseDeleteObjectOutput(output)
+	}
+	return
+}
+
+// DeleteObjects deletes a batch of objects in a single request.
+func (obsClient ObsClient) DeleteObjects(input *DeleteObjectsInput) (output *DeleteObjectsOutput, err error) {
+	if input == nil {
+		return nil, errors.New("DeleteObjectsInput is nil")
+	}
+	output = &DeleteObjectsOutput{}
+	err = obsClient.doActionWithBucket("DeleteObjects", HTTP_POST, input.Bucket, input, output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// SetObjectAcl sets the access control list of an object.
+func (obsClient ObsClient) SetObjectAcl(input *SetObjectAclInput) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("SetObjectAclInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucketAndKey("SetObjectAcl", HTTP_PUT, input.Bucket, input.Key, input, output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetObjectAcl retrieves the access control list of an object; the version id
+// response header, when present, is surfaced as output.VersionId.
+func (obsClient ObsClient) GetObjectAcl(input *GetObjectAclInput) (output *GetObjectAclOutput, err error) {
+	if input == nil {
+		return nil, errors.New("GetObjectAclInput is nil")
+	}
+	output = &GetObjectAclOutput{}
+	err = obsClient.doActionWithBucketAndKey("GetObjectAcl", HTTP_GET, input.Bucket, input.Key, input, output)
+	if err != nil {
+		output = nil
+	} else {
+		if versionId, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
+			output.VersionId = versionId[0]
+		}
+	}
+	return
+}
+
+// RestoreObject requests restoration of an archived (cold) object.
+func (obsClient ObsClient) RestoreObject(input *RestoreObjectInput) (output *BaseModel, err error) {
+	if input == nil {
+		return nil, errors.New("RestoreObjectInput is nil")
+	}
+	output = &BaseModel{}
+	err = obsClient.doActionWithBucketAndKey("RestoreObject", HTTP_POST, input.Bucket, input.Key, input, output)
+	if err != nil {
+		output = nil
+	}
+	return
+}
+
+// GetObjectMetadata retrieves object metadata via a HEAD request.
+func (obsClient ObsClient) GetObjectMetadata(input *GetObjectMetadataInput) (output *GetObjectMetadataOutput, err error) {
+	if input == nil {
+		return nil, errors.New("GetObjectMetadataInput is nil")
+	}
+	output = &GetObjectMetadataOutput{}
+	err = obsClient.doActionWithBucketAndKey("GetObjectMetadata", HTTP_HEAD, input.Bucket, input.Key, input, output)
+	if err != nil {
+		output = nil
+	} else {
+		ParseGetObjectMetadataOutput(output)
+	}
+	return
+}
+
+// GetObject downloads an object; the response body and parsed metadata are
+// carried in the returned output.
+func (obsClient ObsClient) GetObject(input *GetObjectInput) (output *GetObjectOutput, err error) {
+	if input == nil {
+		return nil, errors.New("GetObjectInput is nil")
+	}
+	output = &GetObjectOutput{}
+	err = obsClient.doActionWithBucketAndKey("GetObject", HTTP_GET, input.Bucket, input.Key, input, output)
+	if err != nil {
+		output = nil
+	} else {
+		ParseGetObjectOutput(output)
+	}
+	return
+}
+
+// PutObject uploads an object from input.Body.  When ContentType is unset it
+// is inferred from the key's file extension.  Only a *strings.Reader body is
+// treated as repeatable (retryable); any other reader goes through the
+// un-repeatable path.
+func (obsClient ObsClient) PutObject(input *PutObjectInput) (output *PutObjectOutput, err error) {
+	if input == nil {
+		return nil, errors.New("PutObjectInput is nil")
+	}
+
+	// Infer the content type from the extension after the last dot; when
+	// there is no dot, LastIndex(-1)+1 == 0 and the whole key is looked up,
+	// which simply misses in the mime table.
+	if input.ContentType == "" && input.Key != "" {
+		if contentType, ok := mime_types[input.Key[strings.LastIndex(input.Key, ".")+1:]]; ok {
+			input.ContentType = contentType
+		}
+	}
+
+	output = &PutObjectOutput{}
+	var repeatable bool
+	if input.Body != nil {
+		_, repeatable = input.Body.(*strings.Reader)
+		if input.ContentLength > 0 {
+			// Cap the body at ContentLength bytes.
+			input.Body = &readerWrapper{reader: input.Body, totalCount: input.ContentLength}
+		}
+	}
+	if repeatable {
+		err = obsClient.doActionWithBucketAndKey("PutObject", HTTP_PUT, input.Bucket, input.Key, input, output)
+	} else {
+		err = obsClient.doActionWithBucketAndKeyUnRepeatable("PutObject", HTTP_PUT, input.Bucket, input.Key, input, output)
+	}
+	if err != nil {
+		output = nil
+	} else {
+		ParsePutObjectOutput(output)
+	}
+	return
+}
+
+// PutFile uploads an object from a local file.  An empty SourceFile results
+// in an empty body.  ContentType is inferred first from the key's extension,
+// then from the source file's extension.  ContentLength is capped at the
+// actual file size.
+func (obsClient ObsClient) PutFile(input *PutFileInput) (output *PutObjectOutput, err error) {
+	if input == nil {
+		return nil, errors.New("PutFileInput is nil")
+	}
+
+	var body io.Reader
+	sourceFile := strings.TrimSpace(input.SourceFile)
+	if sourceFile != "" {
+		fd, err := os.Open(sourceFile)
+		if err != nil {
+			return nil, err
+		}
+		// Closed when PutFile returns, i.e. after the upload completes.
+		defer fd.Close()
+
+		stat, err := fd.Stat()
+		if err != nil {
+			return nil, err
+		}
+		fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile}
+		fileReaderWrapper.reader = fd
+		if input.ContentLength > 0 {
+			if input.ContentLength > stat.Size() {
+				input.ContentLength = stat.Size()
+			}
+			fileReaderWrapper.totalCount = input.ContentLength
+		} else {
+			fileReaderWrapper.totalCount = stat.Size()
+		}
+		body = fileReaderWrapper
+	}
+
+	// Delegate the actual upload through a regular PutObjectInput.
+	_input := &PutObjectInput{}
+	_input.PutObjectBasicInput = input.PutObjectBasicInput
+	_input.Body = body
+
+	if _input.ContentType == "" && _input.Key != "" {
+		if contentType, ok := mime_types[_input.Key[strings.LastIndex(_input.Key, ".")+1:]]; ok {
+			_input.ContentType = contentType
+		} else if contentType, ok := mime_types[sourceFile[strings.LastIndex(sourceFile, ".")+1:]]; ok {
+			_input.ContentType = contentType
+		}
+	}
+
+	output = &PutObjectOutput{}
+	err = obsClient.doActionWithBucketAndKey("PutFile", HTTP_PUT, _input.Bucket, _input.Key, _input, output)
+	if err != nil {
+		output = nil
+	} else {
+		ParsePutObjectOutput(output)
+	}
+	return
+}
+
+// CopyObject performs a server-side copy of CopySourceBucket/CopySourceKey
+// onto Bucket/Key. Both source coordinates must be non-blank.
+func (obsClient ObsClient) CopyObject(input *CopyObjectInput) (output *CopyObjectOutput, err error) {
+    if input == nil {
+        return nil, errors.New("CopyObjectInput is nil")
+    }
+    if strings.TrimSpace(input.CopySourceBucket) == "" {
+        return nil, errors.New("Source bucket is empty")
+    }
+    if strings.TrimSpace(input.CopySourceKey) == "" {
+        return nil, errors.New("Source key is empty")
+    }
+
+    result := &CopyObjectOutput{}
+    if err = obsClient.doActionWithBucketAndKey("CopyObject", HTTP_PUT, input.Bucket, input.Key, input, result); err != nil {
+        return nil, err
+    }
+    ParseCopyObjectOutput(result)
+    return result, nil
+}
+
+// AbortMultipartUpload cancels the multipart upload identified by
+// input.UploadId, discarding any parts already uploaded.
+func (obsClient ObsClient) AbortMultipartUpload(input *AbortMultipartUploadInput) (output *BaseModel, err error) {
+    switch {
+    case input == nil:
+        return nil, errors.New("AbortMultipartUploadInput is nil")
+    case input.UploadId == "":
+        return nil, errors.New("UploadId is empty")
+    }
+    result := &BaseModel{}
+    if err = obsClient.doActionWithBucketAndKey("AbortMultipartUpload", HTTP_DELETE, input.Bucket, input.Key, input, result); err != nil {
+        return nil, err
+    }
+    return result, nil
+}
+
+// InitiateMultipartUpload starts a multipart upload for Bucket/Key and
+// returns the upload's identifier for use by UploadPart/CopyPart/
+// CompleteMultipartUpload. Content-Type, when unset, is inferred from the
+// key's extension.
+func (obsClient ObsClient) InitiateMultipartUpload(input *InitiateMultipartUploadInput) (output *InitiateMultipartUploadOutput, err error) {
+    if input == nil {
+        return nil, errors.New("InitiateMultipartUploadInput is nil")
+    }
+
+    if input.ContentType == "" && input.Key != "" {
+        if contentType, ok := mime_types[input.Key[strings.LastIndex(input.Key, ".")+1:]]; ok {
+            input.ContentType = contentType
+        }
+    }
+
+    output = &InitiateMultipartUploadOutput{}
+    err = obsClient.doActionWithBucketAndKey("InitiateMultipartUpload", HTTP_POST, input.Bucket, input.Key, input, output)
+    if err != nil {
+        output = nil
+    } else {
+        ParseInitiateMultipartUploadOutput(output)
+    }
+    return
+}
+
+// UploadPart uploads one part of a multipart upload. The part data comes
+// from input.Body or, when Body is nil, from input.SourceFile restricted by
+// Offset/PartSize (out-of-range values are clamped). On success the part
+// number is echoed back on the output; output is nil whenever err != nil.
+func (obsClient ObsClient) UploadPart(input *UploadPartInput) (output *UploadPartOutput, err error) {
+    if input == nil {
+        return nil, errors.New("UploadPartInput is nil")
+    }
+    if input.UploadId == "" {
+        return nil, errors.New("UploadId is empty")
+    }
+
+    output = &UploadPartOutput{}
+    // Only a *strings.Reader body is known to be re-readable, so only then
+    // is the repeatable (retry-capable) request path taken.
+    var repeatable bool
+    if input.Body != nil {
+        _, repeatable = input.Body.(*strings.Reader)
+        if input.PartSize > 0 {
+            input.Body = &readerWrapper{reader: input.Body, totalCount: input.PartSize}
+        }
+    } else if sourceFile := strings.TrimSpace(input.SourceFile); sourceFile != "" {
+        fd, err := os.Open(sourceFile)
+        if err != nil {
+            return nil, err
+        }
+        defer fd.Close()
+
+        stat, err := fd.Stat()
+        if err != nil {
+            return nil, err
+        }
+        fileSize := stat.Size()
+        fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile}
+        fileReaderWrapper.reader = fd
+
+        // An out-of-range offset falls back to the start of the file.
+        if input.Offset < 0 || input.Offset > fileSize {
+            input.Offset = 0
+        }
+
+        // A missing or oversized part size means "the rest of the file".
+        if input.PartSize <= 0 || input.PartSize > (fileSize-input.Offset) {
+            input.PartSize = fileSize - input.Offset
+        }
+        fileReaderWrapper.totalCount = input.PartSize
+        // Fail fast when the file cannot be positioned at the requested
+        // offset; the original code ignored this error and could silently
+        // upload the wrong byte range.
+        if _, seekErr := fd.Seek(input.Offset, 0); seekErr != nil {
+            return nil, seekErr
+        }
+        input.Body = fileReaderWrapper
+        repeatable = true
+    }
+    if repeatable {
+        err = obsClient.doActionWithBucketAndKey("UploadPart", HTTP_PUT, input.Bucket, input.Key, input, output)
+    } else {
+        err = obsClient.doActionWithBucketAndKeyUnRepeatable("UploadPart", HTTP_PUT, input.Bucket, input.Key, input, output)
+    }
+    if err != nil {
+        output = nil
+    } else {
+        ParseUploadPartOutput(output)
+        output.PartNumber = input.PartNumber
+    }
+    return
+}
+
+// CompleteMultipartUpload assembles the previously uploaded parts into the
+// final object. The part list is sorted in place by part number before the
+// request is built.
+func (obsClient ObsClient) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (output *CompleteMultipartUploadOutput, err error) {
+    if input == nil {
+        return nil, errors.New("CompleteMultipartUploadInput is nil")
+    }
+
+    if input.UploadId == "" {
+        return nil, errors.New("UploadId is empty")
+    }
+
+    var parts partSlice = input.Parts
+    sort.Sort(parts)
+
+    output = &CompleteMultipartUploadOutput{}
+    err = obsClient.doActionWithBucketAndKey("CompleteMultipartUpload", HTTP_POST, input.Bucket, input.Key, input, output)
+    if err != nil {
+        output = nil
+    } else {
+        ParseCompleteMultipartUploadOutput(output)
+    }
+    return
+}
+
+// ListParts lists the parts uploaded so far for the multipart upload
+// identified by input.UploadId.
+func (obsClient ObsClient) ListParts(input *ListPartsInput) (output *ListPartsOutput, err error) {
+    switch {
+    case input == nil:
+        return nil, errors.New("ListPartsInput is nil")
+    case input.UploadId == "":
+        return nil, errors.New("UploadId is empty")
+    }
+    result := &ListPartsOutput{}
+    if err = obsClient.doActionWithBucketAndKey("ListParts", HTTP_GET, input.Bucket, input.Key, input, result); err != nil {
+        return nil, err
+    }
+    return result, nil
+}
+
+// CopyPart copies (server-side) a range of an existing object into one part
+// of an ongoing multipart upload. UploadId, CopySourceBucket and
+// CopySourceKey are all mandatory.
+func (obsClient ObsClient) CopyPart(input *CopyPartInput) (output *CopyPartOutput, err error) {
+    if input == nil {
+        return nil, errors.New("CopyPartInput is nil")
+    }
+    if input.UploadId == "" {
+        return nil, errors.New("UploadId is empty")
+    }
+    if strings.TrimSpace(input.CopySourceBucket) == "" {
+        return nil, errors.New("Source bucket is empty")
+    }
+    if strings.TrimSpace(input.CopySourceKey) == "" {
+        return nil, errors.New("Source key is empty")
+    }
+
+    output = &CopyPartOutput{}
+    err = obsClient.doActionWithBucketAndKey("CopyPart", HTTP_PUT, input.Bucket, input.Key, input, output)
+    if err != nil {
+        output = nil
+    } else {
+        ParseCopyPartOutput(output)
+        // Propagate the requested part number so the result can be fed
+        // straight into CompleteMultipartUpload.
+        output.PartNumber = input.PartNumber
+    }
+    return
+}
diff --git a/openstack/obs/conf.go b/openstack/obs/conf.go
new file mode 100644
index 000000000..a5f0a5319
--- /dev/null
+++ b/openstack/obs/conf.go
@@ -0,0 +1,336 @@
+package obs
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// securityProvider holds the credentials used to sign requests: the access
+// key, secret key, and an optional temporary security token.
+type securityProvider struct {
+    ak            string
+    sk            string
+    securityToken string
+}
+
+// urlHolder is the parsed service endpoint: scheme ("http"/"https"), host
+// name, and port.
+type urlHolder struct {
+    scheme string
+    host   string
+    port   int
+}
+
+// config aggregates all client-level settings: credentials, endpoint,
+// signing scheme, addressing style, timeouts (seconds), retry policy, proxy
+// and TLS options. It is normalized by initConfigWithDefault before use.
+type config struct {
+    securityProvider *securityProvider
+    urlHolder        *urlHolder
+    endpoint         string
+    signature        SignatureType
+    pathStyle        bool
+    region           string
+    // Timeouts below are expressed in seconds.
+    connectTimeout   int
+    socketTimeout    int
+    headerTimeout    int
+    idleConnTimeout  int
+    finalTimeout     int // derived: socketTimeout * 10 (see initConfigWithDefault)
+    maxRetryCount    int
+    proxyUrl         string
+    maxConnsPerHost  int
+    sslVerify        bool
+    pemCerts         []byte
+}
+
+// String renders the non-secret configuration for logging/debugging;
+// credentials are deliberately omitted. (Fix: the original format string was
+// missing the ", " separator between socketTimeout and headerTimeout.)
+func (conf config) String() string {
+    return fmt.Sprintf("[endpoint:%s, signature:%s, pathStyle:%v, region:%s"+
+        "\nconnectTimeout:%d, socketTimeout:%d, headerTimeout:%d, idleConnTimeout:%d"+
+        "\nmaxRetryCount:%d, maxConnsPerHost:%d, sslVerify:%v, proxyUrl:%s]",
+        conf.endpoint, conf.signature, conf.pathStyle, conf.region,
+        conf.connectTimeout, conf.socketTimeout, conf.headerTimeout, conf.idleConnTimeout,
+        conf.maxRetryCount, conf.maxConnsPerHost, conf.sslVerify, conf.proxyUrl,
+    )
+}
+
+// configurer is a functional option that mutates the client config; each
+// With* helper below constructs one.
+type configurer func(conf *config)
+
+// WithSslVerify enables or disables TLS certificate verification, trusting
+// only the system roots.
+func WithSslVerify(sslVerify bool) configurer {
+    return WithSslVerifyAndPemCerts(sslVerify, nil)
+}
+
+// WithSslVerifyAndPemCerts enables or disables TLS verification and, when
+// enabled, optionally supplies a PEM-encoded CA bundle to trust instead of
+// the system roots.
+func WithSslVerifyAndPemCerts(sslVerify bool, pemCerts []byte) configurer {
+    return func(conf *config) {
+        conf.sslVerify = sslVerify
+        conf.pemCerts = pemCerts
+    }
+}
+
+// WithHeaderTimeout sets the response-header timeout in seconds.
+func WithHeaderTimeout(headerTimeout int) configurer {
+    return func(conf *config) {
+        conf.headerTimeout = headerTimeout
+    }
+}
+
+// WithProxyUrl routes requests through the given proxy URL.
+func WithProxyUrl(proxyUrl string) configurer {
+    return func(conf *config) {
+        conf.proxyUrl = proxyUrl
+    }
+}
+
+// WithMaxConnections caps the idle connections kept per host (and in total).
+func WithMaxConnections(maxConnsPerHost int) configurer {
+    return func(conf *config) {
+        conf.maxConnsPerHost = maxConnsPerHost
+    }
+}
+
+// WithPathStyle selects path-style addressing (bucket in the URL path)
+// instead of virtual-hosted style (bucket in the host name).
+func WithPathStyle(pathStyle bool) configurer {
+    return func(conf *config) {
+        conf.pathStyle = pathStyle
+    }
+}
+
+// WithSignature selects the request-signing algorithm (v2 or v4).
+func WithSignature(signature SignatureType) configurer {
+    return func(conf *config) {
+        conf.signature = signature
+    }
+}
+
+// WithRegion sets the region used when building the V4 credential scope.
+func WithRegion(region string) configurer {
+    return func(conf *config) {
+        conf.region = region
+    }
+}
+
+// WithConnectTimeout sets the TCP dial timeout in seconds.
+func WithConnectTimeout(connectTimeout int) configurer {
+    return func(conf *config) {
+        conf.connectTimeout = connectTimeout
+    }
+}
+
+// WithSocketTimeout sets the per-read/write socket timeout in seconds.
+func WithSocketTimeout(socketTimeout int) configurer {
+    return func(conf *config) {
+        conf.socketTimeout = socketTimeout
+    }
+}
+
+// WithIdleConnTimeout sets how long (seconds) an idle connection is kept in
+// the pool.
+func WithIdleConnTimeout(idleConnTimeout int) configurer {
+    return func(conf *config) {
+        conf.idleConnTimeout = idleConnTimeout
+    }
+}
+
+// WithMaxRetryCount sets the maximum retry count used by the request layer.
+func WithMaxRetryCount(maxRetryCount int) configurer {
+    return func(conf *config) {
+        conf.maxRetryCount = maxRetryCount
+    }
+}
+
+// WithSecurityToken attaches a temporary security token to the credentials.
+func WithSecurityToken(securityToken string) configurer {
+    return func(conf *config) {
+        conf.securityProvider.securityToken = securityToken
+    }
+}
+
+// initConfigWithDefault normalizes and validates the configuration in place:
+// trims credentials and the endpoint, parses the endpoint into
+// scheme/host/port, and substitutes defaults for any unset region, timeout,
+// retry count or connection limit. It fails only when the endpoint is empty.
+func (conf *config) initConfigWithDefault() error {
+    conf.securityProvider.ak = strings.TrimSpace(conf.securityProvider.ak)
+    conf.securityProvider.sk = strings.TrimSpace(conf.securityProvider.sk)
+    conf.securityProvider.securityToken = strings.TrimSpace(conf.securityProvider.securityToken)
+    conf.endpoint = strings.TrimSpace(conf.endpoint)
+    if conf.endpoint == "" {
+        return errors.New("endpoint is not set")
+    }
+
+    // Strip any query string and every trailing slash from the endpoint.
+    if index := strings.Index(conf.endpoint, "?"); index > 0 {
+        conf.endpoint = conf.endpoint[:index]
+    }
+
+    for strings.LastIndex(conf.endpoint, "/") == len(conf.endpoint)-1 {
+        conf.endpoint = conf.endpoint[:len(conf.endpoint)-1]
+    }
+
+    if conf.signature == "" {
+        conf.signature = DEFAULT_SIGNATURE
+    }
+
+    urlHolder := &urlHolder{}
+    var address string
+    if strings.HasPrefix(conf.endpoint, "https://") {
+        urlHolder.scheme = "https"
+        address = conf.endpoint[len("https://"):]
+    } else if strings.HasPrefix(conf.endpoint, "http://") {
+        urlHolder.scheme = "http"
+        address = conf.endpoint[len("http://"):]
+    } else {
+        // No scheme given: plain HTTP is assumed.
+        urlHolder.scheme = "http"
+        address = conf.endpoint
+    }
+
+    // Split an optional ":port" suffix; a malformed port is silently ignored
+    // and the scheme default applies.
+    // NOTE(review): a bracketed IPv6 literal would be split incorrectly here
+    // — confirm whether such endpoints need to be supported.
+    addr := strings.Split(address, ":")
+    if len(addr) == 2 {
+        if port, err := strconv.Atoi(addr[1]); err == nil {
+            urlHolder.port = port
+        }
+    }
+    urlHolder.host = addr[0]
+    if urlHolder.port == 0 {
+        if urlHolder.scheme == "https" {
+            urlHolder.port = 443
+        } else {
+            urlHolder.port = 80
+        }
+    }
+
+    conf.urlHolder = urlHolder
+
+    conf.region = strings.TrimSpace(conf.region)
+    if conf.region == "" {
+        conf.region = DEFAULT_REGION
+    }
+
+    // Timeouts are in seconds; non-positive values fall back to defaults.
+    if conf.connectTimeout <= 0 {
+        conf.connectTimeout = DEFAULT_CONNECT_TIMEOUT
+    }
+
+    if conf.socketTimeout <= 0 {
+        conf.socketTimeout = DEFAULT_SOCKET_TIMEOUT
+    }
+
+    // Hard upper bound handed to the connection delegate (see getTransport).
+    conf.finalTimeout = conf.socketTimeout * 10
+
+    if conf.headerTimeout <= 0 {
+        conf.headerTimeout = DEFAULT_HEADER_TIMEOUT
+    }
+
+    if conf.idleConnTimeout < 0 {
+        conf.idleConnTimeout = DEFAULT_IDLE_CONN_TIMEOUT
+    }
+
+    if conf.maxRetryCount < 0 {
+        conf.maxRetryCount = DEFAULT_MAX_RETRY_COUNT
+    }
+
+    if conf.maxConnsPerHost <= 0 {
+        conf.maxConnsPerHost = DEFAULT_MAX_CONN_PER_HOST
+    }
+
+    conf.proxyUrl = strings.TrimSpace(conf.proxyUrl)
+    return nil
+}
+
+// getTransport builds the http.Transport used by the client: a dialer with
+// the configured connect timeout and a timeout-enforcing connection wrapper,
+// pool limits, an optional proxy, and TLS verification settings (with an
+// optional private CA bundle).
+func (conf *config) getTransport() (*http.Transport, error) {
+    transport := &http.Transport{
+        Dial: func(network, addr string) (net.Conn, error) {
+            conn, err := net.DialTimeout(network, addr, time.Second*time.Duration(conf.connectTimeout))
+            if err != nil {
+                return nil, err
+            }
+            // Wrap the connection so reads/writes honor socketTimeout,
+            // bounded overall by finalTimeout.
+            return getConnDelegate(conn, conf.socketTimeout, conf.finalTimeout), nil
+        },
+        MaxIdleConns:          conf.maxConnsPerHost,
+        MaxIdleConnsPerHost:   conf.maxConnsPerHost,
+        ResponseHeaderTimeout: time.Second * time.Duration(conf.headerTimeout),
+        IdleConnTimeout:       time.Second * time.Duration(conf.idleConnTimeout),
+    }
+
+    if conf.proxyUrl != "" {
+        proxyUrl, err := url.Parse(conf.proxyUrl)
+        if err != nil {
+            return nil, err
+        }
+        transport.Proxy = http.ProxyURL(proxyUrl)
+    }
+
+    // With verification on and a PEM bundle supplied, trust only those CAs;
+    // otherwise the system roots are used.
+    tlsConfig := &tls.Config{InsecureSkipVerify: !conf.sslVerify}
+    if conf.sslVerify && conf.pemCerts != nil {
+        pool := x509.NewCertPool()
+        pool.AppendCertsFromPEM(conf.pemCerts)
+        tlsConfig.RootCAs = pool
+    }
+    transport.TLSClientConfig = tlsConfig
+
+    return transport, nil
+}
+
+// checkRedirectFunc stops the http.Client from following redirects; the
+// redirect response itself is returned to the caller unmodified.
+func checkRedirectFunc(req *http.Request, via []*http.Request) error {
+    return http.ErrUseLastResponse
+}
+
+// formatUrls builds the full request URL and, in parallel, the
+// canonicalized resource string used for signing. Virtual-hosted style puts
+// the bucket into the host name unless pathStyle is set. Query parameters
+// are appended in sorted key order so the signature is deterministic; only
+// allow-listed sub-resources enter the V2 canonicalized URL.
+func (conf *config) formatUrls(bucketName, objectKey string, params map[string]string) (requestUrl string, canonicalizedUrl string) {
+
+    urlHolder := conf.urlHolder
+
+    if bucketName == "" {
+        requestUrl = fmt.Sprintf("%s://%s:%d", urlHolder.scheme, urlHolder.host, urlHolder.port)
+        canonicalizedUrl = "/"
+    } else {
+        if conf.pathStyle {
+            requestUrl = fmt.Sprintf("%s://%s:%d/%s", urlHolder.scheme, urlHolder.host, urlHolder.port, bucketName)
+            canonicalizedUrl = "/" + bucketName
+        } else {
+            requestUrl = fmt.Sprintf("%s://%s.%s:%d", urlHolder.scheme, bucketName, urlHolder.host, urlHolder.port)
+            if conf.signature == "v2" {
+                canonicalizedUrl = "/" + bucketName + "/"
+            } else {
+                canonicalizedUrl = "/"
+            }
+        }
+    }
+
+    if objectKey != "" {
+        // NOTE(review): QueryEscape encodes spaces as "+" and escapes "/",
+        // which differs from path encoding — confirm that keys containing
+        // slashes or spaces sign and resolve correctly.
+        encodeObjectKey := url.QueryEscape(objectKey)
+        requestUrl += "/" + encodeObjectKey
+        if !strings.HasSuffix(canonicalizedUrl, "/") {
+            canonicalizedUrl += "/"
+        }
+        canonicalizedUrl += encodeObjectKey
+    }
+
+    keys := make([]string, 0, len(params))
+    for key, _ := range params {
+        keys = append(keys, strings.TrimSpace(key))
+    }
+    sort.Strings(keys)
+    i := 0
+    for index, key := range keys {
+        if index == 0 {
+            requestUrl += "?"
+        } else {
+            requestUrl += "&"
+        }
+        _key := url.QueryEscape(key)
+        requestUrl += _key
+
+        _value := params[key]
+
+        if conf.signature == "v4" {
+            requestUrl += "=" + url.QueryEscape(_value)
+        } else {
+            if _value != "" {
+                requestUrl += "=" + url.QueryEscape(_value)
+                _value = "=" + _value
+            } else {
+                _value = ""
+            }
+            // Only recognized sub-resources participate in the V2
+            // canonicalized resource string.
+            if _, ok := allowed_resource_parameter_names[strings.ToLower(key)]; ok {
+                if i == 0 {
+                    canonicalizedUrl += "?"
+                } else {
+                    canonicalizedUrl += "&"
+                }
+                canonicalizedUrl += getQueryUrl(_key, _value)
+                i++
+            }
+        }
+    }
+    return
+}
+
+// getQueryUrl joins an already-escaped parameter name with its value
+// fragment (either empty or "="-prefixed) into one canonical query element.
+func getQueryUrl(key, value string) string {
+    return key + value
+}
diff --git a/openstack/obs/const.go b/openstack/obs/const.go
new file mode 100644
index 000000000..24716162b
--- /dev/null
+++ b/openstack/obs/const.go
@@ -0,0 +1,446 @@
+package obs
+
+// Wire-level constants shared across the obs package: SDK identity, header
+// and query-parameter names, date/signing formats, client defaults, and the
+// HTTP verbs used when building requests.
+const (
+    obs_sdk_version = "2.2.1"
+    USER_AGENT = "obs-sdk-go/" + obs_sdk_version
+    HEADER_PREFIX = "x-amz-"
+    HEADER_PREFIX_META = "x-amz-meta-"
+    HEADER_DATE_AMZ = "x-amz-date"
+    HEADER_STS_TOKEN_AMZ = "x-amz-security-token"
+
+    PREFIX_META = "meta-"
+
+    HEADER_CONTENT_SHA256_AMZ = "x-amz-content-sha256"
+    HEADER_ACL_AMZ = "x-amz-acl"
+    HEADER_COPY_SOURCE_AMZ = "x-amz-copy-source"
+    HEADER_COPY_SOURCE_RANGE_AMZ = "x-amz-copy-source-range"
+    HEADER_RANGE = "Range"
+    HEADER_STORAGE_CLASS = "x-default-storage-class"
+    HEADER_REQUEST_ID = "request-id"
+    HEADER_BUCKET_REGION = "bucket-region"
+    HEADER_ACCESS_CONRTOL_ALLOW_ORIGIN = "access-control-allow-origin"
+    HEADER_ACCESS_CONRTOL_ALLOW_HEADERS = "access-control-allow-headers"
+    HEADER_ACCESS_CONRTOL_MAX_AGE = "access-control-max-age"
+    HEADER_ACCESS_CONRTOL_ALLOW_METHODS = "access-control-allow-methods"
+    HEADER_ACCESS_CONRTOL_EXPOSE_HEADERS = "access-control-expose-headers"
+    HEADER_VERSION_ID = "version-id"
+    HEADER_COPY_SOURCE_VERSION_ID = "copy-source-version-id"
+    HEADER_DELETE_MARKER = "delete-marker"
+    HEADER_WEBSITE_REDIRECT_LOCATION = "website-redirect-location"
+    HEADER_WEBSITE_REDIRECT_LOCATION_AMZ = "x-amz-website-redirect-location"
+    HEADER_METADATA_DIRECTIVE_AMZ = "x-amz-metadata-directive"
+    HEADER_EXPIRATION = "expiration"
+    HEADER_RESTORE = "restore"
+    HEADER_STORAGE_CLASS2 = "storage-class"
+    HEADER_STORAGE_CLASS2_AMZ = "x-amz-storage-class"
+    HEADER_CONTENT_LENGTH = "content-length"
+    HEADER_CONTENT_TYPE = "content-type"
+    HEADER_CONTENT_LANGUAGE = "content-language"
+    HEADER_EXPIRES = "expires"
+    HEADER_CACHE_CONTROL = "cache-control"
+    HEADER_CONTENT_DISPOSITION = "content-disposition"
+    HEADER_CONTENT_ENCODING = "content-encoding"
+
+    HEADER_ETAG = "etag"
+    HEADER_LASTMODIFIED = "last-modified"
+
+    HEADER_COPY_SOURCE_IF_MATCH_AMZ = "x-amz-copy-source-if-match"
+    HEADER_COPY_SOURCE_IF_NONE_MATCH_AMZ = "x-amz-copy-source-if-none-match"
+    HEADER_COPY_SOURCE_IF_MODIFIED_SINCE_AMZ = "x-amz-copy-source-if-modified-since"
+    HEADER_COPY_SOURCE_IF_UNMODIFIED_SINCE_AMZ = "x-amz-copy-source-if-unmodified-since"
+
+    HEADER_IF_MATCH = "If-Match"
+    HEADER_IF_NONE_MATCH = "If-None-Match"
+    HEADER_IF_MODIFIED_SINCE = "If-Modified-Since"
+    HEADER_IF_UNMODIFIED_SINCE = "If-Unmodified-Since"
+
+    HEADER_SSEC_ENCRYPTION = "server-side-encryption-customer-algorithm"
+    HEADER_SSEC_KEY = "server-side-encryption-customer-key"
+    HEADER_SSEC_KEY_MD5 = "server-side-encryption-customer-key-MD5"
+
+    HEADER_SSEKMS_ENCRYPTION = "server-side-encryption"
+    HEADER_SSEKMS_KEY = "server-side-encryption-aws-kms-key-id"
+
+    HEADER_SSEC_ENCRYPTION_AMZ = "x-amz-server-side-encryption-customer-algorithm"
+    HEADER_SSEC_KEY_AMZ = "x-amz-server-side-encryption-customer-key"
+    HEADER_SSEC_KEY_MD5_AMZ = "x-amz-server-side-encryption-customer-key-MD5"
+
+    HEADER_SSEC_COPY_SOURCE_ENCRYPTION_AMZ = "x-amz-copy-source-server-side-encryption-customer-algorithm"
+    HEADER_SSEC_COPY_SOURCE_KEY_AMZ = "x-amz-copy-source-server-side-encryption-customer-key"
+    HEADER_SSEC_COPY_SOURCE_KEY_MD5_AMZ = "x-amz-copy-source-server-side-encryption-customer-key-MD5"
+
+    HEADER_SSEKMS_ENCRYPTION_AMZ = "x-amz-server-side-encryption"
+    HEADER_SSEKMS_KEY_AMZ = "x-amz-server-side-encryption-aws-kms-key-id"
+
+    HEADER_DATE_CAMEL = "Date"
+    HEADER_HOST_CAMEL = "Host"
+    HEADER_HOST = "host"
+    HEADER_AUTH_CAMEL = "Authorization"
+    HEADER_MD5_CAMEL = "Content-MD5"
+    HEADER_LOCATION_CAMEL = "Location"
+    HEADER_CONTENT_LENGTH_CAMEL = "Content-Length"
+    HEADER_CONTENT_TYPE_CAML = "Content-Type"
+    HEADER_USER_AGENT_CAMEL = "User-Agent"
+    HEADER_ORIGIN_CAMEL = "Origin"
+    HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL = "Access-Control-Request-Headers"
+
+    PARAM_VERSION_ID = "versionId"
+    PARAM_RESPONSE_CONTENT_TYPE = "response-content-type"
+    PARAM_RESPONSE_CONTENT_LANGUAGE = "response-content-language"
+    PARAM_RESPONSE_EXPIRES = "response-expires"
+    PARAM_RESPONSE_CACHE_CONTROL = "response-cache-control"
+    PARAM_RESPONSE_CONTENT_DISPOSITION = "response-content-disposition"
+    PARAM_RESPONSE_CONTENT_ENCODING = "response-content-encoding"
+    PARAM_IMAGE_PROCESS = "x-image-process"
+
+    PARAM_ALGORITHM_AMZ_CAMEL = "X-Amz-Algorithm"
+    PARAM_CREDENTIAL_AMZ_CAMEL = "X-Amz-Credential"
+    PARAM_DATE_AMZ_CAMEL = "X-Amz-Date"
+    PARAM_EXPIRES_AMZ_CAMEL = "X-Amz-Expires"
+    PARAM_SIGNEDHEADERS_AMZ_CAMEL = "X-Amz-SignedHeaders"
+    PARAM_SIGNATURE_AMZ_CAMEL = "X-Amz-Signature"
+
+    // Client defaults; all timeouts are in seconds.
+    DEFAULT_SIGNATURE = SignatureV2
+    DEFAULT_REGION = "region"
+    DEFAULT_CONNECT_TIMEOUT = 60
+    DEFAULT_SOCKET_TIMEOUT = 60
+    DEFAULT_HEADER_TIMEOUT = 60
+    DEFAULT_IDLE_CONN_TIMEOUT = 30
+    DEFAULT_MAX_RETRY_COUNT = 3
+    DEFAULT_MAX_CONN_PER_HOST = 1000
+    // SHA-256 digest of the empty payload, used by V4 signing.
+    EMPTY_CONTENT_SHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+    UNSIGNED_PAYLOAD = "UNSIGNED-PAYLOAD"
+    LONG_DATE_FORMAT = "20060102T150405Z"
+    SHORT_DATE_FORMAT = "20060102"
+    ISO8601_DATE_FORMAT = "2006-01-02T15:04:05Z"
+    ISO8601_MIDNIGHT_DATE_FORMAT = "2006-01-02T00:00:00Z"
+    RFC1123_FORMAT = "Mon, 02 Jan 2006 15:04:05 GMT"
+
+    V4_SERVICE_NAME = "s3"
+    V4_SERVICE_SUFFIX = "aws4_request"
+
+    V2_HASH_PREFIX = "AWS"
+    V4_HASH_PREFIX = "AWS4-HMAC-SHA256"
+    V4_HASH_PRE = "AWS4"
+
+    DEFAULT_SSE_KMS_ENCRYPTION = "aws:kms"
+    DEFAULT_SSE_C_ENCRYPTION = "AES256"
+
+    HTTP_GET = "GET"
+    HTTP_POST = "POST"
+    HTTP_PUT = "PUT"
+    HTTP_DELETE = "DELETE"
+    HTTP_HEAD = "HEAD"
+    HTTP_OPTIONS = "OPTIONS"
+)
+
+// SignatureType selects the request-signing algorithm.
+type SignatureType string
+
+const (
+    SignatureV2 SignatureType = "v2"
+    SignatureV4 SignatureType = "v4"
+)
+
+// Lookup tables: the headers folded into V2 signing, header/parameter
+// allow-lists, and a file-extension → MIME-type map.
+var (
+    // Non-prefixed headers included in the V2 string-to-sign.
+    interested_headers = []string{"content-md5", "content-type", "date"}
+
+    // Plain (non-vendor-prefixed) headers passed through from responses.
+    allowed_response_http_header_metadata_names = map[string]bool{
+        "content-type": true,
+        "content-md5": true,
+        "content-length": true,
+        "content-language": true,
+        "expires": true,
+        "origin": true,
+        "cache-control": true,
+        "content-disposition": true,
+        "content-encoding": true,
+        "x-default-storage-class": true,
+        "location": true,
+        "date": true,
+        "etag": true,
+        "host": true,
+        "last-modified": true,
+        "content-range": true,
+        "x-reserved": true,
+        "access-control-allow-origin": true,
+        "access-control-allow-headers": true,
+        "access-control-max-age": true,
+        "access-control-allow-methods": true,
+        "access-control-expose-headers": true,
+        "connection": true,
+    }
+
+    // Plain headers accepted on outgoing requests.
+    allowed_request_http_header_metadata_names = map[string]bool{
+        "content-type": true,
+        "content-md5": true,
+        "content-length": true,
+        "content-language": true,
+        "expires": true,
+        "origin": true,
+        "cache-control": true,
+        "content-disposition": true,
+        "content-encoding": true,
+        "access-control-request-method": true,
+        "access-control-request-headers": true,
+        "x-default-storage-class": true,
+        "location": true,
+        "date": true,
+        "etag": true,
+        "range": true,
+        "host": true,
+        "if-modified-since": true,
+        "if-unmodified-since": true,
+        "if-match": true,
+        "if-none-match": true,
+        "last-modified": true,
+        "content-range": true,
+    }
+
+    // Sub-resource query parameters that enter the canonicalized resource
+    // string for signing (see formatUrls).
+    allowed_resource_parameter_names = map[string]bool{
+        "acl": true,
+        "policy": true,
+        "torrent": true,
+        "logging": true,
+        "location": true,
+        "storageinfo": true,
+        "quota": true,
+        "storagepolicy": true,
+        "requestpayment": true,
+        "versions": true,
+        "versioning": true,
+        "versionid": true,
+        "uploads": true,
+        "uploadid": true,
+        "partnumber": true,
+        "website": true,
+        "notification": true,
+        "lifecycle": true,
+        "deletebucket": true,
+        "delete": true,
+        "cors": true,
+        "restore": true,
+        "tagging": true,
+        "response-content-type": true,
+        "response-content-language": true,
+        "response-expires": true,
+        "response-cache-control": true,
+        "response-content-disposition": true,
+        "response-content-encoding": true,
+        "x-image-process": true,
+    }
+
+    // File extension (without the dot) → Content-Type, used to infer the
+    // MIME type of uploads.
+    mime_types = map[string]string{
+        "7z": "application/x-7z-compressed",
+        "aac": "audio/x-aac",
+        "ai": "application/postscript",
+        "aif": "audio/x-aiff",
+        "asc": "text/plain",
+        "asf": "video/x-ms-asf",
+        "atom": "application/atom+xml",
+        "avi": "video/x-msvideo",
+        "bmp": "image/bmp",
+        "bz2": "application/x-bzip2",
+        "cer": "application/pkix-cert",
+        "crl": "application/pkix-crl",
+        "crt": "application/x-x509-ca-cert",
+        "css": "text/css",
+        "csv": "text/csv",
+        "cu": "application/cu-seeme",
+        "deb": "application/x-debian-package",
+        "doc": "application/msword",
+        "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+        "dvi": "application/x-dvi",
+        "eot": "application/vnd.ms-fontobject",
+        "eps": "application/postscript",
+        "epub": "application/epub+zip",
+        "etx": "text/x-setext",
+        "flac": "audio/flac",
+        "flv": "video/x-flv",
+        "gif": "image/gif",
+        "gz": "application/gzip",
+        "htm": "text/html",
+        "html": "text/html",
+        "ico": "image/x-icon",
+        "ics": "text/calendar",
+        "ini": "text/plain",
+        "iso": "application/x-iso9660-image",
+        "jar": "application/java-archive",
+        "jpe": "image/jpeg",
+        "jpeg": "image/jpeg",
+        "jpg": "image/jpeg",
+        "js": "text/javascript",
+        "json": "application/json",
+        "latex": "application/x-latex",
+        "log": "text/plain",
+        "m4a": "audio/mp4",
+        "m4v": "video/mp4",
+        "mid": "audio/midi",
+        "midi": "audio/midi",
+        "mov": "video/quicktime",
+        "mp3": "audio/mpeg",
+        "mp4": "video/mp4",
+        "mp4a": "audio/mp4",
+        "mp4v": "video/mp4",
+        "mpe": "video/mpeg",
+        "mpeg": "video/mpeg",
+        "mpg": "video/mpeg",
+        "mpg4": "video/mp4",
+        "oga": "audio/ogg",
+        "ogg": "audio/ogg",
+        "ogv": "video/ogg",
+        "ogx": "application/ogg",
+        "pbm": "image/x-portable-bitmap",
+        "pdf": "application/pdf",
+        "pgm": "image/x-portable-graymap",
+        "png": "image/png",
+        "pnm": "image/x-portable-anymap",
+        "ppm": "image/x-portable-pixmap",
+        "ppt": "application/vnd.ms-powerpoint",
+        "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
+        "ps": "application/postscript",
+        "qt": "video/quicktime",
+        "rar": "application/x-rar-compressed",
+        "ras": "image/x-cmu-raster",
+        "rss": "application/rss+xml",
+        "rtf": "application/rtf",
+        "sgm": "text/sgml",
+        "sgml": "text/sgml",
+        "svg": "image/svg+xml",
+        "swf": "application/x-shockwave-flash",
+        "tar": "application/x-tar",
+        "tif": "image/tiff",
+        "tiff": "image/tiff",
+        "torrent": "application/x-bittorrent",
+        "ttf": "application/x-font-ttf",
+        "txt": "text/plain",
+        "wav": "audio/x-wav",
+        "webm": "video/webm",
+        "wma": "audio/x-ms-wma",
+        "wmv": "video/x-ms-wmv",
+        "woff": "application/x-font-woff",
+        "wsdl": "application/wsdl+xml",
+        "xbm": "image/x-xbitmap",
+        "xls": "application/vnd.ms-excel",
+        "xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+        "xml": "application/xml",
+        "xpm": "image/x-xpixmap",
+        "xwd": "image/x-xwindowdump",
+        "yaml": "text/yaml",
+        "yml": "text/yaml",
+        "zip": "application/zip",
+    }
+)
+
+// HttpMethodType enumerates the HTTP verbs accepted by request builders.
+type HttpMethodType string
+
+const (
+    HttpMethodGet HttpMethodType = HTTP_GET
+    HttpMethodPut HttpMethodType = HTTP_PUT
+    HttpMethodPost HttpMethodType = HTTP_POST
+    HttpMethodDelete HttpMethodType = HTTP_DELETE
+    HttpMethodHead HttpMethodType = HTTP_HEAD
+    HttpMethodOptions HttpMethodType = HTTP_OPTIONS
+)
+
+// SubResourceType names the bucket/object sub-resources addressable via the
+// query string.
+type SubResourceType string
+
+const (
+    SubResourceStoragePolicy SubResourceType = "storagePolicy"
+    SubResourceQuota SubResourceType = "quota"
+    SubResourceStorageInfo SubResourceType = "storageinfo"
+    SubResourceLocation SubResourceType = "location"
+    SubResourceAcl SubResourceType = "acl"
+    SubResourcePolicy SubResourceType = "policy"
+    SubResourceCors SubResourceType = "cors"
+    SubResourceVersioning SubResourceType = "versioning"
+    SubResourceWebsite SubResourceType = "website"
+    SubResourceLogging SubResourceType = "logging"
+    SubResourceLifecycle SubResourceType = "lifecycle"
+    SubResourceNotification SubResourceType = "notification"
+    SubResourceTagging SubResourceType = "tagging"
+    SubResourceDelete SubResourceType = "delete"
+    SubResourceVersions SubResourceType = "versions"
+    SubResourceUploads SubResourceType = "uploads"
+    SubResourceRestore SubResourceType = "restore"
+)
+
+// AclType is a canned access-control list name.
+type AclType string
+
+const (
+    AclPrivate AclType = "private"
+    AclPublicRead AclType = "public-read"
+    AclPublicReadWrite AclType = "public-read-write"
+    AclAuthenticatedRead AclType = "authenticated-read"
+    AclBucketOwnerRead AclType = "bucket-owner-read"
+    AclBucketOwnerFullControl AclType = "bucket-owner-full-control"
+    AclLogDeliveryWrite AclType = "log-delivery-write"
+)
+
+// StorageClassType is the object/bucket storage tier.
+type StorageClassType string
+
+const (
+    StorageClassStandard StorageClassType = "STANDARD"
+    StorageClassWarm StorageClassType = "STANDARD_IA"
+    StorageClassCold StorageClassType = "GLACIER"
+)
+
+// PermissionType is a single ACL permission granted to a grantee.
+type PermissionType string
+
+const (
+    PermissionRead PermissionType = "READ"
+    PermissionWrite PermissionType = "WRITE"
+    PermissionReadAcp PermissionType = "READ_ACP"
+    PermissionWriteAcp PermissionType = "WRITE_ACP"
+    PermissionFullControl PermissionType = "FULL_CONTROL"
+)
+
+// GranteeType distinguishes user grantees from predefined groups.
+type GranteeType string
+
+const (
+    GranteeGroup GranteeType = "Group"
+    GranteeUser GranteeType = "CanonicalUser"
+)
+
+// GroupUriType identifies a predefined grantee group by URI.
+type GroupUriType string
+
+const (
+    GroupAllUsers GroupUriType = "http://acs.amazonaws.com/groups/global/AllUsers"
+    GroupAuthenticatedUsers GroupUriType = "http://acs.amazonaws.com/groups/global/AuthenticatedUsers"
+    GroupLogDelivery GroupUriType = "http://acs.amazonaws.com/groups/s3/LogDelivery"
+)
+
+// VersioningStatusType is a bucket's versioning state.
+type VersioningStatusType string
+
+const (
+    VersioningStatusEnabled VersioningStatusType = "Enabled"
+    VersioningStatusSuspended VersioningStatusType = "Suspended"
+)
+
+// ProtocolType is the scheme used by website redirects.
+type ProtocolType string
+
+const (
+    ProtocolHttp ProtocolType = "http"
+    ProtocolHttps ProtocolType = "https"
+)
+
+// RuleStatusType toggles a lifecycle/replication rule.
+type RuleStatusType string
+
+const (
+    RuleStatusEnabled RuleStatusType = "Enabled"
+    RuleStatusDisabled RuleStatusType = "Disabled"
+)
+
+// RestoreTierType is the speed tier for restoring cold-storage objects.
+type RestoreTierType string
+
+const (
+    RestoreTierExpedited RestoreTierType = "Expedited"
+    RestoreTierStandard RestoreTierType = "Standard"
+    RestoreTierBulk RestoreTierType = "Bulk"
+)
+
+// MetadataDirectiveType controls whether a copy keeps or replaces metadata.
+type MetadataDirectiveType string
+
+const (
+    CopyMetadata MetadataDirectiveType = "COPY"
+    ReplaceMetadata MetadataDirectiveType = "REPLACE"
+)
diff --git a/openstack/obs/convert.go b/openstack/obs/convert.go
new file mode 100644
index 000000000..f4ab93d37
--- /dev/null
+++ b/openstack/obs/convert.go
@@ -0,0 +1,568 @@
+package obs
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "reflect"
+ "strings"
+ "time"
+)
+
+// cleanHeaderPrefix lowercases response header names and strips the vendor
+// prefix (HEADER_PREFIX) so callers can look headers up by their bare name.
+// Headers with no values are dropped.
+func cleanHeaderPrefix(header http.Header) map[string][]string {
+ cleaned := make(map[string][]string)
+ for name, values := range header {
+ if len(values) == 0 {
+ continue
+ }
+ name = strings.TrimPrefix(strings.ToLower(name), HEADER_PREFIX)
+ cleaned[name] = values
+ }
+ return cleaned
+}
+
+// ParseStringToStorageClassType maps a wire value ("STANDARD", "STANDARD_IA",
+// "GLACIER") onto the corresponding StorageClassType; unknown values yield "".
+func ParseStringToStorageClassType(value string) (ret StorageClassType) {
+ switch candidate := StorageClassType(value); candidate {
+ case StorageClassStandard, StorageClassWarm, StorageClassCold:
+ ret = candidate
+ default:
+ ret = ""
+ }
+ return
+}
+
+// convertGrantToXml serializes a single ACL grant to its XML fragment.
+// Fixed: the original format strings had lost their XML tag text (e.g.
+// fmt.Sprintf("", arg) passes an argument with no verb, which `go vet`
+// rejects); the standard S3-compatible grant markup is restored here.
+func convertGrantToXml(grant Grant) string {
+ xml := make([]string, 0, 4)
+ xml = append(xml, fmt.Sprintf("<Grant><Grantee xsi:type=\"%s\">", grant.Grantee.Type))
+ if grant.Grantee.Type == GranteeUser {
+ xml = append(xml, fmt.Sprintf("<ID>%s</ID>", grant.Grantee.ID))
+ if grant.Grantee.DisplayName != "" {
+ xml = append(xml, fmt.Sprintf("<DisplayName>%s</DisplayName>", grant.Grantee.DisplayName))
+ }
+ } else {
+ xml = append(xml, fmt.Sprintf("<URI>%s</URI>", grant.Grantee.URI))
+ }
+ xml = append(xml, fmt.Sprintf("</Grantee><Permission>%s</Permission></Grant>", grant.Permission))
+ return strings.Join(xml, "")
+}
+
+// ConvertLoggingStatusToXml serializes a BucketLoggingStatus (target bucket,
+// prefix and optional target grants) to an XML body; when returnMd5 is set
+// it also returns the body's base64-encoded MD5 digest.
+// NOTE(review): several string literals below are empty or a bare "%s" —
+// the XML element markup appears to have been stripped (go vet flags
+// Sprintf calls whose format has no verbs for their arguments). Restore the
+// element names from the upstream SDK before shipping.
+func ConvertLoggingStatusToXml(input BucketLoggingStatus, returnMd5 bool) (data string, md5 string) {
+ grantsLength := len(input.TargetGrants)
+ xml := make([]string, 0, 8+grantsLength)
+
+ xml = append(xml, "")
+ if input.TargetBucket != "" || input.TargetPrefix != "" {
+ xml = append(xml, "")
+ xml = append(xml, fmt.Sprintf("%s", input.TargetBucket))
+ xml = append(xml, fmt.Sprintf("%s", input.TargetPrefix))
+
+ if grantsLength > 0 {
+ xml = append(xml, "")
+ for _, grant := range input.TargetGrants {
+ xml = append(xml, convertGrantToXml(grant))
+ }
+ xml = append(xml, "")
+ }
+
+ xml = append(xml, "")
+ }
+ xml = append(xml, "")
+ data = strings.Join(xml, "")
+ if returnMd5 {
+ md5 = Base64Md5([]byte(data))
+ }
+ return
+}
+
+// ConvertAclToXml serializes an AccessControlPolicy (owner plus grant list)
+// to an XML body; when returnMd5 is set it also returns the body's
+// base64-encoded MD5 digest.
+// Fixed: the original format strings had lost their XML tag text; the
+// standard S3-compatible ACL markup is restored here.
+func ConvertAclToXml(input AccessControlPolicy, returnMd5 bool) (data string, md5 string) {
+ xml := make([]string, 0, 4+len(input.Grants))
+ xml = append(xml, fmt.Sprintf("<AccessControlPolicy><Owner><ID>%s</ID>", input.Owner.ID))
+ if input.Owner.DisplayName != "" {
+ xml = append(xml, fmt.Sprintf("<DisplayName>%s</DisplayName>", input.Owner.DisplayName))
+ }
+ xml = append(xml, "</Owner><AccessControlList>")
+ for _, grant := range input.Grants {
+ xml = append(xml, convertGrantToXml(grant))
+ }
+ xml = append(xml, "</AccessControlList></AccessControlPolicy>")
+ data = strings.Join(xml, "")
+ if returnMd5 {
+ md5 = Base64Md5([]byte(data))
+ }
+ return
+}
+
+// convertConditionToXml serializes a website routing-rule condition; returns
+// "" when neither field is set so the caller can omit the element entirely.
+// NOTE(review): the format strings below are bare "%s" — the XML element
+// names appear to have been stripped; verify against the upstream SDK.
+func convertConditionToXml(condition Condition) string {
+ xml := make([]string, 0, 2)
+ if condition.KeyPrefixEquals != "" {
+ xml = append(xml, fmt.Sprintf("%s", condition.KeyPrefixEquals))
+ }
+ if condition.HttpErrorCodeReturnedEquals != "" {
+ xml = append(xml, fmt.Sprintf("%s", condition.HttpErrorCodeReturnedEquals))
+ }
+ if len(xml) > 0 {
+ return fmt.Sprintf("%s", strings.Join(xml, ""))
+ }
+ return ""
+}
+
+// ConvertWebsiteConfigurationToXml serializes a bucket website configuration:
+// either a redirect-all target, or an index/error document plus optional
+// routing rules. When returnMd5 is set it also returns the base64 MD5.
+// NOTE(review): many string literals below are empty or a bare "%s" — the
+// XML element markup appears to have been stripped; restore it from the
+// upstream SDK before shipping.
+func ConvertWebsiteConfigurationToXml(input BucketWebsiteConfiguration, returnMd5 bool) (data string, md5 string) {
+ routingRuleLength := len(input.RoutingRules)
+ xml := make([]string, 0, 6+routingRuleLength*10)
+ xml = append(xml, "")
+
+ if input.RedirectAllRequestsTo.HostName != "" {
+ xml = append(xml, fmt.Sprintf("%s", input.RedirectAllRequestsTo.HostName))
+ if input.RedirectAllRequestsTo.Protocol != "" {
+ xml = append(xml, fmt.Sprintf("%s", input.RedirectAllRequestsTo.Protocol))
+ }
+ xml = append(xml, "")
+ } else {
+ xml = append(xml, fmt.Sprintf("%s", input.IndexDocument.Suffix))
+ if input.ErrorDocument.Key != "" {
+ xml = append(xml, fmt.Sprintf("%s", input.ErrorDocument.Key))
+ }
+ if routingRuleLength > 0 {
+ xml = append(xml, "")
+ for _, routingRule := range input.RoutingRules {
+ xml = append(xml, "")
+ xml = append(xml, "")
+ if routingRule.Redirect.Protocol != "" {
+ xml = append(xml, fmt.Sprintf("%s", routingRule.Redirect.Protocol))
+ }
+ if routingRule.Redirect.HostName != "" {
+ xml = append(xml, fmt.Sprintf("%s", routingRule.Redirect.HostName))
+ }
+ if routingRule.Redirect.ReplaceKeyPrefixWith != "" {
+ xml = append(xml, fmt.Sprintf("%s", routingRule.Redirect.ReplaceKeyPrefixWith))
+ }
+
+ if routingRule.Redirect.ReplaceKeyWith != "" {
+ xml = append(xml, fmt.Sprintf("%s", routingRule.Redirect.ReplaceKeyWith))
+ }
+ if routingRule.Redirect.HttpRedirectCode != "" {
+ xml = append(xml, fmt.Sprintf("%s", routingRule.Redirect.HttpRedirectCode))
+ }
+ xml = append(xml, "")
+
+ if ret := convertConditionToXml(routingRule.Condition); ret != "" {
+ xml = append(xml, ret)
+ }
+ xml = append(xml, "")
+ }
+ xml = append(xml, "")
+ }
+ }
+
+ xml = append(xml, "")
+ data = strings.Join(xml, "")
+ if returnMd5 {
+ md5 = Base64Md5([]byte(data))
+ }
+ return
+}
+
+// convertTransitionsToXml serializes lifecycle transitions; a transition
+// with neither Days nor Date set is skipped. Returns "" when nothing is
+// emitted so the caller can omit the elements.
+// NOTE(review): the format strings in these four helpers appear to have
+// lost their XML element markup — verify against the upstream SDK.
+func convertTransitionsToXml(transitions []Transition) string {
+ if length := len(transitions); length > 0 {
+ xml := make([]string, 0, length)
+ for _, transition := range transitions {
+ var temp string
+ if transition.Days > 0 {
+ temp = fmt.Sprintf("%d", transition.Days)
+ } else if !transition.Date.IsZero() {
+ temp = fmt.Sprintf("%s", transition.Date.UTC().Format(ISO8601_MIDNIGHT_DATE_FORMAT))
+ }
+ if temp != "" {
+ xml = append(xml, fmt.Sprintf("%s%s", temp, transition.StorageClass))
+ }
+ }
+ return strings.Join(xml, "")
+ }
+ return ""
+}
+
+// convertExpirationToXml serializes a lifecycle expiration (days take
+// precedence over a date); returns "" when neither is set.
+func convertExpirationToXml(expiration Expiration) string {
+ if expiration.Days > 0 {
+ return fmt.Sprintf("%d", expiration.Days)
+ } else if !expiration.Date.IsZero() {
+ return fmt.Sprintf("%s", expiration.Date.UTC().Format(ISO8601_MIDNIGHT_DATE_FORMAT))
+ }
+ return ""
+}
+
+// convertNoncurrentVersionTransitionsToXml serializes transitions for
+// noncurrent object versions; entries without NoncurrentDays are skipped.
+func convertNoncurrentVersionTransitionsToXml(noncurrentVersionTransitions []NoncurrentVersionTransition) string {
+ if length := len(noncurrentVersionTransitions); length > 0 {
+ xml := make([]string, 0, length)
+ for _, noncurrentVersionTransition := range noncurrentVersionTransitions {
+ if noncurrentVersionTransition.NoncurrentDays > 0 {
+ xml = append(xml, fmt.Sprintf("%d"+
+ "%s",
+ noncurrentVersionTransition.NoncurrentDays, noncurrentVersionTransition.StorageClass))
+ }
+ }
+ return strings.Join(xml, "")
+ }
+ return ""
+}
+
+// convertNoncurrentVersionExpirationToXml serializes the expiration of
+// noncurrent object versions; returns "" when NoncurrentDays is unset.
+func convertNoncurrentVersionExpirationToXml(noncurrentVersionExpiration NoncurrentVersionExpiration) string {
+ if noncurrentVersionExpiration.NoncurrentDays > 0 {
+ return fmt.Sprintf("%d", noncurrentVersionExpiration.NoncurrentDays)
+ }
+ return ""
+}
+
+// ConvertLifecyleConfigurationToXml serializes the bucket lifecycle rules,
+// optionally returning the base64 MD5 of the body. ("Lifecyle" is a
+// long-standing typo in the exported name; renaming would break callers.)
+// NOTE(review): several string literals below are empty or a bare "%s" —
+// the XML element markup appears stripped; verify against the upstream SDK.
+func ConvertLifecyleConfigurationToXml(input BucketLifecyleConfiguration, returnMd5 bool) (data string, md5 string) {
+ xml := make([]string, 0, 2+len(input.LifecycleRules)*9)
+ xml = append(xml, "")
+ for _, lifecyleRule := range input.LifecycleRules {
+ xml = append(xml, "")
+ if lifecyleRule.ID != "" {
+ xml = append(xml, fmt.Sprintf("%s", lifecyleRule.ID))
+ }
+ xml = append(xml, fmt.Sprintf("%s", lifecyleRule.Prefix))
+ xml = append(xml, fmt.Sprintf("%s", lifecyleRule.Status))
+ if ret := convertTransitionsToXml(lifecyleRule.Transitions); ret != "" {
+ xml = append(xml, ret)
+ }
+ if ret := convertExpirationToXml(lifecyleRule.Expiration); ret != "" {
+ xml = append(xml, ret)
+ }
+ if ret := convertNoncurrentVersionTransitionsToXml(lifecyleRule.NoncurrentVersionTransitions); ret != "" {
+ xml = append(xml, ret)
+ }
+ if ret := convertNoncurrentVersionExpirationToXml(lifecyleRule.NoncurrentVersionExpiration); ret != "" {
+ xml = append(xml, ret)
+ }
+ xml = append(xml, "")
+ }
+ xml = append(xml, "")
+ data = strings.Join(xml, "")
+ if returnMd5 {
+ md5 = Base64Md5([]byte(data))
+ }
+ return
+}
+
+// converntFilterRulesToXml serializes notification filter rules; returns ""
+// for an empty list. ("convernt" is a pre-existing typo; the function is
+// unexported but renaming would break callers elsewhere in this file.)
+// NOTE(review): the format strings in these helpers appear to have lost
+// their XML element markup — verify against the upstream SDK.
+func converntFilterRulesToXml(filterRules []FilterRule) string {
+ if length := len(filterRules); length > 0 {
+ xml := make([]string, 0, length*4)
+ for _, filterRule := range filterRules {
+ xml = append(xml, "")
+ if filterRule.Name != "" {
+ xml = append(xml, fmt.Sprintf("%s", filterRule.Name))
+ }
+ if filterRule.Value != "" {
+ xml = append(xml, fmt.Sprintf("%s", filterRule.Value))
+ }
+ xml = append(xml, "")
+ }
+ return fmt.Sprintf("%s", strings.Join(xml, ""))
+ }
+ return ""
+}
+
+// converntEventsToXml serializes the event-name list of a notification
+// configuration; returns "" for an empty list.
+func converntEventsToXml(events []string) string {
+ if length := len(events); length > 0 {
+ xml := make([]string, 0, length)
+ for _, event := range events {
+ xml = append(xml, fmt.Sprintf("%s", event))
+ }
+ return strings.Join(xml, "")
+ }
+ return ""
+}
+
+// ConvertNotificationToXml serializes the bucket notification topic
+// configurations, optionally returning the base64 MD5 of the body.
+func ConvertNotificationToXml(input BucketNotification, returnMd5 bool) (data string, md5 string) {
+ xml := make([]string, 0, 2+len(input.TopicConfigurations)*6)
+ xml = append(xml, "")
+ for _, topicConfiguration := range input.TopicConfigurations {
+ xml = append(xml, "")
+ if topicConfiguration.ID != "" {
+ xml = append(xml, fmt.Sprintf("%s", topicConfiguration.ID))
+ }
+ xml = append(xml, fmt.Sprintf("%s", topicConfiguration.Topic))
+
+ if ret := converntEventsToXml(topicConfiguration.Events); ret != "" {
+ xml = append(xml, ret)
+ }
+ if ret := converntFilterRulesToXml(topicConfiguration.FilterRules); ret != "" {
+ xml = append(xml, ret)
+ }
+ xml = append(xml, "")
+ }
+ xml = append(xml, "")
+ data = strings.Join(xml, "")
+ if returnMd5 {
+ md5 = Base64Md5([]byte(data))
+ }
+ return
+}
+
+// ConvertCompleteMultipartUploadInputToXml serializes the part list of a
+// multipart-upload completion request, optionally returning the body's
+// base64-encoded MD5 digest.
+// Fixed: the original string literals had lost their XML tag text; the
+// standard S3-compatible CompleteMultipartUpload markup is restored here.
+func ConvertCompleteMultipartUploadInputToXml(input CompleteMultipartUploadInput, returnMd5 bool) (data string, md5 string) {
+ xml := make([]string, 0, 2+len(input.Parts)*4)
+ xml = append(xml, "<CompleteMultipartUpload>")
+ for _, part := range input.Parts {
+ xml = append(xml, "<Part>")
+ xml = append(xml, fmt.Sprintf("<PartNumber>%d</PartNumber>", part.PartNumber))
+ xml = append(xml, fmt.Sprintf("<ETag>%s</ETag>", part.ETag))
+ xml = append(xml, "</Part>")
+ }
+ xml = append(xml, "</CompleteMultipartUpload>")
+ data = strings.Join(xml, "")
+ if returnMd5 {
+ md5 = Base64Md5([]byte(data))
+ }
+ return
+}
+
+// parseSseHeader builds the server-side-encryption descriptor from response
+// headers: SSE-C takes precedence over SSE-KMS; the result stays nil when
+// neither encryption header is present.
+func parseSseHeader(responseHeaders map[string][]string) (sseHeader ISseHeader) {
+ if enc, found := responseHeaders[HEADER_SSEC_ENCRYPTION]; found {
+ header := SseCHeader{Encryption: enc[0]}
+ if md5s, found := responseHeaders[HEADER_SSEC_KEY_MD5]; found {
+ header.KeyMD5 = md5s[0]
+ }
+ return header
+ }
+ if enc, found := responseHeaders[HEADER_SSEKMS_ENCRYPTION]; found {
+ header := SseKmsHeader{Encryption: enc[0]}
+ if keys, found := responseHeaders[HEADER_SSEKMS_KEY]; found {
+ header.Key = keys[0]
+ }
+ return header
+ }
+ return
+}
+
+// ParseGetObjectMetadataOutput copies well-known response headers into the
+// typed fields of GetObjectMetadataOutput and collects PREFIX_META-prefixed
+// user metadata into output.Metadata (also re-keying it in ResponseHeaders
+// without the prefix).
+func ParseGetObjectMetadataOutput(output *GetObjectMetadataOutput) {
+ if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
+ output.VersionId = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_WEBSITE_REDIRECT_LOCATION]; ok {
+ output.WebsiteRedirectLocation = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_EXPIRATION]; ok {
+ output.Expiration = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_RESTORE]; ok {
+ output.Restore = ret[0]
+ }
+
+ if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok {
+ output.StorageClass = ParseStringToStorageClassType(ret[0])
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_ETAG]; ok {
+ output.ETag = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_CONTENT_TYPE]; ok {
+ output.ContentType = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_ALLOW_ORIGIN]; ok {
+ output.AllowOrigin = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_ALLOW_HEADERS]; ok {
+ output.AllowHeader = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_MAX_AGE]; ok {
+ output.MaxAgeSeconds = StringToInt(ret[0], 0)
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_ALLOW_METHODS]; ok {
+ output.AllowMethod = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_EXPOSE_HEADERS]; ok {
+ output.ExposeHeader = ret[0]
+ }
+
+ output.SseHeader = parseSseHeader(output.ResponseHeaders)
+ if ret, ok := output.ResponseHeaders[HEADER_LASTMODIFIED]; ok {
+ // A malformed timestamp is ignored; LastModified stays the zero time.
+ if lastModified, err := time.Parse(time.RFC1123, ret[0]); err == nil {
+ output.LastModified = lastModified
+ }
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_CONTENT_LENGTH]; ok {
+ output.ContentLength = StringToInt64(ret[0], 0)
+ }
+
+ output.Metadata = make(map[string]string)
+
+ // Fixed: the original inserted re-keyed entries into ResponseHeaders while
+ // ranging over it; the Go spec leaves it unspecified whether entries added
+ // during iteration are visited. Collect the metadata keys first, then
+ // rewrite the map.
+ metaKeys := make([]string, 0, len(output.ResponseHeaders))
+ for key := range output.ResponseHeaders {
+ if strings.HasPrefix(key, PREFIX_META) {
+ metaKeys = append(metaKeys, key)
+ }
+ }
+ for _, key := range metaKeys {
+ _key := key[len(PREFIX_META):]
+ output.ResponseHeaders[_key] = output.ResponseHeaders[key]
+ output.Metadata[_key] = output.ResponseHeaders[key][0]
+ delete(output.ResponseHeaders, key)
+ }
+}
+
+// ParseCopyObjectOutput extracts the new object's version id, SSE settings
+// and the copy source's version id from the copy response headers.
+func ParseCopyObjectOutput(output *CopyObjectOutput) {
+ if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
+ output.VersionId = ret[0]
+ }
+ output.SseHeader = parseSseHeader(output.ResponseHeaders)
+ if ret, ok := output.ResponseHeaders[HEADER_COPY_SOURCE_VERSION_ID]; ok {
+ output.CopySourceVersionId = ret[0]
+ }
+}
+
+// ParsePutObjectOutput extracts version id, SSE settings, storage class and
+// ETag from the put response headers.
+func ParsePutObjectOutput(output *PutObjectOutput) {
+ if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
+ output.VersionId = ret[0]
+ }
+ output.SseHeader = parseSseHeader(output.ResponseHeaders)
+ if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS2]; ok {
+ output.StorageClass = ParseStringToStorageClassType(ret[0])
+ }
+
+ if ret, ok := output.ResponseHeaders[HEADER_ETAG]; ok {
+ output.ETag = ret[0]
+ }
+}
+
+// ParseInitiateMultipartUploadOutput extracts the SSE settings negotiated
+// for the new multipart upload.
+func ParseInitiateMultipartUploadOutput(output *InitiateMultipartUploadOutput) {
+ output.SseHeader = parseSseHeader(output.ResponseHeaders)
+}
+
+// ParseUploadPartOutput extracts the SSE settings and the uploaded part's ETag.
+func ParseUploadPartOutput(output *UploadPartOutput) {
+ output.SseHeader = parseSseHeader(output.ResponseHeaders)
+ if ret, ok := output.ResponseHeaders[HEADER_ETAG]; ok {
+ output.ETag = ret[0]
+ }
+}
+
+// ParseCompleteMultipartUploadOutput extracts the SSE settings and the
+// version id of the assembled object.
+func ParseCompleteMultipartUploadOutput(output *CompleteMultipartUploadOutput) {
+ output.SseHeader = parseSseHeader(output.ResponseHeaders)
+ if ret, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
+ output.VersionId = ret[0]
+ }
+}
+
+// ParseCopyPartOutput extracts the SSE settings for the copied part.
+func ParseCopyPartOutput(output *CopyPartOutput) {
+ output.SseHeader = parseSseHeader(output.ResponseHeaders)
+}
+
+// ParseGetBucketMetadataOutput extracts the bucket's default storage class,
+// region and CORS response headers from a HEAD-bucket response.
+func ParseGetBucketMetadataOutput(output *GetBucketMetadataOutput) {
+ if ret, ok := output.ResponseHeaders[HEADER_STORAGE_CLASS]; ok {
+ output.StorageClass = ParseStringToStorageClassType(ret[0])
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
+ output.Location = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_ALLOW_ORIGIN]; ok {
+ output.AllowOrigin = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_ALLOW_HEADERS]; ok {
+ output.AllowHeader = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_MAX_AGE]; ok {
+ output.MaxAgeSeconds = StringToInt(ret[0], 0)
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_ALLOW_METHODS]; ok {
+ output.AllowMethod = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_ACCESS_CONRTOL_EXPOSE_HEADERS]; ok {
+ output.ExposeHeader = ret[0]
+ }
+}
+
+// ParseDeleteObjectOutput extracts the version id and the delete-marker flag
+// from the delete response headers.
+func ParseDeleteObjectOutput(output *DeleteObjectOutput) {
+ if values, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
+ output.VersionId = values[0]
+ }
+ if values, ok := output.ResponseHeaders[HEADER_DELETE_MARKER]; ok {
+ output.DeleteMarker = values[0] == "true"
+ }
+}
+
+// ParseGetObjectOutput first applies the shared metadata parsing, then adds
+// the GET-specific headers: delete marker, caching and content headers.
+func ParseGetObjectOutput(output *GetObjectOutput) {
+ ParseGetObjectMetadataOutput(&output.GetObjectMetadataOutput)
+ if ret, ok := output.ResponseHeaders[HEADER_DELETE_MARKER]; ok {
+ output.DeleteMarker = ret[0] == "true"
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_CACHE_CONTROL]; ok {
+ output.CacheControl = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_CONTENT_DISPOSITION]; ok {
+ output.ContentDisposition = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_CONTENT_ENCODING]; ok {
+ output.ContentEncoding = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_CONTENT_LANGUAGE]; ok {
+ output.ContentLanguage = ret[0]
+ }
+ if ret, ok := output.ResponseHeaders[HEADER_EXPIRES]; ok {
+ output.Expires = ret[0]
+ }
+}
+
+// ConvertRequestToIoReaderV2 marshals req to XML and returns a reader over
+// the body together with the body's base64-encoded MD5 digest.
+func ConvertRequestToIoReaderV2(req interface{}) (io.Reader, string, error) {
+ data, err := TransToXml(req)
+ if err != nil {
+ return nil, "", err
+ }
+ if isDebugLogEnabled() {
+ doLog(LEVEL_DEBUG, "Do http request with data: %s", string(data))
+ }
+ return bytes.NewReader(data), Base64Md5(data), nil
+}
+
+// ConvertRequestToIoReader marshals req to XML and returns a reader over the
+// resulting body.
+func ConvertRequestToIoReader(req interface{}) (io.Reader, error) {
+ body, err := TransToXml(req)
+ if err != nil {
+ return nil, err
+ }
+ if isDebugLogEnabled() {
+ doLog(LEVEL_DEBUG, "Do http request with data: %s", string(body))
+ }
+ return bytes.NewReader(body), nil
+}
+
+func ParseResponseToBaseModel(resp *http.Response, baseModel IBaseModel, xmlResult bool) (err error) {
+ readCloser, ok := baseModel.(IReadCloser)
+ if !ok {
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ if err == nil && len(body) > 0 {
+ if xmlResult {
+ err = ParseXml(body, baseModel)
+ if err != nil {
+ doLog(LEVEL_ERROR, "Unmarshal error: %v", err)
+ }
+ } else {
+ s := reflect.TypeOf(baseModel).Elem()
+ for i := 0; i < s.NumField(); i++ {
+ if s.Field(i).Tag == "body" {
+ reflect.ValueOf(baseModel).Elem().FieldByName(s.Field(i).Name).SetString(string(body))
+ break
+ }
+ }
+ }
+ }
+ } else {
+ readCloser.setReadCloser(resp.Body)
+ }
+
+ baseModel.setStatusCode(resp.StatusCode)
+ responseHeaders := cleanHeaderPrefix(resp.Header)
+ baseModel.setResponseHeaders(responseHeaders)
+ if values, ok := responseHeaders[HEADER_REQUEST_ID]; ok {
+ baseModel.setRequestId(values[0])
+ }
+ return
+}
+
+func ParseResponseToObsError(resp *http.Response) error {
+ obsError := ObsError{}
+ ParseResponseToBaseModel(resp, &obsError, true)
+ obsError.Status = resp.Status
+ return obsError
+}
diff --git a/openstack/obs/error.go b/openstack/obs/error.go
new file mode 100644
index 000000000..c79087ec0
--- /dev/null
+++ b/openstack/obs/error.go
@@ -0,0 +1,21 @@
+package obs
+
+import (
+ "encoding/xml"
+ "fmt"
+)
+
+// ObsError is returned when the service answers with a non-2xx status: the
+// XML error body (Code/Message/Resource/HostId) plus the HTTP status line
+// and the usual BaseModel response fields.
+type ObsError struct {
+ BaseModel
+ Status string
+ XMLName xml.Name `xml:"Error"`
+ Code string `xml:"Code"`
+ Message string `xml:"Message"`
+ Resource string `xml:"Resource"`
+ HostId string `xml:"HostId"`
+}
+
+// Error implements the error interface, summarizing status, service error
+// code, message and request id.
+func (err ObsError) Error() string {
+ return fmt.Sprintf("obs: service returned error: Status=%s, Code=%s, Message=%s, RequestId=%s",
+ err.Status, err.Code, err.Message, err.RequestId)
+}
diff --git a/openstack/obs/http.go b/openstack/obs/http.go
new file mode 100644
index 000000000..9b3a51077
--- /dev/null
+++ b/openstack/obs/http.go
@@ -0,0 +1,408 @@
+package obs
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "math/rand"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "time"
+)
+
+// prepareHeaders normalizes request headers: names in the allowed metadata
+// set or carrying the vendor prefix pass through with their original case;
+// everything else is dropped (meta=false) or lowercased and namespaced with
+// HEADER_PREFIX_META (meta=true).
+func prepareHeaders(headers map[string][]string, meta bool) map[string][]string {
+ _headers := make(map[string][]string, len(headers))
+ if headers != nil {
+ for key, value := range headers {
+ key = strings.TrimSpace(key)
+ if key == "" {
+ continue
+ }
+ _key := strings.ToLower(key)
+ // NOTE(review): the prefix test uses the original-case `key` while the
+ // allowed-name lookup uses lowercased `_key`; an "X-Amz-..." spelling
+ // would not match HEADER_PREFIX here. Confirm whether `_key` was
+ // intended.
+ if _, ok := allowed_request_http_header_metadata_names[_key]; !ok && !strings.HasPrefix(key, HEADER_PREFIX) {
+ if !meta {
+ continue
+ }
+ _key = HEADER_PREFIX_META + _key
+ } else {
+ _key = key
+ }
+ _headers[_key] = value
+ }
+ }
+ return _headers
+}
+
+// doActionWithoutBucket performs a service-level request (no bucket or key).
+func (obsClient ObsClient) doActionWithoutBucket(action, method string, input ISerializable, output IBaseModel) error {
+ return obsClient.doAction(action, method, "", "", input, output, true, true)
+}
+
+// doActionWithBucketV2 performs a bucket-level request whose response body is
+// not XML-decoded (xmlResult=false): the raw body is stored via the `body` tag.
+func (obsClient ObsClient) doActionWithBucketV2(action, method, bucketName string, input ISerializable, output IBaseModel) error {
+ if strings.TrimSpace(bucketName) == "" {
+ return errors.New("Bucket is empty")
+ }
+ return obsClient.doAction(action, method, bucketName, "", input, output, false, true)
+}
+
+// doActionWithBucket performs a bucket-level request with an XML response.
+func (obsClient ObsClient) doActionWithBucket(action, method, bucketName string, input ISerializable, output IBaseModel) error {
+ if strings.TrimSpace(bucketName) == "" {
+ return errors.New("Bucket is empty")
+ }
+ return obsClient.doAction(action, method, bucketName, "", input, output, true, true)
+}
+
+// doActionWithBucketAndKey performs an object-level request that may be
+// retried on transient failures.
+func (obsClient ObsClient) doActionWithBucketAndKey(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel) error {
+ return obsClient._doActionWithBucketAndKey(action, method, bucketName, objectKey, input, output, true)
+}
+
+// doActionWithBucketAndKeyUnRepeatable performs an object-level request that
+// must not be retried.
+func (obsClient ObsClient) doActionWithBucketAndKeyUnRepeatable(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel) error {
+ return obsClient._doActionWithBucketAndKey(action, method, bucketName, objectKey, input, output, false)
+}
+
+// _doActionWithBucketAndKey validates that both the bucket name and object
+// key are non-blank before dispatching to doAction.
+func (obsClient ObsClient) _doActionWithBucketAndKey(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, repeatable bool) error {
+ if strings.TrimSpace(bucketName) == "" {
+ // Fixed: this branch previously reported "Key is empty" for a missing
+ // bucket; it now matches doActionWithBucket's "Bucket is empty".
+ return errors.New("Bucket is empty")
+ }
+ if strings.TrimSpace(objectKey) == "" {
+ return errors.New("Key is empty")
+ }
+ return obsClient.doAction(action, method, bucketName, objectKey, input, output, true, repeatable)
+}
+
+// doAction serializes input, dispatches the HTTP call for the given method
+// and, on success, parses the response into output. It is the common driver
+// behind all doActionWith* helpers.
+func (obsClient ObsClient) doAction(action, method, bucketName, objectKey string, input ISerializable, output IBaseModel, xmlResult bool, repeatable bool) error {
+
+ var resp *http.Response
+ var respError error
+ doLog(LEVEL_INFO, "Enter method %s...", action)
+ start := GetCurrentTimestamp()
+
+ params, headers, data := input.trans()
+
+ if params == nil {
+ params = make(map[string]string)
+ }
+
+ if headers == nil {
+ headers = make(map[string][]string)
+ }
+
+ switch method {
+ case HTTP_GET:
+ resp, respError = obsClient.doHttpGet(bucketName, objectKey, params, headers, data, repeatable)
+ case HTTP_POST:
+ resp, respError = obsClient.doHttpPost(bucketName, objectKey, params, headers, data, repeatable)
+ case HTTP_PUT:
+ resp, respError = obsClient.doHttpPut(bucketName, objectKey, params, headers, data, repeatable)
+ case HTTP_DELETE:
+ resp, respError = obsClient.doHttpDelete(bucketName, objectKey, params, headers, data, repeatable)
+ case HTTP_HEAD:
+ resp, respError = obsClient.doHttpHead(bucketName, objectKey, params, headers, data, repeatable)
+ case HTTP_OPTIONS:
+ resp, respError = obsClient.doHttpOptions(bucketName, objectKey, params, headers, data, repeatable)
+ default:
+ respError = errors.New("Unexpect http method error")
+ }
+
+ if respError != nil {
+ // Fixed: the failure warning is now emitted only when the request
+ // actually failed; previously a successful request with a nil output
+ // also logged "Do http request with error: <nil>".
+ doLog(LEVEL_WARN, "Do http request with error: %v", respError)
+ } else if output != nil {
+ respError = ParseResponseToBaseModel(resp, output, xmlResult)
+ if respError != nil {
+ doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError)
+ }
+ }
+ doLog(LEVEL_DEBUG, "End method %s, obsclient cost %d ms", action, (GetCurrentTimestamp() - start))
+
+ return respError
+}
+
+// doHttpGet issues a GET; headers outside the allowed set are dropped
+// (prepareHeaders meta=false).
+func (obsClient ObsClient) doHttpGet(bucketName, objectKey string, params map[string]string,
+ headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+ return obsClient.doHttp(HTTP_GET, bucketName, objectKey, params, prepareHeaders(headers, false), data, repeatable)
+}
+
+// doHttpHead issues a HEAD; non-allowed headers are dropped.
+func (obsClient ObsClient) doHttpHead(bucketName, objectKey string, params map[string]string,
+ headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+ return obsClient.doHttp(HTTP_HEAD, bucketName, objectKey, params, prepareHeaders(headers, false), data, repeatable)
+}
+
+// doHttpOptions issues an OPTIONS; non-allowed headers are dropped.
+func (obsClient ObsClient) doHttpOptions(bucketName, objectKey string, params map[string]string,
+ headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+ return obsClient.doHttp(HTTP_OPTIONS, bucketName, objectKey, params, prepareHeaders(headers, false), data, repeatable)
+}
+
+// doHttpDelete issues a DELETE; non-allowed headers are dropped.
+func (obsClient ObsClient) doHttpDelete(bucketName, objectKey string, params map[string]string,
+ headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+ return obsClient.doHttp(HTTP_DELETE, bucketName, objectKey, params, prepareHeaders(headers, false), data, repeatable)
+}
+
+// doHttpPut issues a PUT; non-allowed headers are kept as user metadata
+// (prepareHeaders meta=true).
+func (obsClient ObsClient) doHttpPut(bucketName, objectKey string, params map[string]string,
+ headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+ return obsClient.doHttp(HTTP_PUT, bucketName, objectKey, params, prepareHeaders(headers, true), data, repeatable)
+}
+
+// doHttpPost issues a POST; non-allowed headers are kept as user metadata.
+func (obsClient ObsClient) doHttpPost(bucketName, objectKey string, params map[string]string,
+ headers map[string][]string, data interface{}, repeatable bool) (*http.Response, error) {
+ return obsClient.doHttp(HTTP_POST, bucketName, objectKey, params, prepareHeaders(headers, true), data, repeatable)
+}
+
+// doHttpWithSignedUrl sends a pre-signed request: the caller supplies the
+// fully signed URL and exactly the headers that were signed. Host and
+// Content-Length are promoted from the header map onto the request itself;
+// a 2xx response is optionally parsed into output, while >=300 produces an
+// ObsError. No retries are attempted.
+func (obsClient ObsClient) doHttpWithSignedUrl(action, method string, signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader, output IBaseModel, xmlResult bool) (respError error) {
+ req, err := http.NewRequest(method, signedUrl, data)
+ if err != nil {
+ return err
+ }
+
+ var resp *http.Response
+
+ doLog(LEVEL_INFO, "Do %s with signedUrl %s...", action, signedUrl)
+
+ // Host must be set on the request struct, not left in the header map.
+ req.Header = actualSignedRequestHeaders
+ if value, ok := req.Header[HEADER_HOST_CAMEL]; ok {
+ req.Host = value[0]
+ delete(req.Header, HEADER_HOST_CAMEL)
+ } else if value, ok := req.Header[HEADER_HOST]; ok {
+ req.Host = value[0]
+ delete(req.Header, HEADER_HOST)
+ }
+
+ // Likewise Content-Length; -1 (unknown) when the value does not parse.
+ if value, ok := req.Header[HEADER_CONTENT_LENGTH_CAMEL]; ok {
+ req.ContentLength = StringToInt64(value[0], -1)
+ delete(req.Header, HEADER_CONTENT_LENGTH_CAMEL)
+ } else if value, ok := req.Header[HEADER_CONTENT_LENGTH]; ok {
+ req.ContentLength = StringToInt64(value[0], -1)
+ delete(req.Header, HEADER_CONTENT_LENGTH)
+ }
+
+ req.Header[HEADER_USER_AGENT_CAMEL] = []string{USER_AGENT}
+ start := GetCurrentTimestamp()
+ resp, err = obsClient.httpClient.Do(req)
+ doLog(LEVEL_INFO, "Do http request cost %d ms", (GetCurrentTimestamp() - start))
+
+ // msg is only populated for >=300 responses; transport errors are
+ // returned directly without the "Failed to send request" log line.
+ var msg interface{}
+ if err != nil {
+ respError = err
+ resp = nil
+ } else {
+ doLog(LEVEL_DEBUG, "Response headers: %v", resp.Header)
+ if resp.StatusCode >= 300 {
+ respError = ParseResponseToObsError(resp)
+ msg = resp.Status
+ resp = nil
+ } else {
+ if output != nil {
+ respError = ParseResponseToBaseModel(resp, output, xmlResult)
+ }
+ if respError != nil {
+ doLog(LEVEL_WARN, "Parse response to BaseModel with error: %v", respError)
+ }
+ }
+ }
+
+ if msg != nil {
+ doLog(LEVEL_ERROR, "Failed to send request with reason:%v", msg)
+ }
+ doLog(LEVEL_DEBUG, "End method %s, obsclient cost %d ms", action, (GetCurrentTimestamp() - start))
+
+ return
+}
+
+// doHttp signs and sends one request, retrying up to conf.maxRetryCount
+// times with jittered backoff. 307 responses are re-sent to the Location
+// URL (extending the retry budget); seekable bodies are rewound before each
+// retry. Terminal failures are converted into an ObsError.
+func (obsClient ObsClient) doHttp(method, bucketName, objectKey string, params map[string]string,
+ headers map[string][]string, data interface{}, repeatable bool) (resp *http.Response, respError error) {
+
+ bucketName = strings.TrimSpace(bucketName)
+
+ objectKey = strings.TrimSpace(objectKey)
+
+ method = strings.ToUpper(method)
+
+ var redirectUrl string
+ var requestUrl string
+ maxRetryCount := obsClient.conf.maxRetryCount
+
+ // Normalize the body: strings and byte slices get an explicit
+ // Content-Length; any other io.Reader is passed through as-is.
+ var _data io.Reader
+ if data != nil {
+ if dataStr, ok := data.(string); ok {
+ doLog(LEVEL_DEBUG, "Do http request with string: %s", dataStr)
+ headers["Content-Length"] = []string{IntToString(len(dataStr))}
+ _data = strings.NewReader(dataStr)
+ } else if dataByte, ok := data.([]byte); ok {
+ doLog(LEVEL_DEBUG, "Do http request with byte array")
+ headers["Content-Length"] = []string{IntToString(len(dataByte))}
+ _data = bytes.NewReader(dataByte)
+ } else if dataReader, ok := data.(io.Reader); ok {
+ _data = dataReader
+ } else {
+ doLog(LEVEL_WARN, "Data is not a valid io.Reader")
+ return nil, errors.New("Data is not a valid io.Reader")
+ }
+ }
+
+ for i := 0; i <= maxRetryCount; i++ {
+ if redirectUrl != "" {
+ // Re-sign against the redirect host; keep the signed query string
+ // when the redirect URL itself has none.
+ parsedRedirectUrl, err := url.Parse(redirectUrl)
+ if err != nil {
+ return nil, err
+ }
+ requestUrl, _ = obsClient.doAuth(method, bucketName, objectKey, params, headers, parsedRedirectUrl.Host)
+ if parsedRequestUrl, _ := url.Parse(requestUrl); parsedRequestUrl.RawQuery != "" && parsedRedirectUrl.RawQuery == "" {
+ redirectUrl += "?" + parsedRequestUrl.RawQuery
+ }
+ requestUrl = redirectUrl
+ } else {
+ var err error
+ requestUrl, err = obsClient.doAuth(method, bucketName, objectKey, params, headers, "")
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ req, err := http.NewRequest(method, requestUrl, _data)
+ if err != nil {
+ return nil, err
+ }
+ doLog(LEVEL_DEBUG, "Do request with url [%s] and method [%s]", requestUrl, method)
+
+ // Never log the Authorization header.
+ if isDebugLogEnabled() {
+ auth := headers[HEADER_AUTH_CAMEL]
+ delete(headers, HEADER_AUTH_CAMEL)
+ doLog(LEVEL_DEBUG, "Request headers: %v", headers)
+ headers[HEADER_AUTH_CAMEL] = auth
+ }
+
+ // Host and Content-Length live on the request struct, not in Header.
+ for key, value := range headers {
+ if key == HEADER_HOST_CAMEL {
+ req.Host = value[0]
+ delete(headers, key)
+ } else if key == HEADER_CONTENT_LENGTH_CAMEL {
+ req.ContentLength = StringToInt64(value[0], -1)
+ delete(headers, key)
+ } else {
+ req.Header[key] = value
+ }
+ }
+
+ req.Header[HEADER_USER_AGENT_CAMEL] = []string{USER_AGENT}
+
+ start := GetCurrentTimestamp()
+ resp, err = obsClient.httpClient.Do(req)
+ doLog(LEVEL_INFO, "Do http request cost %d ms", (GetCurrentTimestamp() - start))
+
+ var msg interface{}
+ if err != nil {
+ msg = err
+ respError = err
+ resp = nil
+ } else {
+ doLog(LEVEL_DEBUG, "Response headers: %v", resp.Header)
+ if resp.StatusCode < 300 {
+ break
+ } else if !repeatable || (resp.StatusCode >= 300 && resp.StatusCode < 500 && resp.StatusCode != 307) {
+ // Non-retryable: client errors (except 307) or caller forbade retry.
+ respError = ParseResponseToObsError(resp)
+ resp = nil
+ break
+ } else if resp.StatusCode == 307 {
+ if location := resp.Header.Get(HEADER_LOCATION_CAMEL); location != "" {
+ redirectUrl = location
+ doLog(LEVEL_WARN, "Redirect request to %s", redirectUrl)
+ msg = resp.Status
+ maxRetryCount++
+ } else {
+ respError = ParseResponseToObsError(resp)
+ resp = nil
+ break
+ }
+ } else {
+ msg = resp.Status
+ }
+ }
+ if i != maxRetryCount {
+ // Drop the stale signature; doAuth re-signs on the next iteration.
+ if _, ok := headers[HEADER_AUTH_CAMEL]; ok {
+ delete(headers, HEADER_AUTH_CAMEL)
+ }
+ doLog(LEVEL_WARN, "Failed to send request with reason:%v, will try again", msg)
+ // Rewind the body for the retry when its type supports it.
+ if r, ok := _data.(*strings.Reader); ok {
+ r.Seek(0, 0)
+ } else if r, ok := _data.(*bytes.Reader); ok {
+ r.Seek(0, 0)
+ } else if r, ok := _data.(*fileReaderWrapper); ok {
+ fd, err := os.Open(r.filePath)
+ if err != nil {
+ return nil, err
+ }
+ // NOTE(review): this defer runs at doHttp return, so every retry
+ // of a file-backed body holds an extra open descriptor until the
+ // whole call finishes.
+ defer fd.Close()
+ fileReaderWrapper := &fileReaderWrapper{filePath: r.filePath}
+ fileReaderWrapper.mark = r.mark
+ fileReaderWrapper.reader = fd
+ fileReaderWrapper.totalCount = r.totalCount
+ _data = fileReaderWrapper
+ fd.Seek(r.mark, 0)
+ } else if r, ok := _data.(*readerWrapper); ok {
+ r.seek(0, 0)
+ }
+ // Jittered backoff that grows with the attempt number.
+ time.Sleep(time.Duration(float64(i+2) * rand.Float64() * float64(time.Second)))
+ } else {
+ doLog(LEVEL_ERROR, "Failed to send request with reason:%v", msg)
+ if resp != nil {
+ respError = ParseResponseToObsError(resp)
+ resp = nil
+ }
+ }
+ }
+ return
+}
+
+// connDelegate wraps a net.Conn and refreshes deadlines around every I/O
+// call: a short socketTimeout while the call is in flight, then a longer
+// finalTimeout afterwards so an idle connection is still eventually reaped.
+type connDelegate struct {
+ conn net.Conn
+ socketTimeout time.Duration
+ finalTimeout time.Duration
+}
+
+// getConnDelegate wraps conn with the given timeouts (both in seconds).
+func getConnDelegate(conn net.Conn, socketTimeout int, finalTimeout int) *connDelegate {
+ return &connDelegate{
+ conn: conn,
+ socketTimeout: time.Second * time.Duration(socketTimeout),
+ finalTimeout: time.Second * time.Duration(finalTimeout),
+ }
+}
+
+// Read applies socketTimeout for the read, then extends the deadline to
+// finalTimeout. Deadline-set errors are ignored (best-effort).
+func (delegate *connDelegate) Read(b []byte) (n int, err error) {
+ delegate.SetReadDeadline(time.Now().Add(delegate.socketTimeout))
+ n, err = delegate.conn.Read(b)
+ delegate.SetReadDeadline(time.Now().Add(delegate.finalTimeout))
+ return n, err
+}
+
+// Write applies socketTimeout for the write, then extends both read and
+// write deadlines to finalTimeout.
+func (delegate *connDelegate) Write(b []byte) (n int, err error) {
+ delegate.SetWriteDeadline(time.Now().Add(delegate.socketTimeout))
+ n, err = delegate.conn.Write(b)
+ finalTimeout := time.Now().Add(delegate.finalTimeout)
+ delegate.SetWriteDeadline(finalTimeout)
+ delegate.SetReadDeadline(finalTimeout)
+ return n, err
+}
+
+// Close closes the underlying connection.
+func (delegate *connDelegate) Close() error {
+ return delegate.conn.Close()
+}
+
+// LocalAddr returns the underlying connection's local address.
+func (delegate *connDelegate) LocalAddr() net.Addr {
+ return delegate.conn.LocalAddr()
+}
+
+// RemoteAddr returns the underlying connection's remote address.
+func (delegate *connDelegate) RemoteAddr() net.Addr {
+ return delegate.conn.RemoteAddr()
+}
+
+// SetDeadline forwards to the underlying connection.
+func (delegate *connDelegate) SetDeadline(t time.Time) error {
+ return delegate.conn.SetDeadline(t)
+}
+
+// SetReadDeadline forwards to the underlying connection.
+func (delegate *connDelegate) SetReadDeadline(t time.Time) error {
+ return delegate.conn.SetReadDeadline(t)
+}
+
+// SetWriteDeadline forwards to the underlying connection.
+func (delegate *connDelegate) SetWriteDeadline(t time.Time) error {
+ return delegate.conn.SetWriteDeadline(t)
+}
diff --git a/openstack/obs/log.go b/openstack/obs/log.go
new file mode 100644
index 000000000..ed0148251
--- /dev/null
+++ b/openstack/obs/log.go
@@ -0,0 +1,275 @@
+package obs
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+// Level is the severity of a log message; higher values are more severe.
+type Level int
+
+const (
+	LEVEL_OFF   Level = 500 // disables all logging
+	LEVEL_ERROR Level = 400
+	LEVEL_WARN  Level = 300
+	LEVEL_INFO  Level = 200
+	LEVEL_DEBUG Level = 100
+)
+
+// cacheCount is the number of messages the file logger buffers in memory
+// before flushing them to disk.
+const cacheCount = 50
+
+// logLevelMap maps each level to the prefix written in front of messages.
+var logLevelMap = map[Level]string{
+	LEVEL_OFF:   "[OFF]: ",
+	LEVEL_ERROR: "[ERROR]: ",
+	LEVEL_WARN:  "[WARN]: ",
+	LEVEL_INFO:  "[INFO]: ",
+	LEVEL_DEBUG: "[DEBUG]: ",
+}
+
+// logConfType holds the package-wide logging configuration.
+type logConfType struct {
+	level        Level  // minimum level that gets logged
+	logToConsole bool   // mirror messages to stdout
+	logFullPath  string // target log file ("" disables file logging)
+	maxLogSize   int64  // rotation threshold in bytes
+	backups      int    // number of rotated backup files kept
+}
+
+// getDefaultLogConf returns the default configuration: WARN level, no
+// console output, 30MB rotation size, 10 backups.
+func getDefaultLogConf() logConfType {
+	return logConfType{
+		level:        LEVEL_WARN,
+		logToConsole: false,
+		logFullPath:  "",
+		maxLogSize:   1024 * 1024 * 30, //30MB
+		backups:      10,
+	}
+}
+
+// logConf is the active logging configuration, guarded by lock.
+var logConf logConfType
+
+// loggerWrapper buffers log lines in memory and writes them through a
+// standard log.Logger to a size-rotated file.
+type loggerWrapper struct {
+	fullPath string        // absolute path of the active log file
+	fd       *os.File      // open handle for fullPath
+	queue    []string      // buffered, not-yet-flushed messages
+	logger   *log.Logger   // writes to fd
+	index    int           // next backup suffix used by rotate
+	lock     *sync.RWMutex // guards queue
+}
+
+// doInit prepares the message queue, the underlying logger and the lock.
+// Must be called after fullPath/fd are set.
+func (lw *loggerWrapper) doInit() {
+	lw.queue = make([]string, 0, cacheCount)
+	lw.logger = log.New(lw.fd, "", 0)
+	lw.lock = new(sync.RWMutex)
+}
+
+// rotate closes and renames the current log file to "<fullPath>.<index>"
+// once it reaches logConf.maxLogSize, then reopens a fresh file at the same
+// path. Indexes wrap after logConf.backups, overwriting the oldest backup.
+func (lw *loggerWrapper) rotate() {
+	stat, err := lw.fd.Stat()
+	if err == nil && stat.Size() >= logConf.maxLogSize {
+		lw.fd.Sync()
+		lw.fd.Close()
+
+		if lw.index > logConf.backups {
+			lw.index = 1
+		}
+		// NOTE(review): the Rename error is ignored; if it fails the reopened
+		// file simply keeps appending to the same path.
+		os.Rename(lw.fullPath, lw.fullPath+"."+IntToString(lw.index))
+		lw.index += 1
+
+		fd, err := os.OpenFile(lw.fullPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+		if err != nil {
+			// NOTE(review): panicking in library code is drastic — consider
+			// propagating the error instead.
+			panic(err)
+		}
+		lw.fd = fd
+		lw.logger.SetOutput(lw.fd)
+	}
+}
+
+// doFlush rotates the file if needed, writes every queued message and syncs
+// the file. The queue itself is left untouched; callers reset it.
+func (lw *loggerWrapper) doFlush() {
+	lw.rotate()
+	for _, m := range lw.queue {
+		lw.logger.Println(m)
+	}
+	lw.fd.Sync()
+}
+
+// doClose flushes pending messages, closes the file and clears all internal
+// state so the wrapper cannot be reused afterwards.
+func (lw *loggerWrapper) doClose() {
+	lw.doFlush()
+	lw.fd.Close()
+	lw.queue = nil
+	lw.fd = nil
+	lw.logger = nil
+	lw.lock = nil
+	lw.fullPath = ""
+}
+
+// Printf formats a message and appends it to the in-memory queue, flushing
+// the queue to disk once it holds cacheCount entries.
+//
+// The whole operation runs under lw.lock. The previous revision mixed the
+// package-level lock with lw.lock (appending under a shared read lock,
+// which races when two goroutines log concurrently) and silently dropped
+// the message that triggered a flush; both defects are fixed here.
+func (lw *loggerWrapper) Printf(format string, v ...interface{}) {
+	msg := fmt.Sprintf(format, v...)
+	lw.lock.Lock()
+	defer lw.lock.Unlock()
+	if len(lw.queue) >= cacheCount {
+		// Queue is full: persist the buffered messages and start a new batch.
+		lw.doFlush()
+		lw.queue = make([]string, 0, cacheCount)
+	}
+	// Always record the current message, including the one that caused a flush.
+	lw.queue = append(lw.queue, msg)
+}
+
+// consoleLogger mirrors messages to stdout when enabled via InitLog.
+var consoleLogger *log.Logger
+
+// fileLogger writes messages to the configured log file; nil when file
+// logging is disabled.
+var fileLogger *loggerWrapper
+
+// lock guards the package-level logger state above.
+var lock *sync.RWMutex = new(sync.RWMutex)
+
+// isDebugLogEnabled reports whether DEBUG messages would be logged.
+func isDebugLogEnabled() bool {
+	return logConf.level <= LEVEL_DEBUG
+}
+
+// isErrorLogEnabled reports whether ERROR messages would be logged.
+func isErrorLogEnabled() bool {
+	return logConf.level <= LEVEL_ERROR
+}
+
+// isWarnLogEnabled reports whether WARN messages would be logged.
+func isWarnLogEnabled() bool {
+	return logConf.level <= LEVEL_WARN
+}
+
+// isInfoLogEnabled reports whether INFO messages would be logged.
+func isInfoLogEnabled() bool {
+	return logConf.level <= LEVEL_INFO
+}
+
+// reset closes the file logger (if any), drops the console logger and
+// restores the default configuration. Callers must hold lock.
+func reset() {
+	if fileLogger != nil {
+		fileLogger.doClose()
+		fileLogger = nil
+	}
+	consoleLogger = nil
+	logConf = getDefaultLogConf()
+}
+
+// InitLog (re)initializes the package-level loggers.
+//
+// logFullPath:  target log file; a ".log" suffix is appended when missing
+//               and parent directories are created. Empty disables file logging.
+// maxLogSize:   rotation threshold in bytes (values <= 0 keep the default).
+// backups:      number of rotated backups to keep (values <= 0 keep the default).
+// level:        minimum level that will be emitted.
+// logToConsole: additionally mirror messages to stdout.
+//
+// Any previously configured loggers are closed first.
+func InitLog(logFullPath string, maxLogSize int64, backups int, level Level, logToConsole bool) error {
+	lock.Lock()
+	defer lock.Unlock()
+	reset()
+	if fullPath := strings.TrimSpace(logFullPath); fullPath != "" {
+		_fullPath, err := filepath.Abs(fullPath)
+		if err != nil {
+			return err
+		}
+
+		if !strings.HasSuffix(_fullPath, ".log") {
+			_fullPath += ".log"
+		}
+
+		stat, err := os.Stat(_fullPath)
+		if err == nil && stat.IsDir() {
+			return errors.New("logFullPath:[" + _fullPath + "] is a directory")
+		} else if err := os.MkdirAll(filepath.Dir(_fullPath), os.ModePerm); err != nil {
+			return err
+		}
+
+		fd, err := os.OpenFile(_fullPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+		if err != nil {
+			return err
+		}
+		fileLogger = &loggerWrapper{fullPath: _fullPath, fd: fd, index: 1}
+
+		if stat == nil {
+			// The file did not exist before OpenFile created it; stat it now and
+			// fail on error instead of dereferencing a nil FileInfo below (the
+			// old code ignored this error and could panic on stat.Name()).
+			if stat, err = os.Stat(_fullPath); err != nil {
+				return err
+			}
+		}
+		prefix := stat.Name() + "."
+		// Scan existing backups (e.g. "foo.log.3") so rotation continues after
+		// the highest index already on disk.
+		walkFunc := func(path string, info os.FileInfo, err error) error {
+			if err != nil || info == nil {
+				// Skip unreadable entries instead of panicking on a nil info
+				// (the old code dereferenced info unconditionally).
+				return nil
+			}
+			if name := info.Name(); strings.HasPrefix(name, prefix) {
+				if i := StringToInt(name[len(prefix):], 0); i >= fileLogger.index {
+					fileLogger.index = i + 1
+				}
+			}
+			return nil
+		}
+
+		// Walk errors are non-fatal: worst case an existing backup index is reused.
+		filepath.Walk(filepath.Dir(_fullPath), walkFunc)
+		fileLogger.doInit()
+	}
+	if maxLogSize > 0 {
+		logConf.maxLogSize = maxLogSize
+	}
+	if backups > 0 {
+		logConf.backups = backups
+	}
+	logConf.level = level
+	if logToConsole {
+		consoleLogger = log.New(os.Stdout, "", log.LstdFlags)
+	}
+	return nil
+}
+
+// CloseLog flushes pending messages, shuts both loggers down and restores
+// the default configuration.
+func CloseLog() {
+	// NOTE(review): fileLogger/consoleLogger are read before the lock is
+	// taken — a best-effort fast path; state may change before Lock().
+	if fileLogger != nil || consoleLogger != nil {
+		lock.Lock()
+		defer lock.Unlock()
+		reset()
+	}
+}
+
+// SyncLog forces any buffered file-log messages out to disk.
+func SyncLog() {
+	if fileLogger != nil {
+		lock.Lock()
+		defer lock.Unlock()
+		fileLogger.doFlush()
+	}
+}
+
+// logEnabled reports whether at least one logger destination is configured.
+func logEnabled() bool {
+	return consoleLogger != nil || fileLogger != nil
+}
+
+// doLog is the SDK-internal logging entry point. It formats the message,
+// prefixes it with the caller's file:line and the level tag, and hands it
+// to the console and/or file logger. The file-logger line additionally
+// carries a UTC timestamp.
+func doLog(level Level, format string, v ...interface{}) {
+	if logEnabled() && logConf.level <= level {
+		lock.RLock()
+		defer lock.RUnlock()
+		msg := fmt.Sprintf(format, v...)
+		// Caller(1) resolves the direct caller of doLog within the SDK.
+		if _, file, line, ok := runtime.Caller(1); ok {
+			index := strings.LastIndex(file, "/")
+			if index >= 0 {
+				file = file[index+1:]
+			}
+			msg = fmt.Sprintf("%s:%d|%s", file, line, msg)
+		}
+		prefix := logLevelMap[level]
+		if consoleLogger != nil {
+			consoleLogger.Printf("%s%s", prefix, msg)
+		}
+		if fileLogger != nil {
+			nowDate := FormatUtcNow("2006-01-02T15:04:05Z")
+			fileLogger.Printf("%s %s%s", nowDate, prefix, msg)
+		}
+	}
+}
+
+// LOG is the exported logging entry point for SDK users. It mirrors doLog
+// except that the file-logger line carries no timestamp.
+// NOTE(review): near-duplicate of doLog; kept separate because
+// runtime.Caller(1) must resolve the direct caller of each function, so a
+// shared helper would report the wrong file:line.
+func LOG(level Level, format string, v ...interface{}) {
+	if logEnabled() && logConf.level <= level {
+		lock.RLock()
+		defer lock.RUnlock()
+		msg := fmt.Sprintf(format, v...)
+		if _, file, line, ok := runtime.Caller(1); ok {
+			index := strings.LastIndex(file, "/")
+			if index >= 0 {
+				file = file[index+1:]
+			}
+			msg = fmt.Sprintf("%s:%d|%s", file, line, msg)
+		}
+		prefix := logLevelMap[level]
+		if consoleLogger != nil {
+			consoleLogger.Printf("%s%s", prefix, msg)
+		}
+		if fileLogger != nil {
+			fileLogger.Printf("%s%s", prefix, msg)
+		}
+	}
+}
diff --git a/openstack/obs/model.go b/openstack/obs/model.go
new file mode 100644
index 000000000..fb952bec4
--- /dev/null
+++ b/openstack/obs/model.go
@@ -0,0 +1,838 @@
+package obs
+
+import (
+ "encoding/xml"
+ "io"
+ "net/http"
+ "time"
+)
+
+// BaseModel carries fields common to every OBS response: the HTTP status
+// code, the service-assigned request id, and the raw response headers.
+type BaseModel struct {
+	StatusCode      int                 `xml:"-"`
+	RequestId       string              `xml:"RequestId"`
+	ResponseHeaders map[string][]string `xml:"-"`
+}
+
+// Bucket describes one bucket entry in a ListBuckets response.
+type Bucket struct {
+	XMLName      xml.Name  `xml:"Bucket"`
+	Name         string    `xml:"Name"`
+	CreationDate time.Time `xml:"CreationDate"`
+}
+
+// Owner identifies the owner of a bucket or object.
+type Owner struct {
+	XMLName     xml.Name `xml:"Owner"`
+	ID          string   `xml:"ID"`
+	DisplayName string   `xml:"DisplayName,omitempty"`
+}
+
+// Initiator identifies who started a multipart upload.
+type Initiator struct {
+	XMLName     xml.Name `xml:"Initiator"`
+	ID          string   `xml:"ID"`
+	DisplayName string   `xml:"DisplayName,omitempty"`
+}
+
+// ListBucketsInput is the (empty) input for listing all buckets.
+type ListBucketsInput struct {
+}
+
+type ListBucketsOutput struct {
+ BaseModel
+ XMLName xml.Name `xml:"ListAllMyBucketsResult"`
+ Owner Owner `xml:"Owner"`
+ Buckets []Bucket `xml:"Buckets>Bucket"`
+}
+
+type BucketLocation struct {
+ XMLName xml.Name `xml:"CreateBucketConfiguration"`
+ Location string `xml:"LocationConstraint"`
+}
+
+type CreateBucketInput struct {
+ BucketLocation
+ Bucket string `xml:"-"`
+ ACL AclType `xml:"-"`
+ StorageClass StorageClassType `xml:"-"`
+}
+
+type BucketStoragePolicy struct {
+ XMLName xml.Name `xml:"StoragePolicy"`
+ StorageClass StorageClassType `xml:"DefaultStorageClass"`
+}
+
+type SetBucketStoragePolicyInput struct {
+ Bucket string `xml:"-"`
+ BucketStoragePolicy
+}
+
+type GetBucketStoragePolicyOutput struct {
+ BaseModel
+ BucketStoragePolicy
+}
+
+type ListObjsInput struct {
+ Prefix string
+ MaxKeys int
+ Delimiter string
+ Origin string
+ RequestHeader string
+}
+
+type ListObjectsInput struct {
+ ListObjsInput
+ Bucket string
+ Marker string
+}
+
+type Content struct {
+ XMLName xml.Name `xml:"Contents"`
+ Owner Owner `xml:"Owner"`
+ ETag string `xml:"ETag"`
+ Key string `xml:"Key"`
+ LastModified time.Time `xml:"LastModified"`
+ Size int64 `xml:"Size"`
+ StorageClass StorageClassType `xml:"StorageClass"`
+}
+
+type ListObjectsOutput struct {
+ BaseModel
+ XMLName xml.Name `xml:"ListBucketResult"`
+ Delimiter string `xml:"Delimiter"`
+ IsTruncated bool `xml:"IsTruncated"`
+ Marker string `xml:"Marker"`
+ NextMarker string `xml:"NextMarker"`
+ MaxKeys int `xml:"MaxKeys"`
+ Name string `xml:"Name"`
+ Prefix string `xml:"Prefix"`
+ Contents []Content `xml:"Contents"`
+ CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
+ Location string `xml:"-"`
+}
+
+type ListVersionsInput struct {
+ ListObjsInput
+ Bucket string
+ KeyMarker string
+ VersionIdMarker string
+}
+
+type Version struct {
+ DeleteMarker
+ XMLName xml.Name `xml:"Version"`
+ ETag string `xml:"ETag"`
+ Size int64 `xml:"Size"`
+}
+
+type DeleteMarker struct {
+ XMLName xml.Name `xml:"DeleteMarker"`
+ Key string `xml:"Key"`
+ VersionId string `xml:"VersionId"`
+ IsLatest bool `xml:"IsLatest"`
+ LastModified time.Time `xml:"LastModified"`
+ Owner Owner `xml:"Owner"`
+ StorageClass StorageClassType `xml:"StorageClass"`
+}
+
+type ListVersionsOutput struct {
+ BaseModel
+ XMLName xml.Name `xml:"ListVersionsResult"`
+ Delimiter string `xml:"Delimiter"`
+ IsTruncated bool `xml:"IsTruncated"`
+ KeyMarker string `xml:"KeyMarker"`
+ NextKeyMarker string `xml:"NextKeyMarker"`
+ VersionIdMarker string `xml:"VersionIdMarker"`
+ NextVersionIdMarker string `xml:"NextVersionIdMarker"`
+ MaxKeys int `xml:"MaxKeys"`
+ Name string `xml:"Name"`
+ Prefix string `xml:"Prefix"`
+ Versions []Version `xml:"Version"`
+ DeleteMarkers []DeleteMarker `xml:"DeleteMarker"`
+ CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
+ Location string `xml:"-"`
+}
+
+type ListMultipartUploadsInput struct {
+ Bucket string
+ Prefix string
+ MaxUploads int
+ Delimiter string
+ KeyMarker string
+ UploadIdMarker string
+}
+
+type Upload struct {
+ XMLName xml.Name `xml:"Upload"`
+ Key string `xml:"Key"`
+ UploadId string `xml:"UploadId"`
+ Initiated time.Time `xml:"Initiated"`
+ StorageClass StorageClassType `xml:"StorageClass"`
+ Owner Owner `xml:"Owner"`
+ Initiator Initiator `xml:"Initiator"`
+}
+
+type ListMultipartUploadsOutput struct {
+ BaseModel
+ XMLName xml.Name `xml:"ListMultipartUploadsResult"`
+ Bucket string `xml:"Bucket"`
+ KeyMarker string `xml:"KeyMarker"`
+ NextKeyMarker string `xml:"NextKeyMarker"`
+ UploadIdMarker string `xml:"UploadIdMarker"`
+ NextUploadIdMarker string `xml:"NextUploadIdMarker"`
+ Delimiter string `xml:"Delimiter"`
+ IsTruncated bool `xml:"IsTruncated"`
+ MaxUploads int `xml:"MaxUploads"`
+ Prefix string `xml:"Prefix"`
+ Uploads []Upload `xml:"Upload"`
+ CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
+}
+
+type BucketQuota struct {
+ XMLName xml.Name `xml:"Quota"`
+ Quota int64 `xml:"StorageQuota"`
+}
+
+type SetBucketQuotaInput struct {
+ Bucket string `xml:"-"`
+ BucketQuota
+}
+
+type GetBucketQuotaOutput struct {
+ BaseModel
+ BucketQuota
+}
+
+type GetBucketStorageInfoOutput struct {
+ BaseModel
+ XMLName xml.Name `xml:"GetBucketStorageInfoResult"`
+ Size int64 `xml:"Size"`
+ ObjectNumber int `xml:"ObjectNumber"`
+}
+
+type GetBucketLocationOutput struct {
+ BaseModel
+ BucketLocation
+}
+
+type Grantee struct {
+ XMLName xml.Name `xml:"Grantee"`
+ Type GranteeType `xml:"type,attr"`
+ ID string `xml:"ID,omitempty"`
+ DisplayName string `xml:"DisplayName,omitempty"`
+ URI GroupUriType `xml:"URI,omitempty"`
+}
+
+type Grant struct {
+ XMLName xml.Name `xml:"Grant"`
+ Grantee Grantee `xml:"Grantee"`
+ Permission PermissionType `xml:"Permission"`
+}
+
+type AccessControlPolicy struct {
+ XMLName xml.Name `xml:"AccessControlPolicy"`
+ Owner Owner `xml:"Owner"`
+ Grants []Grant `xml:"AccessControlList>Grant"`
+}
+
+type GetBucketAclOutput struct {
+ BaseModel
+ AccessControlPolicy
+}
+
+type SetBucketAclInput struct {
+ Bucket string `xml:"-"`
+ ACL AclType `xml:"-"`
+ AccessControlPolicy
+}
+
+type SetBucketPolicyInput struct {
+ Bucket string
+ Policy string
+}
+
+type GetBucketPolicyOutput struct {
+ BaseModel
+ Policy string
+}
+
+type CorsRule struct {
+ XMLName xml.Name `xml:"CORSRule"`
+ ID string `xml:"ID,omitempty"`
+ AllowedOrigin []string `xml:"AllowedOrigin"`
+ AllowedMethod []string `xml:"AllowedMethod"`
+ AllowedHeader []string `xml:"AllowedHeader,omitempty"`
+ MaxAgeSeconds int `xml:"MaxAgeSeconds"`
+ ExposeHeader []string `xml:"ExposeHeader,omitempty"`
+}
+
+type BucketCors struct {
+ XMLName xml.Name `xml:"CORSConfiguration"`
+ CorsRules []CorsRule `xml:"CORSRule"`
+}
+
+type SetBucketCorsInput struct {
+ Bucket string `xml:"-"`
+ BucketCors
+}
+
+type GetBucketCorsOutput struct {
+ BaseModel
+ BucketCors
+}
+
+type BucketVersioningConfiguration struct {
+ XMLName xml.Name `xml:"VersioningConfiguration"`
+ Status VersioningStatusType `xml:"Status"`
+}
+
+type SetBucketVersioningInput struct {
+ Bucket string `xml:"-"`
+ BucketVersioningConfiguration
+}
+
+type GetBucketVersioningOutput struct {
+ BaseModel
+ BucketVersioningConfiguration
+}
+
+type IndexDocument struct {
+ Suffix string `xml:"Suffix"`
+}
+
+type ErrorDocument struct {
+ Key string `xml:"Key,omitempty"`
+}
+
+type Condition struct {
+ XMLName xml.Name `xml:"Condition"`
+ KeyPrefixEquals string `xml:"KeyPrefixEquals,omitempty"`
+ HttpErrorCodeReturnedEquals string `xml:"HttpErrorCodeReturnedEquals,omitempty"`
+}
+
+type Redirect struct {
+ XMLName xml.Name `xml:"Redirect"`
+ Protocol ProtocolType `xml:"Protocol,omitempty"`
+ HostName string `xml:"HostName,omitempty"`
+ ReplaceKeyPrefixWith string `xml:"ReplaceKeyPrefixWith,omitempty"`
+ ReplaceKeyWith string `xml:"ReplaceKeyWith,omitempty"`
+ HttpRedirectCode string `xml:"HttpRedirectCode,omitempty"`
+}
+
+type RoutingRule struct {
+ XMLName xml.Name `xml:"RoutingRule"`
+ Condition Condition `xml:"Condition,omitempty"`
+ Redirect Redirect `xml:"Redirect"`
+}
+
+type RedirectAllRequestsTo struct {
+ XMLName xml.Name `xml:"RedirectAllRequestsTo"`
+ Protocol ProtocolType `xml:"Protocol,omitempty"`
+ HostName string `xml:"HostName"`
+}
+
+type BucketWebsiteConfiguration struct {
+ XMLName xml.Name `xml:"WebsiteConfiguration"`
+ RedirectAllRequestsTo RedirectAllRequestsTo `xml:"RedirectAllRequestsTo,omitempty"`
+ IndexDocument IndexDocument `xml:"IndexDocument,omitempty"`
+ ErrorDocument ErrorDocument `xml:"ErrorDocument,omitempty"`
+ RoutingRules []RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"`
+}
+
+type SetBucketWebsiteConfigurationInput struct {
+ Bucket string `xml:"-"`
+ BucketWebsiteConfiguration
+}
+
+type GetBucketWebsiteConfigurationOutput struct {
+ BaseModel
+ BucketWebsiteConfiguration
+}
+
+type GetBucketMetadataInput struct {
+ Bucket string
+ Origin string
+ RequestHeader string
+}
+
+type GetBucketMetadataOutput struct {
+ BaseModel
+ StorageClass StorageClassType
+ Location string
+ AllowOrigin string
+ AllowMethod string
+ AllowHeader string
+ MaxAgeSeconds int
+ ExposeHeader string
+}
+
+type BucketLoggingStatus struct {
+ XMLName xml.Name `xml:"BucketLoggingStatus"`
+ TargetBucket string `xml:"LoggingEnabled>TargetBucket,omitempty"`
+ TargetPrefix string `xml:"LoggingEnabled>TargetPrefix,omitempty"`
+ TargetGrants []Grant `xml:"LoggingEnabled>TargetGrants>Grant,omitempty"`
+}
+
+type SetBucketLoggingConfigurationInput struct {
+ Bucket string `xml:"-"`
+ BucketLoggingStatus
+}
+
+type GetBucketLoggingConfigurationOutput struct {
+ BaseModel
+ BucketLoggingStatus
+}
+
+type Transition struct {
+ XMLName xml.Name `xml:"Transition"`
+ Date time.Time `xml:"Date,omitempty"`
+ Days int `xml:"Days,omitempty"`
+ StorageClass StorageClassType `xml:"StorageClass"`
+}
+
+type Expiration struct {
+ XMLName xml.Name `xml:"Expiration"`
+ Date time.Time `xml:"Date,omitempty"`
+ Days int `xml:"Days,omitempty"`
+}
+
+type NoncurrentVersionTransition struct {
+ XMLName xml.Name `xml:"NoncurrentVersionTransition"`
+ NoncurrentDays int `xml:"NoncurrentDays"`
+ StorageClass StorageClassType `xml:"StorageClass"`
+}
+
+type NoncurrentVersionExpiration struct {
+ XMLName xml.Name `xml:"NoncurrentVersionExpiration"`
+ NoncurrentDays int `xml:"NoncurrentDays"`
+}
+
+type LifecycleRule struct {
+ ID string `xml:"ID,omitempty"`
+ Prefix string `xml:"Prefix"`
+ Status RuleStatusType `xml:"Status"`
+ Transitions []Transition `xml:"Transition,omitempty"`
+ Expiration Expiration `xml:"Expiration,omitempty"`
+ NoncurrentVersionTransitions []NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty"`
+ NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"`
+}
+
+type BucketLifecyleConfiguration struct {
+ XMLName xml.Name `xml:"LifecycleConfiguration"`
+ LifecycleRules []LifecycleRule `xml:"Rule"`
+}
+
+type SetBucketLifecycleConfigurationInput struct {
+ Bucket string `xml:"-"`
+ BucketLifecyleConfiguration
+}
+
+type GetBucketLifecycleConfigurationOutput struct {
+ BaseModel
+ BucketLifecyleConfiguration
+}
+
+type Tag struct {
+ XMLName xml.Name `xml:"Tag"`
+ Key string `xml:"Key"`
+ Value string `xml:"Value"`
+}
+
+type BucketTagging struct {
+ XMLName xml.Name `xml:"Tagging"`
+ Tags []Tag `xml:"TagSet>Tag"`
+}
+
+type SetBucketTaggingInput struct {
+ Bucket string `xml:"-"`
+ BucketTagging
+}
+
+type GetBucketTaggingOutput struct {
+ BaseModel
+ BucketTagging
+}
+
+type FilterRule struct {
+ XMLName xml.Name `xml:"FilterRule"`
+ Name string `xml:"Name,omitempty"`
+ Value string `xml:"Value,omitempty"`
+}
+
+type TopicConfiguration struct {
+ XMLName xml.Name `xml:"TopicConfiguration"`
+ ID string `xml:"Id,omitempty"`
+ Topic string `xml:"Topic"`
+ Events []string `xml:"Event"`
+ FilterRules []FilterRule `xml:"Filter>S3Key>FilterRule"`
+}
+
+type BucketNotification struct {
+ XMLName xml.Name `xml:"NotificationConfiguration"`
+ TopicConfigurations []TopicConfiguration `xml:"TopicConfiguration"`
+}
+
+type SetBucketNotificationInput struct {
+ Bucket string `xml:"-"`
+ BucketNotification
+}
+
+type GetBucketNotificationOutput struct {
+ BaseModel
+ BucketNotification
+}
+
+type DeleteObjectInput struct {
+ Bucket string
+ Key string
+ VersionId string
+}
+
+type DeleteObjectOutput struct {
+ BaseModel
+ VersionId string
+ DeleteMarker bool
+}
+
+type ObjectToDelete struct {
+ XMLName xml.Name `xml:"Object"`
+ Key string `xml:"Key"`
+ VersionId string `xml:"VersionId,omitempty"`
+}
+
+type DeleteObjectsInput struct {
+ Bucket string `xml:"-"`
+ XMLName xml.Name `xml:"Delete"`
+ Quiet bool `xml:"Quiet,omitempty"`
+ Objects []ObjectToDelete `xml:"Object"`
+}
+
+type Deleted struct {
+ XMLName xml.Name `xml:"Deleted"`
+ Key string `xml:"Key"`
+ VersionId string `xml:"VersionId"`
+ DeleteMarker bool `xml:"DeleteMarker"`
+ DeleteMarkerVersionId string `xml:"DeleteMarkerVersionId"`
+}
+
+type Error struct {
+ XMLName xml.Name `xml:"Error"`
+ Key string `xml:"Key"`
+ VersionId string `xml:"VersionId"`
+ Code string `xml:"Code"`
+ Message string `xml:"Message"`
+}
+
+type DeleteObjectsOutput struct {
+ BaseModel
+ XMLName xml.Name `xml:"DeleteResult"`
+ Deleteds []Deleted `xml:"Deleted"`
+ Errors []Error `xml:"Error"`
+}
+
+type SetObjectAclInput struct {
+ Bucket string `xml:"-"`
+ Key string `xml:"-"`
+ VersionId string `xml:"-"`
+ ACL AclType `xml:"-"`
+ AccessControlPolicy
+}
+
+type GetObjectAclInput struct {
+ Bucket string
+ Key string
+ VersionId string
+}
+
+type GetObjectAclOutput struct {
+ BaseModel
+ VersionId string
+ AccessControlPolicy
+}
+
+type RestoreObjectInput struct {
+ Bucket string `xml:"-"`
+ Key string `xml:"-"`
+ VersionId string `xml:"-"`
+ XMLName xml.Name `xml:"RestoreRequest"`
+ Days int `xml:"Days"`
+ Tier RestoreTierType `xml:"GlacierJobParameters>Tier,omitempty"`
+}
+
+type ISseHeader interface {
+ GetEncryption() string
+ GetKey() string
+}
+
+type SseKmsHeader struct {
+ Encryption string
+ Key string
+}
+
+type SseCHeader struct {
+ Encryption string
+ Key string
+ KeyMD5 string
+}
+
+type GetObjectMetadataInput struct {
+ Bucket string
+ Key string
+ VersionId string
+ Origin string
+ RequestHeader string
+ SseHeader ISseHeader
+}
+
+type GetObjectMetadataOutput struct {
+ BaseModel
+ VersionId string
+ WebsiteRedirectLocation string
+ Expiration string
+ Restore string
+ StorageClass StorageClassType
+ ContentLength int64
+ ContentType string
+ ETag string
+ AllowOrigin string
+ AllowHeader string
+ AllowMethod string
+ ExposeHeader string
+ MaxAgeSeconds int
+ LastModified time.Time
+ SseHeader ISseHeader
+ Metadata map[string]string
+}
+
+type GetObjectInput struct {
+ GetObjectMetadataInput
+ IfMatch string
+ IfNoneMatch string
+ IfUnmodifiedSince time.Time
+ IfModifiedSince time.Time
+ RangeStart int64
+ RangeEnd int64
+ ImageProcess string
+ ResponseCacheControl string
+ ResponseContentDisposition string
+ ResponseContentEncoding string
+ ResponseContentLanguage string
+ ResponseContentType string
+ ResponseExpires string
+}
+
+type GetObjectOutput struct {
+ GetObjectMetadataOutput
+ DeleteMarker bool
+ CacheControl string
+ ContentDisposition string
+ ContentEncoding string
+ ContentLanguage string
+ Expires string
+ Body io.ReadCloser
+}
+
+type ObjectOperationInput struct {
+ Bucket string
+ Key string
+ ACL AclType
+ StorageClass StorageClassType
+ WebsiteRedirectLocation string
+ SseHeader ISseHeader
+ Metadata map[string]string
+}
+
+type PutObjectBasicInput struct {
+ ObjectOperationInput
+ ContentType string
+ ContentMD5 string
+ ContentLength int64
+}
+
+type PutObjectInput struct {
+ PutObjectBasicInput
+ Body io.Reader
+}
+
+type PutFileInput struct {
+ PutObjectBasicInput
+ SourceFile string
+}
+
+type PutObjectOutput struct {
+ BaseModel
+ VersionId string
+ SseHeader ISseHeader
+ StorageClass StorageClassType
+ ETag string
+}
+
+type CopyObjectInput struct {
+ ObjectOperationInput
+ CopySourceBucket string
+ CopySourceKey string
+ CopySourceVersionId string
+ CopySourceIfMatch string
+ CopySourceIfNoneMatch string
+ CopySourceIfUnmodifiedSince time.Time
+ CopySourceIfModifiedSince time.Time
+ SourceSseHeader ISseHeader
+ CacheControl string
+ ContentDisposition string
+ ContentEncoding string
+ ContentLanguage string
+ ContentType string
+ Expires string
+ MetadataDirective MetadataDirectiveType
+}
+
+type CopyObjectOutput struct {
+ BaseModel
+ CopySourceVersionId string `xml:"-"`
+ VersionId string `xml:"-"`
+ SseHeader ISseHeader `xml:"-"`
+ XMLName xml.Name `xml:"CopyObjectResult"`
+ LastModified time.Time `xml:"LastModified"`
+ ETag string `xml:"ETag"`
+}
+
+type AbortMultipartUploadInput struct {
+ Bucket string
+ Key string
+ UploadId string
+}
+
+type InitiateMultipartUploadInput struct {
+ ObjectOperationInput
+ ContentType string
+}
+
+type InitiateMultipartUploadOutput struct {
+ BaseModel
+ XMLName xml.Name `xml:"InitiateMultipartUploadResult"`
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ UploadId string `xml:"UploadId"`
+ SseHeader ISseHeader
+}
+
+type UploadPartInput struct {
+ Bucket string
+ Key string
+ PartNumber int
+ UploadId string
+ ContentMD5 string
+ SseHeader ISseHeader
+ Body io.Reader
+ SourceFile string
+ Offset int64
+ PartSize int64
+}
+
+type UploadPartOutput struct {
+ BaseModel
+ PartNumber int
+ ETag string
+ SseHeader ISseHeader
+}
+
+type Part struct {
+ XMLName xml.Name `xml:"Part"`
+ PartNumber int `xml:"PartNumber"`
+ ETag string `xml:"ETag"`
+ LastModified time.Time `xml:"LastModified,omitempty"`
+ Size int64 `xml:"Size,omitempty"`
+}
+
+type CompleteMultipartUploadInput struct {
+ Bucket string `xml:"-"`
+ Key string `xml:"-"`
+ UploadId string `xml:"-"`
+ XMLName xml.Name `xml:"CompleteMultipartUpload"`
+ Parts []Part `xml:"Part"`
+}
+
+type CompleteMultipartUploadOutput struct {
+ BaseModel
+ VersionId string `xml:"-"`
+ SseHeader ISseHeader `xml:"-"`
+ XMLName xml.Name `xml:"CompleteMultipartUploadResult"`
+ Location string `xml:"Location"`
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ ETag string `xml:"ETag"`
+}
+
+type ListPartsInput struct {
+ Bucket string
+ Key string
+ UploadId string
+ MaxParts int
+ PartNumberMarker int
+}
+
+type ListPartsOutput struct {
+ BaseModel
+ XMLName xml.Name `xml:"ListPartsResult"`
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ UploadId string `xml:"UploadId"`
+ PartNumberMarker int `xml:"PartNumberMarker"`
+ NextPartNumberMarker int `xml:"NextPartNumberMarker"`
+ MaxParts int `xml:"MaxParts"`
+ IsTruncated bool `xml:"IsTruncated"`
+ StorageClass StorageClassType `xml:"StorageClass"`
+ Initiator Initiator `xml:"Initiator"`
+ Owner Owner `xml:"Owner"`
+ Parts []Part `xml:"Part"`
+}
+
+type CopyPartInput struct {
+ Bucket string
+ Key string
+ UploadId string
+ PartNumber int
+ CopySourceBucket string
+ CopySourceKey string
+ CopySourceVersionId string
+ CopySourceRangeStart int64
+ CopySourceRangeEnd int64
+ SseHeader ISseHeader
+ SourceSseHeader ISseHeader
+}
+
+type CopyPartOutput struct {
+ BaseModel
+ XMLName xml.Name `xml:"CopyPartResult"`
+ PartNumber int `xml:"-"`
+ ETag string `xml:"ETag"`
+ LastModified time.Time `xml:"LastModified"`
+ SseHeader ISseHeader `xml:"-"`
+}
+
+type CreateSignedUrlInput struct {
+ Method HttpMethodType
+ Bucket string
+ Key string
+ SubResource SubResourceType
+ Expires int
+ Headers map[string]string
+ QueryParams map[string]string
+}
+
+type CreateSignedUrlOutput struct {
+ SignedUrl string
+ ActualSignedRequestHeaders http.Header
+}
+
+type CreateBrowserBasedSignatureInput struct {
+ Bucket string
+ Key string
+ Expires int
+ FormParams map[string]string
+}
+
+type CreateBrowserBasedSignatureOutput struct {
+ OriginPolicy string
+ Policy string
+ Algorithm string
+ Credential string
+ Date string
+ Signature string
+}
diff --git a/openstack/obs/temporary.go b/openstack/obs/temporary.go
new file mode 100644
index 000000000..9e457f512
--- /dev/null
+++ b/openstack/obs/temporary.go
@@ -0,0 +1,666 @@
+package obs
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+)
+
+// CreateSignedUrl builds a temporarily-authorized (pre-signed) URL for the
+// given method/bucket/key. The input's query params and headers are copied so
+// the caller's maps are never mutated; a non-positive Expires defaults to
+// 300 seconds. Signing itself is delegated to doAuthTemporary.
+func (obsClient ObsClient) CreateSignedUrl(input *CreateSignedUrlInput) (output *CreateSignedUrlOutput, err error) {
+ if input == nil {
+ return nil, errors.New("CreateSignedUrlInput is nil")
+ }
+
+ params := make(map[string]string, len(input.QueryParams))
+ for key, value := range input.QueryParams {
+ params[key] = value
+ }
+
+ // A sub-resource (e.g. "acl", "policy") is a valueless query key.
+ if input.SubResource != "" {
+ params[string(input.SubResource)] = ""
+ }
+
+ headers := make(map[string][]string, len(input.Headers))
+ for key, value := range input.Headers {
+ headers[key] = []string{value}
+ }
+
+ if input.Expires <= 0 {
+ input.Expires = 300
+ }
+
+ requestUrl, err := obsClient.doAuthTemporary(string(input.Method), input.Bucket, input.Key, params, headers, int64(input.Expires))
+ if err != nil {
+ return nil, err
+ }
+
+ // headers was filled in by doAuthTemporary with everything that was signed;
+ // the caller must send these unchanged.
+ output = &CreateSignedUrlOutput{
+ SignedUrl: requestUrl,
+ ActualSignedRequestHeaders: headers,
+ }
+ return
+}
+
+// CreateBrowserBasedSignature builds a V4 browser-based upload policy and its
+// signature for form POST uploads. FormParams plus the algorithm/credential/
+// date (and security token, if any) become policy conditions; if Bucket or Key
+// is empty, a wildcard starts-with condition is emitted for it instead.
+// A non-positive Expires defaults to 300 seconds.
+//
+// NOTE(review): the policy document is built by string concatenation, so each
+// condition keeps its trailing comma and the result ends in ",]}" — this is not
+// strict JSON; confirm the service's policy parser accepts this exact form,
+// since the signature is computed over these bytes.
+// NOTE(review): param values are interpolated without JSON escaping — assumes
+// they contain no quotes/backslashes; verify against callers.
+func (obsClient ObsClient) CreateBrowserBasedSignature(input *CreateBrowserBasedSignatureInput) (output *CreateBrowserBasedSignatureOutput, err error) {
+ if input == nil {
+ return nil, errors.New("CreateBrowserBasedSignatureInput is nil")
+ }
+
+ params := make(map[string]string, len(input.FormParams))
+ for key, value := range input.FormParams {
+ params[key] = value
+ }
+
+ date := time.Now().UTC()
+ shortDate := date.Format(SHORT_DATE_FORMAT)
+ longDate := date.Format(LONG_DATE_FORMAT)
+
+ credential, _ := getCredential(obsClient.conf.securityProvider.ak, obsClient.conf.region, shortDate)
+
+ if input.Expires <= 0 {
+ input.Expires = 300
+ }
+
+ expiration := date.Add(time.Second * time.Duration(input.Expires)).Format(ISO8601_DATE_FORMAT)
+ params[PARAM_ALGORITHM_AMZ_CAMEL] = V4_HASH_PREFIX
+ params[PARAM_CREDENTIAL_AMZ_CAMEL] = credential
+ params[PARAM_DATE_AMZ_CAMEL] = longDate
+
+ if obsClient.conf.securityProvider.securityToken != "" {
+ params[HEADER_STS_TOKEN_AMZ] = obsClient.conf.securityProvider.securityToken
+ }
+
+ // count tracks how many extra fixed slices we may append beyond params;
+ // a provided bucket/key moves from a wildcard condition into params.
+ matchAnyBucket := true
+ matchAnyKey := true
+ count := 5
+ if bucket := strings.TrimSpace(input.Bucket); bucket != "" {
+ params["bucket"] = bucket
+ matchAnyBucket = false
+ count--
+ }
+
+ if key := strings.TrimSpace(input.Key); key != "" {
+ params["key"] = key
+ matchAnyKey = false
+ count--
+ }
+
+ originPolicySlice := make([]string, 0, len(params)+count)
+ originPolicySlice = append(originPolicySlice, fmt.Sprintf("{\"expiration\":\"%s\",", expiration))
+ originPolicySlice = append(originPolicySlice, "\"conditions\":[")
+ // NOTE: map iteration order is random, so condition order (and therefore
+ // the exact policy bytes and signature) varies between calls.
+ for key, value := range params {
+ if _key := strings.TrimSpace(strings.ToLower(key)); _key != "" {
+ originPolicySlice = append(originPolicySlice, fmt.Sprintf("{\"%s\":\"%s\"},", _key, value))
+ }
+ }
+
+ if matchAnyBucket {
+ originPolicySlice = append(originPolicySlice, "[\"starts-with\", \"$bucket\", \"\"],")
+ }
+
+ if matchAnyKey {
+ originPolicySlice = append(originPolicySlice, "[\"starts-with\", \"$key\", \"\"],")
+ }
+
+ originPolicySlice = append(originPolicySlice, "]}")
+
+ originPolicy := strings.Join(originPolicySlice, "")
+ policy := Base64Encode([]byte(originPolicy))
+ signature := getSignature(policy, obsClient.conf.securityProvider.sk, obsClient.conf.region, shortDate)
+
+ output = &CreateBrowserBasedSignatureOutput{
+ OriginPolicy: originPolicy,
+ Policy: policy,
+ Algorithm: params[PARAM_ALGORITHM_AMZ_CAMEL],
+ Credential: params[PARAM_CREDENTIAL_AMZ_CAMEL],
+ Date: params[PARAM_DATE_AMZ_CAMEL],
+ Signature: signature,
+ }
+ return
+}
+
+// The *WithSignedUrl methods execute a request previously authorized via
+// CreateSignedUrl. They all follow the same pattern: allocate the typed
+// output, dispatch through doHttpWithSignedUrl (the final bool selects XML
+// parsing of the response body), and return a nil output on error.
+func (obsClient ObsClient) ListBucketsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListBucketsOutput, err error) {
+ output = &ListBucketsOutput{}
+ err = obsClient.doHttpWithSignedUrl("ListBuckets", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// CreateBucketWithSignedUrl creates a bucket; data carries the optional
+// CreateBucketConfiguration XML body.
+func (obsClient ObsClient) CreateBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("CreateBucket", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) DeleteBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("DeleteBucket", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) SetBucketStoragePolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("SetBucketStoragePolicy", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) GetBucketStoragePolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketStoragePolicyOutput, err error) {
+ output = &GetBucketStoragePolicyOutput{}
+ err = obsClient.doHttpWithSignedUrl("GetBucketStoragePolicy", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// ListObjectsWithSignedUrl additionally copies the bucket-region response
+// header into output.Location on success.
+func (obsClient ObsClient) ListObjectsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListObjectsOutput, err error) {
+ output = &ListObjectsOutput{}
+ err = obsClient.doHttpWithSignedUrl("ListObjects", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
+ output.Location = location[0]
+ }
+ }
+ return
+}
+
+// ListVersionsWithSignedUrl also copies the bucket-region header into Location.
+func (obsClient ObsClient) ListVersionsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListVersionsOutput, err error) {
+ output = &ListVersionsOutput{}
+ err = obsClient.doHttpWithSignedUrl("ListVersions", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ if location, ok := output.ResponseHeaders[HEADER_BUCKET_REGION]; ok {
+ output.Location = location[0]
+ }
+ }
+ return
+}
+
+func (obsClient ObsClient) ListMultipartUploadsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListMultipartUploadsOutput, err error) {
+ output = &ListMultipartUploadsOutput{}
+ err = obsClient.doHttpWithSignedUrl("ListMultipartUploads", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) SetBucketQuotaWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("SetBucketQuota", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) GetBucketQuotaWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketQuotaOutput, err error) {
+ output = &GetBucketQuotaOutput{}
+ err = obsClient.doHttpWithSignedUrl("GetBucketQuota", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) HeadBucketWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("HeadBucket", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketMetadataWithSignedUrl issues a HEAD request; the result is carried
+// entirely in response headers and decoded by ParseGetBucketMetadataOutput.
+func (obsClient ObsClient) GetBucketMetadataWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketMetadataOutput, err error) {
+ output = &GetBucketMetadataOutput{}
+ err = obsClient.doHttpWithSignedUrl("GetBucketMetadata", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseGetBucketMetadataOutput(output)
+ }
+ return
+}
+
+func (obsClient ObsClient) GetBucketStorageInfoWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketStorageInfoOutput, err error) {
+ output = &GetBucketStorageInfoOutput{}
+ err = obsClient.doHttpWithSignedUrl("GetBucketStorageInfo", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) GetBucketLocationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLocationOutput, err error) {
+ output = &GetBucketLocationOutput{}
+ err = obsClient.doHttpWithSignedUrl("GetBucketLocation", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// ACL / policy / CORS / versioning / website wrappers — same dispatch pattern
+// as the other *WithSignedUrl methods.
+func (obsClient ObsClient) SetBucketAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("SetBucketAcl", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) GetBucketAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketAclOutput, err error) {
+ output = &GetBucketAclOutput{}
+ err = obsClient.doHttpWithSignedUrl("GetBucketAcl", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) SetBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("SetBucketPolicy", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetBucketPolicyWithSignedUrl passes xmlResult=false: the policy body is
+// JSON, not XML, so it must not be XML-decoded.
+func (obsClient ObsClient) GetBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketPolicyOutput, err error) {
+ output = &GetBucketPolicyOutput{}
+ err = obsClient.doHttpWithSignedUrl("GetBucketPolicy", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, false)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) DeleteBucketPolicyWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("DeleteBucketPolicy", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) SetBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("SetBucketCors", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) GetBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketCorsOutput, err error) {
+ output = &GetBucketCorsOutput{}
+ err = obsClient.doHttpWithSignedUrl("GetBucketCors", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) DeleteBucketCorsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("DeleteBucketCors", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) SetBucketVersioningWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("SetBucketVersioning", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) GetBucketVersioningWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketVersioningOutput, err error) {
+ output = &GetBucketVersioningOutput{}
+ err = obsClient.doHttpWithSignedUrl("GetBucketVersioning", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) SetBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("SetBucketWebsiteConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) GetBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketWebsiteConfigurationOutput, err error) {
+ output = &GetBucketWebsiteConfigurationOutput{}
+ err = obsClient.doHttpWithSignedUrl("GetBucketWebsiteConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) DeleteBucketWebsiteConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("DeleteBucketWebsiteConfiguration", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// Logging / lifecycle / tagging / notification wrappers — same dispatch
+// pattern as the other *WithSignedUrl methods.
+func (obsClient ObsClient) SetBucketLoggingConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("SetBucketLoggingConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) GetBucketLoggingConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLoggingConfigurationOutput, err error) {
+ output = &GetBucketLoggingConfigurationOutput{}
+ err = obsClient.doHttpWithSignedUrl("GetBucketLoggingConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) SetBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("SetBucketLifecycleConfiguration", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) GetBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketLifecycleConfigurationOutput, err error) {
+ output = &GetBucketLifecycleConfigurationOutput{}
+ err = obsClient.doHttpWithSignedUrl("GetBucketLifecycleConfiguration", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) DeleteBucketLifecycleConfigurationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("DeleteBucketLifecycleConfiguration", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) SetBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("SetBucketTagging", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) GetBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketTaggingOutput, err error) {
+ output = &GetBucketTaggingOutput{}
+ err = obsClient.doHttpWithSignedUrl("GetBucketTagging", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) DeleteBucketTaggingWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("DeleteBucketTagging", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) SetBucketNotificationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("SetBucketNotification", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) GetBucketNotificationWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetBucketNotificationOutput, err error) {
+ output = &GetBucketNotificationOutput{}
+ err = obsClient.doHttpWithSignedUrl("GetBucketNotification", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// DeleteObjectWithSignedUrl deletes one object; delete markers/version info
+// arrive in response headers and are decoded by ParseDeleteObjectOutput.
+func (obsClient ObsClient) DeleteObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *DeleteObjectOutput, err error) {
+ output = &DeleteObjectOutput{}
+ err = obsClient.doHttpWithSignedUrl("DeleteObject", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseDeleteObjectOutput(output)
+ }
+ return
+}
+
+// DeleteObjectsWithSignedUrl performs a batch delete; data is the Delete XML body.
+func (obsClient ObsClient) DeleteObjectsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *DeleteObjectsOutput, err error) {
+ output = &DeleteObjectsOutput{}
+ err = obsClient.doHttpWithSignedUrl("DeleteObjects", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) SetObjectAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("SetObjectAcl", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetObjectAclWithSignedUrl fetches an object's ACL through a pre-signed URL.
+// On success the version-id response header, when present, is copied into
+// output.VersionId; on failure a nil output is returned alongside the error.
+func (obsClient ObsClient) GetObjectAclWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectAclOutput, err error) {
+ output = &GetObjectAclOutput{}
+ if err = obsClient.doHttpWithSignedUrl("GetObjectAcl", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true); err != nil {
+ return nil, err
+ }
+ if versionId, ok := output.ResponseHeaders[HEADER_VERSION_ID]; ok {
+ output.VersionId = versionId[0]
+ }
+ return
+}
+
+// RestoreObjectWithSignedUrl initiates a restore of an archived object; data
+// is the RestoreRequest XML body.
+func (obsClient ObsClient) RestoreObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("RestoreObject", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+// GetObjectMetadataWithSignedUrl issues a HEAD request; all metadata arrives
+// in response headers and is decoded by ParseGetObjectMetadataOutput.
+func (obsClient ObsClient) GetObjectMetadataWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectMetadataOutput, err error) {
+ output = &GetObjectMetadataOutput{}
+ err = obsClient.doHttpWithSignedUrl("GetObjectMetadata", HTTP_HEAD, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseGetObjectMetadataOutput(output)
+ }
+ return
+}
+
+// GetObjectWithSignedUrl downloads an object. The response body is streamed
+// via output.Body; the caller is responsible for closing it.
+func (obsClient ObsClient) GetObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *GetObjectOutput, err error) {
+ output = &GetObjectOutput{}
+ err = obsClient.doHttpWithSignedUrl("GetObject", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseGetObjectOutput(output)
+ }
+ return
+}
+
+// PutObjectWithSignedUrl uploads data as the object content.
+func (obsClient ObsClient) PutObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *PutObjectOutput, err error) {
+ output = &PutObjectOutput{}
+ err = obsClient.doHttpWithSignedUrl("PutObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParsePutObjectOutput(output)
+ }
+ return
+}
+
+// PutFileWithSignedUrl uploads a local file through a pre-signed URL. The
+// upload length is taken from the signed Content-Length header when present
+// (it must not exceed the file size), otherwise from the file size itself.
+// An empty sourceFile sends no body.
+func (obsClient ObsClient) PutFileWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, sourceFile string) (output *PutObjectOutput, err error) {
+ var data io.Reader
+ sourceFile = strings.TrimSpace(sourceFile)
+ if sourceFile != "" {
+ fd, err := os.Open(sourceFile)
+ if err != nil {
+ return nil, err
+ }
+ // Closed when this method returns; the HTTP call below finishes reading
+ // the file before then.
+ defer fd.Close()
+
+ stat, err := fd.Stat()
+ if err != nil {
+ return nil, err
+ }
+ fileReaderWrapper := &fileReaderWrapper{filePath: sourceFile}
+ fileReaderWrapper.reader = fd
+
+ // Prefer the Content-Length the caller signed (either header casing);
+ // fall back to the on-disk size.
+ var contentLength int64
+ if value, ok := actualSignedRequestHeaders[HEADER_CONTENT_LENGTH_CAMEL]; ok {
+ contentLength = StringToInt64(value[0], -1)
+ } else if value, ok := actualSignedRequestHeaders[HEADER_CONTENT_LENGTH]; ok {
+ contentLength = StringToInt64(value[0], -1)
+ } else {
+ contentLength = stat.Size()
+ }
+ // NOTE(review): an unparsable header yields -1, which passes this check
+ // and sets totalCount to -1 — confirm fileReaderWrapper treats a negative
+ // totalCount as "read to EOF".
+ if contentLength > stat.Size() {
+ return nil, errors.New("ContentLength is larger than fileSize")
+ }
+ fileReaderWrapper.totalCount = contentLength
+ data = fileReaderWrapper
+ }
+
+ output = &PutObjectOutput{}
+ err = obsClient.doHttpWithSignedUrl("PutObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParsePutObjectOutput(output)
+ }
+ return
+}
+
+// Copy and multipart-upload wrappers — same dispatch pattern as the other
+// *WithSignedUrl methods; the Parse* helpers decode header-borne fields.
+func (obsClient ObsClient) CopyObjectWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *CopyObjectOutput, err error) {
+ output = &CopyObjectOutput{}
+ err = obsClient.doHttpWithSignedUrl("CopyObject", HTTP_PUT, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseCopyObjectOutput(output)
+ }
+ return
+}
+
+func (obsClient ObsClient) AbortMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *BaseModel, err error) {
+ output = &BaseModel{}
+ err = obsClient.doHttpWithSignedUrl("AbortMultipartUpload", HTTP_DELETE, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) InitiateMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *InitiateMultipartUploadOutput, err error) {
+ output = &InitiateMultipartUploadOutput{}
+ err = obsClient.doHttpWithSignedUrl("InitiateMultipartUpload", HTTP_POST, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseInitiateMultipartUploadOutput(output)
+ }
+ return
+}
+
+func (obsClient ObsClient) UploadPartWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *UploadPartOutput, err error) {
+ output = &UploadPartOutput{}
+ err = obsClient.doHttpWithSignedUrl("UploadPart", HTTP_PUT, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseUploadPartOutput(output)
+ }
+ return
+}
+
+// CompleteMultipartUploadWithSignedUrl finishes the upload; data is the
+// CompleteMultipartUpload XML part list.
+func (obsClient ObsClient) CompleteMultipartUploadWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header, data io.Reader) (output *CompleteMultipartUploadOutput, err error) {
+ output = &CompleteMultipartUploadOutput{}
+ err = obsClient.doHttpWithSignedUrl("CompleteMultipartUpload", HTTP_POST, signedUrl, actualSignedRequestHeaders, data, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseCompleteMultipartUploadOutput(output)
+ }
+ return
+}
+
+func (obsClient ObsClient) ListPartsWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *ListPartsOutput, err error) {
+ output = &ListPartsOutput{}
+ err = obsClient.doHttpWithSignedUrl("ListParts", HTTP_GET, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ }
+ return
+}
+
+func (obsClient ObsClient) CopyPartWithSignedUrl(signedUrl string, actualSignedRequestHeaders http.Header) (output *CopyPartOutput, err error) {
+ output = &CopyPartOutput{}
+ err = obsClient.doHttpWithSignedUrl("CopyPart", HTTP_PUT, signedUrl, actualSignedRequestHeaders, nil, output, true)
+ if err != nil {
+ output = nil
+ } else {
+ ParseCopyPartOutput(output)
+ }
+ return
+}
diff --git a/openstack/obs/trait.go b/openstack/obs/trait.go
new file mode 100644
index 000000000..49bfcc7ec
--- /dev/null
+++ b/openstack/obs/trait.go
@@ -0,0 +1,604 @@
+package obs
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
// IReadCloser is implemented by outputs that carry a streaming response body
// (currently GetObjectOutput) so the transport layer can hand them the raw
// HTTP body.
type IReadCloser interface {
	setReadCloser(body io.ReadCloser)
}

// setReadCloser stores the HTTP response body on the output; the caller is
// responsible for reading and closing it.
func (output *GetObjectOutput) setReadCloser(body io.ReadCloser) {
	output.Body = body
}

// IBaseModel is implemented by every response model; the transport layer uses
// it to record the HTTP status code, request id and raw response headers.
type IBaseModel interface {
	setStatusCode(statusCode int)

	setRequestId(requestId string)

	setResponseHeaders(responseHeaders map[string][]string)
}

// ISerializable converts a request model into the query parameters, HTTP
// headers and request body to be sent on the wire.
type ISerializable interface {
	trans() (map[string]string, map[string][]string, interface{})
}

// DefaultSerializable is a fixed ISerializable that returns pre-built
// params/headers/data unchanged.
type DefaultSerializable struct {
	params  map[string]string // query parameters
	headers map[string][]string // HTTP request headers
	data    interface{} // request body (reader or raw value)
}

// trans returns the stored params, headers and body as-is.
func (input DefaultSerializable) trans() (map[string]string, map[string][]string, interface{}) {
	return input.params, input.headers, input.data
}

// defaultSerializable is shared by requests that need no parameters, headers
// or body.
var defaultSerializable = &DefaultSerializable{}
+
// newSubResourceSerial builds a serializable whose only query parameter is the
// given sub-resource (e.g. "?acl"), with no headers and no body.
func newSubResourceSerial(subResource SubResourceType) *DefaultSerializable {
	return &DefaultSerializable{map[string]string{string(subResource): ""}, nil, nil}
}

// trans is a helper for simple "sub-resource + XML body" requests: the
// sub-resource becomes the sole query parameter and input is serialized as the
// request body.
// NOTE(review): the serialization error is silently discarded — on failure the
// request would be sent with a nil body; confirm this is intended.
func trans(subResource SubResourceType, input interface{}) (params map[string]string, headers map[string][]string, data interface{}) {
	params = map[string]string{string(subResource): ""}
	data, _ = ConvertRequestToIoReader(input)
	return
}
+
// setStatusCode records the HTTP status code of the response.
func (baseModel *BaseModel) setStatusCode(statusCode int) {
	baseModel.StatusCode = statusCode
}

// setRequestId records the service-assigned request id of the response.
func (baseModel *BaseModel) setRequestId(requestId string) {
	baseModel.RequestId = requestId
}

// setResponseHeaders records the raw response headers.
func (baseModel *BaseModel) setResponseHeaders(responseHeaders map[string][]string) {
	baseModel.ResponseHeaders = responseHeaders
}

// trans: ListBuckets sends no query parameters, headers or body.
func (input ListBucketsInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	return
}
+
// trans maps CreateBucketInput onto the wire: canned ACL and storage class
// become headers, and a non-blank Location is XML-serialized as the request
// body (the location constraint). With no location the request has no body.
func (input CreateBucketInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	headers = make(map[string][]string)
	if acl := string(input.ACL); acl != "" {
		headers[HEADER_ACL_AMZ] = []string{acl}
	}

	if storageClass := string(input.StorageClass); storageClass != "" {
		headers[HEADER_STORAGE_CLASS] = []string{storageClass}
	}

	if location := strings.TrimSpace(input.Location); location != "" {
		// Value receiver: mutating the local copy before serializing is safe.
		input.Location = location
		data, _ = ConvertRequestToIoReader(input)
	}
	return
}

// trans: the bucket storage policy is sent as XML under the storagePolicy
// sub-resource.
func (input SetBucketStoragePolicyInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	return trans(SubResourceStoragePolicy, input)
}
+
+func (input ListObjsInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
+ params = make(map[string]string)
+ if input.Prefix != "" {
+ params["prefix"] = input.Prefix
+ }
+ if input.Delimiter != "" {
+ params["delimiter"] = input.Delimiter
+ }
+ if input.MaxKeys > 0 {
+ params["max-keys"] = IntToString(input.MaxKeys)
+ }
+ headers = make(map[string][]string)
+ if origin := strings.TrimSpace(input.Origin); origin != "" {
+ headers[HEADER_ORIGIN_CAMEL] = []string{origin}
+ }
+ if requestHeader := strings.TrimSpace(input.RequestHeader); requestHeader != "" {
+ headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{requestHeader}
+ }
+ return
+}
+
// trans adds the object-listing pagination marker to the shared listing
// parameters.
func (input ListObjectsInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params, headers, data = input.ListObjsInput.trans()
	if input.Marker != "" {
		params["marker"] = input.Marker
	}
	return
}

// trans builds the version-listing request: shared listing parameters plus the
// "versions" sub-resource and the key/version-id pagination markers.
func (input ListVersionsInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params, headers, data = input.ListObjsInput.trans()
	params[string(SubResourceVersions)] = ""
	if input.KeyMarker != "" {
		params["key-marker"] = input.KeyMarker
	}
	if input.VersionIdMarker != "" {
		params["version-id-marker"] = input.VersionIdMarker
	}
	return
}

// trans builds the in-progress multipart-upload listing request under the
// "uploads" sub-resource, with optional filter and pagination parameters.
func (input ListMultipartUploadsInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params = map[string]string{string(SubResourceUploads): ""}
	if input.Prefix != "" {
		params["prefix"] = input.Prefix
	}
	if input.Delimiter != "" {
		params["delimiter"] = input.Delimiter
	}
	if input.MaxUploads > 0 {
		params["max-uploads"] = IntToString(input.MaxUploads)
	}
	if input.KeyMarker != "" {
		params["key-marker"] = input.KeyMarker
	}
	if input.UploadIdMarker != "" {
		params["upload-id-marker"] = input.UploadIdMarker
	}
	return
}

// trans: the bucket quota is sent as XML under the "quota" sub-resource.
func (input SetBucketQuotaInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	return trans(SubResourceQuota, input)
}
+
// trans builds the set-bucket-ACL request: a canned ACL becomes a header;
// otherwise the full AccessControlPolicy is XML-serialized as the body.
func (input SetBucketAclInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params = map[string]string{string(SubResourceAcl): ""}
	headers = make(map[string][]string)

	if acl := string(input.ACL); acl != "" {
		headers[HEADER_ACL_AMZ] = []string{acl}
	} else {
		// Serialization errors are silently dropped; the body would be nil.
		data, _ = ConvertAclToXml(input.AccessControlPolicy, false)
	}
	return
}

// trans sends the raw JSON policy document as the body of the "policy"
// sub-resource request.
func (input SetBucketPolicyInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params = map[string]string{string(SubResourcePolicy): ""}
	data = strings.NewReader(input.Policy)
	return
}

// trans serializes the CORS configuration under the "cors" sub-resource and
// attaches the Content-MD5 of the body, which the service requires.
func (input SetBucketCorsInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params = map[string]string{string(SubResourceCors): ""}
	data, md5, _ := ConvertRequestToIoReaderV2(input)
	headers = map[string][]string{HEADER_MD5_CAMEL: []string{md5}}
	return
}

// trans: the versioning state is sent as XML under the "versioning"
// sub-resource.
func (input SetBucketVersioningInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	return trans(SubResourceVersioning, input)
}

// trans serializes the website configuration under the "website" sub-resource.
func (input SetBucketWebsiteConfigurationInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params = map[string]string{string(SubResourceWebsite): ""}
	data, _ = ConvertWebsiteConfigurationToXml(input.BucketWebsiteConfiguration, false)
	return
}

// trans builds the HEAD-bucket request with optional CORS pre-flight headers.
func (input GetBucketMetadataInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	headers = make(map[string][]string)
	if origin := strings.TrimSpace(input.Origin); origin != "" {
		headers[HEADER_ORIGIN_CAMEL] = []string{origin}
	}
	if requestHeader := strings.TrimSpace(input.RequestHeader); requestHeader != "" {
		headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{requestHeader}
	}
	return
}

// trans serializes the logging status under the "logging" sub-resource.
func (input SetBucketLoggingConfigurationInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params = map[string]string{string(SubResourceLogging): ""}
	data, _ = ConvertLoggingStatusToXml(input.BucketLoggingStatus, false)
	return
}

// trans serializes the lifecycle rules under the "lifecycle" sub-resource and
// attaches the Content-MD5 of the body, which the service requires.
func (input SetBucketLifecycleConfigurationInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params = map[string]string{string(SubResourceLifecycle): ""}
	data, md5 := ConvertLifecyleConfigurationToXml(input.BucketLifecyleConfiguration, true)
	headers = map[string][]string{HEADER_MD5_CAMEL: []string{md5}}
	return
}

// trans serializes the tag set under the "tagging" sub-resource and attaches
// the Content-MD5 of the body, which the service requires.
func (input SetBucketTaggingInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params = map[string]string{string(SubResourceTagging): ""}
	data, md5, _ := ConvertRequestToIoReaderV2(input)
	headers = map[string][]string{HEADER_MD5_CAMEL: []string{md5}}
	return
}

// trans serializes the notification configuration under the "notification"
// sub-resource.
func (input SetBucketNotificationInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params = map[string]string{string(SubResourceNotification): ""}
	data, _ = ConvertNotificationToXml(input.BucketNotification, false)
	return
}
+
// trans: delete a single object, optionally a specific version via the
// versionId query parameter.
func (input DeleteObjectInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params = make(map[string]string)
	if input.VersionId != "" {
		params[PARAM_VERSION_ID] = input.VersionId
	}
	return
}

// trans builds the batch-delete request under the "delete" sub-resource; the
// object list is XML-serialized and its Content-MD5 header is required by the
// service.
func (input DeleteObjectsInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params = map[string]string{string(SubResourceDelete): ""}
	data, md5, _ := ConvertRequestToIoReaderV2(input)
	headers = map[string][]string{HEADER_MD5_CAMEL: []string{md5}}
	return
}

// trans builds the set-object-ACL request (optionally for one version): a
// canned ACL becomes a header; otherwise the AccessControlPolicy is
// XML-serialized as the body.
func (input SetObjectAclInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params = map[string]string{string(SubResourceAcl): ""}
	if input.VersionId != "" {
		params[PARAM_VERSION_ID] = input.VersionId
	}
	headers = make(map[string][]string)
	if acl := string(input.ACL); acl != "" {
		headers[HEADER_ACL_AMZ] = []string{acl}
	} else {
		data, _ = ConvertAclToXml(input.AccessControlPolicy, false)
	}
	return
}

// trans: read the object ACL, optionally for one version.
func (input GetObjectAclInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params = map[string]string{string(SubResourceAcl): ""}
	if input.VersionId != "" {
		params[PARAM_VERSION_ID] = input.VersionId
	}
	return
}

// trans builds the restore request under the "restore" sub-resource
// (optionally for one version) with the restore options XML-serialized as the
// body.
func (input RestoreObjectInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params = map[string]string{string(SubResourceRestore): ""}
	if input.VersionId != "" {
		params[PARAM_VERSION_ID] = input.VersionId
	}
	data, _ = ConvertRequestToIoReader(input)
	return
}
+
// GetEncryption returns the SSE-KMS algorithm, falling back to the package
// default when unset.
func (header SseKmsHeader) GetEncryption() string {
	if header.Encryption != "" {
		return header.Encryption
	}
	return DEFAULT_SSE_KMS_ENCRYPTION
}

// GetKey returns the configured KMS key.
func (header SseKmsHeader) GetKey() string {
	return header.Key
}

// GetEncryption returns the SSE-C algorithm, falling back to the package
// default when unset.
func (header SseCHeader) GetEncryption() string {
	if header.Encryption != "" {
		return header.Encryption
	}
	return DEFAULT_SSE_C_ENCRYPTION
}

// GetKey returns the base64-encoded SSE-C customer key.
func (header SseCHeader) GetKey() string {
	return header.Key
}

// GetKeyMD5 returns the MD5 to send with the SSE-C key: the explicit KeyMD5
// when set, otherwise the base64 MD5 of the base64-decoded key. Returns ""
// when the key is not valid base64.
func (header SseCHeader) GetKeyMD5() string {
	if header.KeyMD5 != "" {
		return header.KeyMD5
	}

	if ret, err := Base64Decode(header.GetKey()); err == nil {
		return Base64Md5(ret)
	}
	return ""
}
+
+func setSseHeader(headers map[string][]string, sseHeader ISseHeader, sseCOnly bool) {
+ if sseHeader != nil {
+ if sseCHeader, ok := sseHeader.(SseCHeader); ok {
+ headers[HEADER_SSEC_ENCRYPTION_AMZ] = []string{sseCHeader.GetEncryption()}
+ headers[HEADER_SSEC_KEY_AMZ] = []string{sseCHeader.GetKey()}
+ headers[HEADER_SSEC_KEY_MD5_AMZ] = []string{sseCHeader.GetKeyMD5()}
+ } else if sseKmsHeader, ok := sseHeader.(SseKmsHeader); !sseCOnly && ok {
+ headers[HEADER_SSEKMS_ENCRYPTION_AMZ] = []string{sseKmsHeader.GetEncryption()}
+ headers[HEADER_SSEKMS_KEY_AMZ] = []string{sseKmsHeader.GetKey()}
+ }
+ }
+}
+
// trans builds the HEAD-object request: optional versionId query parameter,
// CORS pre-flight headers, and SSE-C headers (sseCOnly=true — SSE-KMS headers
// are not sent when reading).
func (input GetObjectMetadataInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params = make(map[string]string)
	if input.VersionId != "" {
		params[PARAM_VERSION_ID] = input.VersionId
	}
	headers = make(map[string][]string)

	if input.Origin != "" {
		headers[HEADER_ORIGIN_CAMEL] = []string{input.Origin}
	}

	if input.RequestHeader != "" {
		headers[HEADER_ACCESS_CONTROL_REQUEST_HEADER_CAMEL] = []string{input.RequestHeader}
	}
	setSseHeader(headers, input.SseHeader, true)
	return
}
+
// trans builds the GET-object request on top of the metadata request:
// response-header overrides and image processing become query parameters, and
// the byte range plus ETag/date preconditions become headers.
func (input GetObjectInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params, headers, data = input.GetObjectMetadataInput.trans()
	if input.ResponseCacheControl != "" {
		params[PARAM_RESPONSE_CACHE_CONTROL] = input.ResponseCacheControl
	}
	if input.ResponseContentDisposition != "" {
		params[PARAM_RESPONSE_CONTENT_DISPOSITION] = input.ResponseContentDisposition
	}
	if input.ResponseContentEncoding != "" {
		params[PARAM_RESPONSE_CONTENT_ENCODING] = input.ResponseContentEncoding
	}
	if input.ResponseContentLanguage != "" {
		params[PARAM_RESPONSE_CONTENT_LANGUAGE] = input.ResponseContentLanguage
	}
	if input.ResponseContentType != "" {
		params[PARAM_RESPONSE_CONTENT_TYPE] = input.ResponseContentType
	}
	if input.ResponseExpires != "" {
		params[PARAM_RESPONSE_EXPIRES] = input.ResponseExpires
	}
	if input.ImageProcess != "" {
		params[PARAM_IMAGE_PROCESS] = input.ImageProcess
	}
	// Range is sent only when RangeStart >= 0 and RangeEnd is strictly greater;
	// a single-byte range (start == end) is therefore not expressible here.
	if input.RangeStart >= 0 && input.RangeEnd > input.RangeStart {
		headers[HEADER_RANGE] = []string{fmt.Sprintf("bytes=%d-%d", input.RangeStart, input.RangeEnd)}
	}

	if input.IfMatch != "" {
		headers[HEADER_IF_MATCH] = []string{input.IfMatch}
	}
	if input.IfNoneMatch != "" {
		headers[HEADER_IF_NONE_MATCH] = []string{input.IfNoneMatch}
	}
	if !input.IfModifiedSince.IsZero() {
		headers[HEADER_IF_MODIFIED_SINCE] = []string{FormatUtcToRfc1123(input.IfModifiedSince)}
	}
	if !input.IfUnmodifiedSince.IsZero() {
		headers[HEADER_IF_UNMODIFIED_SINCE] = []string{FormatUtcToRfc1123(input.IfUnmodifiedSince)}
	}
	return
}
+
// trans builds the headers common to object write operations: canned ACL,
// storage class, website redirect, SSE headers (SSE-C and SSE-KMS both
// allowed) and user metadata.
func (input ObjectOperationInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	headers = make(map[string][]string)
	params = make(map[string]string)
	if acl := string(input.ACL); acl != "" {
		headers[HEADER_ACL_AMZ] = []string{acl}
	}
	if storageClass := string(input.StorageClass); storageClass != "" {
		headers[HEADER_STORAGE_CLASS2_AMZ] = []string{storageClass}
	}
	if input.WebsiteRedirectLocation != "" {
		headers[HEADER_WEBSITE_REDIRECT_LOCATION_AMZ] = []string{input.WebsiteRedirectLocation}
	}
	setSseHeader(headers, input.SseHeader, false)
	if input.Metadata != nil {
		// Metadata keys are prefixed with the user-metadata header prefix
		// unless already carrying it.
		// NOTE(review): HasPrefix is case-sensitive, so a key spelled
		// "X-Amz-Meta-..." would be prefixed again — confirm intended.
		for key, value := range input.Metadata {
			key = strings.TrimSpace(key)
			if !strings.HasPrefix(key, HEADER_PREFIX_META) {
				key = HEADER_PREFIX_META + key
			}
			headers[key] = []string{value}
		}
	}
	return
}
+
// trans extends the common object-write headers with Content-MD5,
// Content-Length and Content-Type. Content-Length is only sent when positive,
// so an explicit zero length is never transmitted from here.
func (input PutObjectBasicInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params, headers, data = input.ObjectOperationInput.trans()

	if input.ContentMD5 != "" {
		headers[HEADER_MD5_CAMEL] = []string{input.ContentMD5}
	}

	if input.ContentLength > 0 {
		headers[HEADER_CONTENT_LENGTH_CAMEL] = []string{Int64ToString(input.ContentLength)}
	}
	if input.ContentType != "" {
		headers[HEADER_CONTENT_TYPE_CAML] = []string{input.ContentType}
	}

	return
}

// trans: a PUT-object request is the basic request plus the object content as
// the body (when provided).
func (input PutObjectInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params, headers, data = input.PutObjectBasicInput.trans()
	if input.Body != nil {
		data = input.Body
	}
	return
}
+
// trans builds the server-side copy request: the copy source (bucket/key and
// optional versionId) goes into the copy-source header, plus the metadata
// directive, replacement entity headers (only when replacing metadata), copy
// preconditions and SSE-C headers for the source object.
func (input CopyObjectInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params, headers, data = input.ObjectOperationInput.trans()

	// NOTE(review): the source key is not URL-encoded here — confirm behavior
	// for keys containing '?' or '%'.
	var copySource string
	if input.CopySourceVersionId != "" {
		copySource = fmt.Sprintf("%s/%s?versionId=%s", input.CopySourceBucket, input.CopySourceKey, input.CopySourceVersionId)
	} else {
		copySource = fmt.Sprintf("%s/%s", input.CopySourceBucket, input.CopySourceKey)
	}
	headers[HEADER_COPY_SOURCE_AMZ] = []string{copySource}

	if directive := string(input.MetadataDirective); directive != "" {
		headers[HEADER_METADATA_DIRECTIVE_AMZ] = []string{directive}
	}

	// Entity headers are only meaningful when the directive replaces metadata.
	if input.MetadataDirective == ReplaceMetadata {
		if input.CacheControl != "" {
			headers[HEADER_CACHE_CONTROL] = []string{input.CacheControl}
		}
		if input.ContentDisposition != "" {
			headers[HEADER_CONTENT_DISPOSITION] = []string{input.ContentDisposition}
		}
		if input.ContentEncoding != "" {
			headers[HEADER_CONTENT_ENCODING] = []string{input.ContentEncoding}
		}
		if input.ContentLanguage != "" {
			headers[HEADER_CONTENT_LANGUAGE] = []string{input.ContentLanguage}
		}
		if input.ContentType != "" {
			headers[HEADER_CONTENT_TYPE] = []string{input.ContentType}
		}
		if input.Expires != "" {
			headers[HEADER_EXPIRES] = []string{input.Expires}
		}
	}

	if input.CopySourceIfMatch != "" {
		headers[HEADER_COPY_SOURCE_IF_MATCH_AMZ] = []string{input.CopySourceIfMatch}
	}
	if input.CopySourceIfNoneMatch != "" {
		headers[HEADER_COPY_SOURCE_IF_NONE_MATCH_AMZ] = []string{input.CopySourceIfNoneMatch}
	}
	if !input.CopySourceIfModifiedSince.IsZero() {
		headers[HEADER_COPY_SOURCE_IF_MODIFIED_SINCE_AMZ] = []string{FormatUtcToRfc1123(input.CopySourceIfModifiedSince)}
	}
	if !input.CopySourceIfUnmodifiedSince.IsZero() {
		headers[HEADER_COPY_SOURCE_IF_UNMODIFIED_SINCE_AMZ] = []string{FormatUtcToRfc1123(input.CopySourceIfUnmodifiedSince)}
	}
	// Only SSE-C applies to reading the source; SSE-KMS source headers do not exist.
	if input.SourceSseHeader != nil {
		if sseCHeader, ok := input.SourceSseHeader.(SseCHeader); ok {
			headers[HEADER_SSEC_COPY_SOURCE_ENCRYPTION_AMZ] = []string{sseCHeader.GetEncryption()}
			headers[HEADER_SSEC_COPY_SOURCE_KEY_AMZ] = []string{sseCHeader.GetKey()}
			headers[HEADER_SSEC_COPY_SOURCE_KEY_MD5_AMZ] = []string{sseCHeader.GetKeyMD5()}
		}
	}
	return
}
+
// trans: abort targets the upload via the uploadId query parameter only.
func (input AbortMultipartUploadInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params = map[string]string{"uploadId": input.UploadId}
	return
}

// trans: initiate is the common object-write request plus the "uploads"
// sub-resource.
func (input InitiateMultipartUploadInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params, headers, data = input.ObjectOperationInput.trans()
	params[string(SubResourceUploads)] = ""
	return
}

// trans: a part upload is addressed by uploadId and partNumber; only SSE-C
// headers apply, and the part content (when provided) is the body.
func (input UploadPartInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params = map[string]string{"uploadId": input.UploadId, "partNumber": IntToString(input.PartNumber)}
	headers = make(map[string][]string)
	setSseHeader(headers, input.SseHeader, true)
	if input.Body != nil {
		data = input.Body
	}
	return
}

// trans: complete sends the XML part list as the body of the uploadId request.
func (input CompleteMultipartUploadInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params = map[string]string{"uploadId": input.UploadId}
	data, _ = ConvertCompleteMultipartUploadInputToXml(input, false)
	return
}

// trans: list parts of one upload with optional pagination parameters.
func (input ListPartsInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params = map[string]string{"uploadId": input.UploadId}
	if input.MaxParts > 0 {
		params["max-parts"] = IntToString(input.MaxParts)
	}
	if input.PartNumberMarker > 0 {
		params["part-number-marker"] = IntToString(input.PartNumberMarker)
	}
	return
}
+
// trans builds the copy-part request: uploadId/partNumber query parameters,
// the copy-source header (with optional versionId), an optional source byte
// range, SSE-C headers for the destination part and SSE-C headers for reading
// the source.
func (input CopyPartInput) trans() (params map[string]string, headers map[string][]string, data interface{}) {
	params = map[string]string{"uploadId": input.UploadId, "partNumber": IntToString(input.PartNumber)}
	headers = make(map[string][]string, 1)
	var copySource string
	if input.CopySourceVersionId != "" {
		copySource = fmt.Sprintf("%s/%s?versionId=%s", input.CopySourceBucket, input.CopySourceKey, input.CopySourceVersionId)
	} else {
		copySource = fmt.Sprintf("%s/%s", input.CopySourceBucket, input.CopySourceKey)
	}
	headers[HEADER_COPY_SOURCE_AMZ] = []string{copySource}

	// Range is sent only when start >= 0 and end is strictly greater.
	if input.CopySourceRangeStart >= 0 && input.CopySourceRangeEnd > input.CopySourceRangeStart {
		headers[HEADER_COPY_SOURCE_RANGE_AMZ] = []string{fmt.Sprintf("bytes=%d-%d", input.CopySourceRangeStart, input.CopySourceRangeEnd)}
	}

	setSseHeader(headers, input.SseHeader, true)
	if input.SourceSseHeader != nil {
		if sseCHeader, ok := input.SourceSseHeader.(SseCHeader); ok {
			headers[HEADER_SSEC_COPY_SOURCE_ENCRYPTION_AMZ] = []string{sseCHeader.GetEncryption()}
			headers[HEADER_SSEC_COPY_SOURCE_KEY_AMZ] = []string{sseCHeader.GetKey()}
			headers[HEADER_SSEC_COPY_SOURCE_KEY_MD5_AMZ] = []string{sseCHeader.GetKeyMD5()}
		}
	}
	return
}
+
// partSlice implements sort.Interface, ordering parts by ascending PartNumber.
type partSlice []Part

func (parts partSlice) Len() int {
	return len(parts)
}

func (parts partSlice) Less(i, j int) bool {
	return parts[i].PartNumber < parts[j].PartNumber
}

func (parts partSlice) Swap(i, j int) {
	parts[i], parts[j] = parts[j], parts[i]
}

// readerWrapper limits reads from an underlying reader to totalCount bytes
// (see Read) and can reposition seekable readers (see seek).
type readerWrapper struct {
	reader      io.Reader
	mark        int64 // NOTE(review): purpose not evident from this file — presumably a saved seek position; confirm at call sites
	totalCount  int64 // byte budget enforced by Read; see Read for the zero/negative cases
	readedCount int64 // bytes consumed so far (sic: "readed")
}
+
+func (rw *readerWrapper) seek(offset int64, whence int) (int64, error) {
+ if r, ok := rw.reader.(*strings.Reader); ok {
+ return r.Seek(offset, whence)
+ } else if r, ok := rw.reader.(*bytes.Reader); ok {
+ return r.Seek(offset, whence)
+ } else if r, ok := rw.reader.(*os.File); ok {
+ return r.Seek(offset, whence)
+ }
+ return offset, nil
+}
+
// Read reads from the wrapped reader while enforcing the totalCount budget:
// a zero budget yields immediate EOF, a positive budget truncates the stream
// to exactly totalCount bytes, and a negative budget passes reads through
// unchanged.
func (rw *readerWrapper) Read(p []byte) (n int, err error) {
	if rw.totalCount == 0 {
		return 0, io.EOF
	}
	if rw.totalCount > 0 {
		n, err = rw.reader.Read(p)
		readedOnce := int64(n)
		if remainCount := rw.totalCount - rw.readedCount; remainCount > readedOnce {
			rw.readedCount += readedOnce
			return n, err
		} else {
			// Budget reached: report only the remaining bytes and EOF.
			// NOTE(review): the underlying reader's error is discarded on this
			// path — confirm a real read error here can be safely masked.
			rw.readedCount += remainCount
			return int(remainCount), io.EOF
		}
	}
	return rw.reader.Read(p)
}

// fileReaderWrapper is a readerWrapper that remembers the path of the file it
// wraps.
type fileReaderWrapper struct {
	readerWrapper
	filePath string
}
diff --git a/openstack/obs/util.go b/openstack/obs/util.go
new file mode 100644
index 000000000..7edadadd9
--- /dev/null
+++ b/openstack/obs/util.go
@@ -0,0 +1,140 @@
+package obs
+
+import (
+ "crypto/hmac"
+ "crypto/md5"
+ "crypto/sha1"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/xml"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
// regex matches a string consisting of exactly one rune in the CJK Unified
// Ideographs range (U+4E00..U+9FA5); UrlEncode uses it to escape only Chinese
// characters when chineseOnly is set.
var regex = regexp.MustCompile("^[\u4e00-\u9fa5]$")
+
// StringToInt parses value as a decimal int, returning def when parsing fails.
func StringToInt(value string, def int) int {
	if ret, err := strconv.Atoi(value); err == nil {
		return ret
	}
	return def
}

// StringToInt64 parses value as a decimal int64, returning def when parsing
// fails.
func StringToInt64(value string, def int64) int64 {
	if ret, err := strconv.ParseInt(value, 10, 64); err == nil {
		return ret
	}
	return def
}
+
// IntToString renders value in base 10.
func IntToString(value int) string {
	return strconv.FormatInt(int64(value), 10)
}

// Int64ToString renders value in base 10.
func Int64ToString(value int64) string {
	return strconv.FormatInt(value, 10)
}

// GetCurrentTimestamp returns the current wall-clock time in milliseconds
// since the Unix epoch.
func GetCurrentTimestamp() int64 {
	return time.Now().UnixNano() / int64(time.Millisecond)
}
+
// FormatUtcNow formats the current UTC time using the given reference layout.
func FormatUtcNow(format string) string {
	return time.Now().In(time.UTC).Format(format)
}
+
+func FormatUtcToRfc1123(t time.Time) string {
+ ret := t.UTC().Format(time.RFC1123)
+ return ret[:strings.LastIndex(ret, "UTC")] + "GMT"
+}
+
+func Md5(value []byte) []byte {
+ m := md5.New()
+ m.Write(value)
+ return m.Sum(nil)
+}
+
+func HmacSha1(key, value []byte) []byte {
+ mac := hmac.New(sha1.New, key)
+ mac.Write(value)
+ return mac.Sum(nil)
+}
+
+func HmacSha256(key, value []byte) []byte {
+ mac := hmac.New(sha256.New, key)
+ mac.Write(value)
+ return mac.Sum(nil)
+}
+
// Base64Encode encodes value with standard base64.
func Base64Encode(value []byte) string {
	return base64.StdEncoding.EncodeToString(value)
}

// Base64Decode decodes a standard-base64 string.
func Base64Decode(value string) ([]byte, error) {
	return base64.StdEncoding.DecodeString(value)
}

// HexMd5 returns the hex-encoded MD5 digest of value.
func HexMd5(value []byte) string {
	return Hex(Md5(value))
}

// Base64Md5 returns the base64-encoded MD5 digest of value (the Content-MD5
// wire form).
func Base64Md5(value []byte) string {
	return Base64Encode(Md5(value))
}
+
+func Sha256Hash(value []byte) []byte {
+ hash := sha256.New()
+ hash.Write(value)
+ return hash.Sum(nil)
+}
+
// ParseXml unmarshals value into result; empty input is treated as success
// and leaves result untouched.
func ParseXml(value []byte, result interface{}) error {
	if len(value) > 0 {
		return xml.Unmarshal(value, result)
	}
	return nil
}

// TransToXml marshals value to XML; a nil value yields an empty (non-nil)
// byte slice and no error.
func TransToXml(value interface{}) ([]byte, error) {
	if value != nil {
		return xml.Marshal(value)
	}
	return []byte{}, nil
}
+
// Hex encodes value as lowercase hexadecimal.
func Hex(value []byte) string {
	return hex.EncodeToString(value)
}

// HexSha256 returns the hex-encoded SHA-256 digest of value (the V4-signing
// payload-hash form).
func HexSha256(value []byte) string {
	return Hex(Sha256Hash(value))
}
+
// UrlDecode reverses URL query escaping ("+" to space, %XX sequences) on
// value. On malformed input it returns an empty string together with the
// error.
func UrlDecode(value string) (string, error) {
	ret, err := url.QueryUnescape(value)
	if err != nil {
		// Normalize the failure result to "" rather than a partial decode.
		return "", err
	}
	return ret, nil
}
+
+func UrlEncode(value string, chineseOnly bool) string {
+ if chineseOnly {
+ values := make([]string, 0, len(value))
+ for _, val := range value {
+ _value := string(val)
+ if regex.MatchString(_value) {
+ _value = url.QueryEscape(_value)
+ }
+ values = append(values, _value)
+ }
+ return strings.Join(values, "")
+ }
+ return url.QueryEscape(value)
+}