Original S3 library (might not work anymore?)

New AWS CloudWatch integration code.
commit 92b41419e8bb369529b16cf8c846113a7ff343d4 1 parent dd9e09d
@berenddeboer authored
Showing with 2,729 additions and 0 deletions.
  1. +16 −0 .gitignore
  2. +236 −0 library/cloudwatch/aws_cloudwatch.e
  3. +84 −0 library/cloudwatch/aws_metric_datum.e
  4. +50 −0 library/documents/CompleteMultipartUploadResult_document.e
  5. +81 −0 library/documents/CompleteMultipartUploadResult_template.e
  6. +50 −0 library/documents/Error_document.e
  7. +81 −0 library/documents/Error_template.e
  8. +50 −0 library/documents/InitiateMultipartUploadResult_document.e
  9. +76 −0 library/documents/InitiateMultipartUploadResult_template.e
  10. +20 −0 library/library.xace
  11. +39 −0 library/s3/s3_access_key.e
  12. +388 −0 library/s3/s3_client.e
  13. +63 −0 library/s3/s3_verbose_routines.e
  14. +7 −0 library/samples/CompleteMultipartUploadResult.xml
  15. +6 −0 library/samples/Error.xml
  16. +6 −0 library/samples/InitiateMultipartUploadResult.xml
  17. +110 −0 library/tools/s3_tool.e
  18. +25 −0 src/s3cat/build.eant
  19. +240 −0 src/s3cat/s3_cat.e
  20. +37 −0 src/s3cat/system.xace
  21. +25 −0 src/s3ls/build.eant
  22. +109 −0 src/s3ls/s3_ls.e
  23. +33 −0 src/s3ls/system.xace
  24. +25 −0 src/s3store/build.eant
  25. +138 −0 src/s3store/s3_store.e
  26. +424 −0 src/s3store/s3_writer.e
  27. +30 −0 src/s3store/system.xace
  28. +25 −0 test/cloudwatch/build.eant
  29. +14 −0 test/cloudwatch/getest.cfg
  30. +34 −0 test/cloudwatch/system.xace
  31. +64 −0 test/cloudwatch/test_cloudwatch.e
  32. +27 −0 test/s3/build.eant
  33. +14 −0 test/s3/getest.cfg
  34. +37 −0 test/s3/system.xace
  35. +65 −0 test/s3/test_s3.e
16 .gitignore
@@ -0,0 +1,16 @@
+*~
+*.[acho]
+*.id
+*.log
+*.melted
+*.sh
+compile_ise.ecf
+compile_se.ace
+compile_ge.xace
+EIFGENs
+TESTGEN
+TAGS
+src/s3cat/s3cat
+src/s3ls/s3ls
+src/s3store/s3store
+test/cloudwatch/cloudwatch_test
236 library/cloudwatch/aws_cloudwatch.e
@@ -0,0 +1,236 @@
+note
+
+ description:
+
+ "Interface to Amazon CloudWatch"
+
+ library: "Amazon CloudWatch library"
+ author: "Berend de Boer <berend@pobox.com>"
+ copyright: "Copyright (c) 2012, Berend de Boer"
+ license: "MIT License (see LICENSE)"
+
+
+class
+
+ AWS_CLOUDWATCH
+
+
+inherit
+
+ EPX_HTTP_11_CLIENT
+ rename
+ make as make_http_11_client
+ end
+
+ UT_URL_ENCODING
+ export
+ {NONE} all
+ end
+
+
+create
+
+ make
+
+
+feature {NONE} -- Initialisation
+
+ make (an_access_key_id, a_secret_access_key: READABLE_STRING_GENERAL)
+ require
+ access_key_has_correct_length: an_access_key_id /= Void and then an_access_key_id.count = 20
+ secret_key_has_correct_length: a_secret_access_key /= Void and then a_secret_access_key.count = 40
+ do
+ make_secure (aws_cloudwatch_host_name)
+ access_key_id := an_access_key_id
+ create hasher.make (a_secret_access_key.out, create {EPX_SHA1_CALCULATION}.make)
+ end
+
+
+feature -- Access
+
+ access_key_id: READABLE_STRING_GENERAL
+ -- Access Key ID (a 20-character, alphanumeric sequence)
+
+ aws_cloudwatch_host_name: STRING = "monitoring.amazonaws.com"
+ --aws_cloudwatch_host_name: STRING = "monitoring.us-east-1.amazonaws.com"
+
+ cloudwatch_version: STRING = "2010-08-01"
+
+ cloudwatch_path: STRING = "/"
+
+
+feature -- CloudWatch API
+
+ list_metrics (a_name_space, a_metric_name: READABLE_STRING_GENERAL)
+ local
+ kv: EPX_KEY_VALUE
+ data: DS_LINKED_LIST [EPX_KEY_VALUE]
+ form: EPX_MIME_FORM
+ do
+ data := new_data ("ListMetrics", a_name_space)
+ create kv.make ("MetricName", a_metric_name.out)
+ data.put_last (kv)
+ data.put_last (new_signature (http_method_POST, cloudwatch_path, data))
+ create form.make_form_urlencoded (data.to_array)
+ post (cloudwatch_path, form)
+ read_response
+ end
+
+ put_metric_data (a_name_space: READABLE_STRING_GENERAL; a_data_points: DS_LINEAR [AWS_METRIC_DATUM])
+ local
+ i: INTEGER
+ kv: EPX_KEY_VALUE
+ data: DS_LINKED_LIST [EPX_KEY_VALUE]
+ form: EPX_MIME_FORM
+ do
+ data := new_data ("PutMetricData", a_name_space)
+ from
+ i := 1
+ a_data_points.start
+ until
+ a_data_points.after
+ loop
+ create kv.make ("MetricData.member." + i.out + ".MetricName", a_data_points.item_for_iteration.name.out)
+ data.put_last (kv)
+ create kv.make ("MetricData.member." + i.out + ".Unit", a_data_points.item_for_iteration.unit.out)
+ data.put_last (kv)
+ create kv.make ("MetricData.member." + i.out + ".Value", a_data_points.item_for_iteration.value.out)
+ data.put_last (kv)
+ create kv.make ("MetricData.member." + i.out + ".Timestamp", a_data_points.item_for_iteration.timestamp.as_iso_8601)
+ data.put_last (kv)
+ i := i + 1
+ a_data_points.forth
+ end
+ data.put_last (new_signature (http_method_POST, cloudwatch_path, data))
+ create form.make_form_urlencoded (data.to_array)
+ post (cloudwatch_path + "?Action=PutMetricData", form)
+ read_response
+ end
+
+
+feature {NONE} -- Request signing
+
+ new_signature (a_verb, a_path: READABLE_STRING_GENERAL; a_data: DS_LINEAR [EPX_KEY_VALUE]): EPX_KEY_VALUE
+ require
+ a_verb_not_empty: a_verb /= Void and then not a_verb.is_empty
+ a_path_not_empty: a_path /= Void and then not a_path.is_empty
+ a_data_not_void: a_data /= Void
+ do
+ create Result.make ("Signature", signature (a_verb, a_path, a_data))
+ end
+
+ hasher: EPX_HMAC_CALCULATION
+
+ signature (a_verb, a_path: READABLE_STRING_GENERAL; a_data: DS_LINEAR [EPX_KEY_VALUE]): STRING
+ -- Signature as per
+ -- http://docs.amazonwebservices.com/AmazonCloudWatch/latest/DeveloperGuide/choosing_your_cloudwatch_interface.html#Using_Query_API
+ require
+ a_verb_not_empty: a_verb /= Void and then not a_verb.is_empty
+ a_path_not_empty: a_path /= Void and then not a_path.is_empty
+ a_data_not_void: a_data /= Void
+ do
+ if hasher.is_checksum_available then
+ hasher.wipe_out
+ end
+ hasher.put_string (string_to_sign (a_verb, a_path, a_data))
+ hasher.finalize
+
+ Result := as_base64 (hasher.binary_checksum)
+ ensure
+ not_empty: Result /= Void and then not Result.is_empty
+ end
+
+ string_to_sign (a_verb, a_path: READABLE_STRING_GENERAL; a_data: DS_LINEAR [EPX_KEY_VALUE]): STRING
+ -- String to sign
+ -- http://docs.amazonwebservices.com/AmazonCloudWatch/latest/DeveloperGuide/choosing_your_cloudwatch_interface.html#Using_Query_API
+ require
+ a_verb_not_empty: a_verb /= Void and then not a_verb.is_empty
+ a_path_not_empty: a_path /= Void and then not a_path.is_empty
+ a_data_not_void: a_data /= Void
+ local
+ l: DS_ARRAYED_LIST [EPX_KEY_VALUE]
+ sorter: DS_BUBBLE_SORTER [EPX_KEY_VALUE]
+ do
+ -- Sort fields
+ create sorter.make (create {EPX_KEY_VALUE_COMPARATOR})
+ create l.make (a_data.count)
+ from
+ a_data.start
+ until
+ a_data.after
+ loop
+ l.put_last (a_data.item_for_iteration)
+ a_data.forth
+ end
+ l.sort (sorter)
+
+ create Result.make (256)
+ Result.append_string (a_verb.out)
+ Result.append_character ('%N')
+ Result.append_string (server_name)
+ Result.append_character ('%N')
+ Result.append_string (a_path.out)
+ Result.append_character ('%N')
+ from
+ l.start
+ until
+ l.after
+ loop
+ Result.append_string (l.item_for_iteration.key)
+ Result.append_character ('=')
+ Result.append_string (escape_string (l.item_for_iteration.value))
+ l.forth
+ if not l.after then
+ Result.append_character ('&')
+ end
+ end
+ ensure
+ not_empty: Result /= Void and then not Result.is_empty
+ end
+
+ as_base64 (buf: STDC_BUFFER): STRING
+ -- Entire buffer in base64 encoding
+ require
+ buf_not_void: buf /= Void
+ local
+ output: KL_STRING_OUTPUT_STREAM
+ base64: UT_BASE64_ENCODING_OUTPUT_STREAM
+ do
+ create Result.make (hasher.hash_output_length * 2)
+ create output.make (Result)
+ create base64.make (output, False, False)
+ base64.put_string (buf.substring (0, buf.capacity-1))
+ base64.close
+ ensure
+ not_empty: Result /= Void and then not Result.is_empty
+ end
+
+
+feature {NONE} -- Implementation
+
+ new_data (an_action, a_name_space: READABLE_STRING_GENERAL): DS_LINKED_LIST [EPX_KEY_VALUE]
+ local
+ kv: EPX_KEY_VALUE
+ now: STDC_TIME
+ do
+ create Result.make
+ create kv.make ("AWSAccessKeyId", access_key_id.out)
+ Result.put_last (kv)
+ create kv.make ("SignatureVersion", "2")
+ Result.put_last (kv)
+ create kv.make ("SignatureMethod", "HmacSHA1")
+ Result.put_last (kv)
+ create kv.make ("Action", an_action.out)
+ Result.put_last (kv)
+ create kv.make ("Version", cloudwatch_version)
+ Result.put_last (kv)
+ create kv.make ("Namespace", a_name_space.out)
+ Result.put_last (kv)
+ create now.make_from_now
+ now.to_utc
+ create kv.make ("Timestamp", now.as_iso_8601.out)
+ Result.put_last (kv)
+ ensure
+ not_void: Result /= Void
+ end
+
+end
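
For reference, the canonical string that `string_to_sign' builds for a PutMetricData request looks roughly as follows (hypothetical access key, metric, and timestamp; as the code above shows, fields are sorted by key and only the values are URL-escaped):

    POST
    monitoring.amazonaws.com
    /
    AWSAccessKeyId=AKIAXXXXXXXXXXXXXXXX&Action=PutMetricData&MetricData.member.1.MetricName=RequestCount&MetricData.member.1.Timestamp=2012-01-01T00%3A00%3A00Z&MetricData.member.1.Unit=Count&MetricData.member.1.Value=42&Namespace=MyApplication&SignatureMethod=HmacSHA1&SignatureVersion=2&Timestamp=2012-01-01T00%3A00%3A00Z&Version=2010-08-01

`signature' HMAC-SHA1s this string with the secret key and base64-encodes the result, which `new_signature' then appends as the Signature field.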
84 library/cloudwatch/aws_metric_datum.e
@@ -0,0 +1,84 @@
+indexing
+
+ description:
+
+ "CloudWatch data point"
+
+ library: "AWS library"
+ author: "Berend de Boer <berend@pobox.com>"
+ copyright: "Copyright (c) 2012, Berend de Boer"
+ license: "MIT License (see LICENSE)"
+
+
+class
+
+ AWS_METRIC_DATUM
+
+
+create
+
+ make
+
+
+feature {NONE} -- Initialisation
+
+ make (a_name: READABLE_STRING_GENERAL; a_value: DOUBLE; a_unit: READABLE_STRING_GENERAL; a_timestamp: EPX_TIME)
+ -- If timestamp not set, it is initialised to now
+ require
+ name_not_empty: is_valid_name (a_name)
+ unit_valid: is_valid_unit (a_unit)
+ do
+ name := a_name
+ value := a_value
+ unit := a_unit
+ if a_timestamp = Void then
+ create timestamp.make_from_now
+ timestamp.to_utc
+ else
+ timestamp := a_timestamp
+ end
+ end
+
+
+feature -- Access
+
+ name: READABLE_STRING_GENERAL
+ -- The name of the metric
+
+ unit: READABLE_STRING_GENERAL
+ -- Valid Values: Seconds | Microseconds | Milliseconds |
+ -- Bytes | Kilobytes | Megabytes | Gigabytes | Terabytes |
+ -- Bits | Kilobits | Megabits | Gigabits | Terabits | Percent
+ -- | Count | Bytes/Second | Kilobytes/Second |
+ -- Megabytes/Second | Gigabytes/Second | Terabytes/Second |
+ -- Bits/Second | Kilobits/Second | Megabits/Second |
+ -- Gigabits/Second | Terabits/Second | Count/Second | None
+
+ value: DOUBLE
+ -- Although the Value parameter accepts numbers of type
+ -- Double, Amazon CloudWatch truncates values with very large
+ -- exponents. Values with base-10 exponents greater than 126
+ -- (1 x 10^126) are truncated. Likewise, values with base-10
+ -- exponents less than -130 (1 x 10^-130) are also truncated.
+
+ timestamp: EPX_TIME
+ -- The time stamp used for the datapoint;
+ -- The time stamp used for the metric. If not specified, the
+ -- default value is set to the time the metric data was
+ -- received.
+
+
+feature -- Status
+
+ is_valid_name (a_name: READABLE_STRING_GENERAL): BOOLEAN
+ do
+ Result := a_name /= Void and then not a_name.is_empty and then a_name.count <= 255
+ end
+
+ is_valid_unit (a_unit: READABLE_STRING_GENERAL): BOOLEAN
+ do
+ Result := a_unit /= Void and then not a_unit.is_empty
+ end
+
+
+end
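
A minimal usage sketch for the two CloudWatch classes above. The namespace and metric name are made up, and the credentials are placeholders (the preconditions require a 20-character key id and a 40-character secret):

    class CLOUDWATCH_EXAMPLE

    create

        make

    feature {NONE} -- Initialisation

        make
            local
                cw: AWS_CLOUDWATCH
                datum: AWS_METRIC_DATUM
                data: DS_LINKED_LIST [AWS_METRIC_DATUM]
            do
                -- Placeholder credentials, 20 and 40 characters long
                create cw.make ("AKIAXXXXXXXXXXXXXXXX", "0123456789012345678901234567890123456789")
                -- A Void timestamp makes the datum default to now (UTC)
                create datum.make ("RequestCount", 42.0, "Count", Void)
                create data.make
                data.put_last (datum)
                cw.put_metric_data ("MyApplication", data)
            end

    end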
50 library/documents/CompleteMultipartUploadResult_document.e
@@ -0,0 +1,50 @@
+note
+
+ description: "Generated class"
+
+class
+
+ COMPLETEMULTIPARTUPLOADRESULT_DOCUMENT
+
+inherit
+
+ GEDXML_DOCUMENT
+ rename
+ document_element as CompleteMultipartUploadResult
+ redefine
+ CompleteMultipartUploadResult
+ end
+
+create
+
+ make,
+ make_from_file,
+ make_from_stream,
+ make_from_string
+
+feature -- Status
+
+ has_document_element: BOOLEAN
+ do
+ Result := template.CompleteMultipartUploadResult /= Void
+ end
+
+feature -- Access
+
+ CompleteMultipartUploadResult: like template.CompleteMultipartUploadResult
+ assign
+ set_document_element
+
+ template: COMPLETEMULTIPARTUPLOADRESULT_TEMPLATE
+ once
+ create Result.make
+ end
+
+feature -- Change
+
+ assign_document_element
+ do
+ CompleteMultipartUploadResult := template.CompleteMultipartUploadResult
+ end
+
+end
81 library/documents/CompleteMultipartUploadResult_template.e
@@ -0,0 +1,81 @@
+note
+
+ description: "Generated class"
+
+class
+
+ COMPLETEMULTIPARTUPLOADRESULT_TEMPLATE
+
+inherit
+
+ GEDXML_TEMPLATE
+
+create
+
+ make
+
+feature -- Elements
+
+ CompleteMultipartUploadResult: TUPLE [
+ Location: STRING
+ Bucket: STRING
+ Key: STRING
+ ETag: STRING
+ ]
+
+feature -- Template reuse
+
+ wipe_out
+ do
+ CompleteMultipartUploadResult := Void
+ end
+
+feature -- Element matching
+
+ root_start_matcher: DS_HASH_TABLE [PROCEDURE [ANY, TUPLE []], STRING]
+ once
+ create Result.make (1)
+ Result.put (agent
+ do
+ create CompleteMultipartUploadResult
+ save_matchers
+ start_matchers := CompleteMultipartUploadResult_start_matchers
+ end_matchers := CompleteMultipartUploadResult_end_matchers
+ end, once "CompleteMultipartUploadResult")
+ end
+
+ CompleteMultipartUploadResult_start_matchers: DS_HASH_TABLE [PROCEDURE [ANY, TUPLE []], STRING]
+ once
+ create Result.make (0)
+ ensure
+ not_void: Result /= Void
+ end
+
+ CompleteMultipartUploadResult_end_matchers: DS_HASH_TABLE [PROCEDURE [ANY, TUPLE [STRING]], STRING]
+ once
+ create Result.make (5)
+ Result.put (agent (s: STRING)
+ do
+ CompleteMultipartUploadResult.Location := s.twin
+ end, once "Location")
+ Result.put (agent (s: STRING)
+ do
+ CompleteMultipartUploadResult.Bucket := s.twin
+ end, once "Bucket")
+ Result.put (agent (s: STRING)
+ do
+ CompleteMultipartUploadResult.Key := s.twin
+ end, once "Key")
+ Result.put (agent (s: STRING)
+ do
+ CompleteMultipartUploadResult.ETag := s.twin
+ end, once "ETag")
+ Result.put (agent (s: STRING)
+ do
+ restore_matchers
+ end, once "CompleteMultipartUploadResult")
+ ensure
+ not_void: Result /= Void
+ end
+
+end
50 library/documents/Error_document.e
@@ -0,0 +1,50 @@
+note
+
+ description: "Generated class"
+
+class
+
+ ERROR_DOCUMENT
+
+inherit
+
+ GEDXML_DOCUMENT
+ rename
+ document_element as Error
+ redefine
+ Error
+ end
+
+create
+
+ make,
+ make_from_file,
+ make_from_stream,
+ make_from_string
+
+feature -- Status
+
+ has_document_element: BOOLEAN
+ do
+ Result := template.Error /= Void
+ end
+
+feature -- Access
+
+ Error: like template.Error
+ assign
+ set_document_element
+
+ template: ERROR_TEMPLATE
+ once
+ create Result.make
+ end
+
+feature -- Change
+
+ assign_document_element
+ do
+ Error := template.Error
+ end
+
+end
81 library/documents/Error_template.e
@@ -0,0 +1,81 @@
+note
+
+ description: "Generated class"
+
+class
+
+ ERROR_TEMPLATE
+
+inherit
+
+ GEDXML_TEMPLATE
+
+create
+
+ make
+
+feature -- Elements
+
+ Error: TUPLE [
+ Code: STRING
+ Message: STRING
+ RequestId: STRING
+ HostId: STRING
+ ]
+
+feature -- Template reuse
+
+ wipe_out
+ do
+ Error := Void
+ end
+
+feature -- Element matching
+
+ root_start_matcher: DS_HASH_TABLE [PROCEDURE [ANY, TUPLE []], STRING]
+ once
+ create Result.make (1)
+ Result.put (agent
+ do
+ create Error
+ save_matchers
+ start_matchers := Error_start_matchers
+ end_matchers := Error_end_matchers
+ end, once "Error")
+ end
+
+ Error_start_matchers: DS_HASH_TABLE [PROCEDURE [ANY, TUPLE []], STRING]
+ once
+ create Result.make (0)
+ ensure
+ not_void: Result /= Void
+ end
+
+ Error_end_matchers: DS_HASH_TABLE [PROCEDURE [ANY, TUPLE [STRING]], STRING]
+ once
+ create Result.make (5)
+ Result.put (agent (s: STRING)
+ do
+ Error.Code := s.twin
+ end, once "Code")
+ Result.put (agent (s: STRING)
+ do
+ Error.Message := s.twin
+ end, once "Message")
+ Result.put (agent (s: STRING)
+ do
+ Error.RequestId := s.twin
+ end, once "RequestId")
+ Result.put (agent (s: STRING)
+ do
+ Error.HostId := s.twin
+ end, once "HostId")
+ Result.put (agent (s: STRING)
+ do
+ restore_matchers
+ end, once "Error")
+ ensure
+ not_void: Result /= Void
+ end
+
+end
50 library/documents/InitiateMultipartUploadResult_document.e
@@ -0,0 +1,50 @@
+note
+
+ description: "Generated class"
+
+class
+
+ INITIATEMULTIPARTUPLOADRESULT_DOCUMENT
+
+inherit
+
+ GEDXML_DOCUMENT
+ rename
+ document_element as InitiateMultipartUploadResult
+ redefine
+ InitiateMultipartUploadResult
+ end
+
+create
+
+ make,
+ make_from_file,
+ make_from_stream,
+ make_from_string
+
+feature -- Status
+
+ has_document_element: BOOLEAN
+ do
+ Result := template.InitiateMultipartUploadResult /= Void
+ end
+
+feature -- Access
+
+ InitiateMultipartUploadResult: like template.InitiateMultipartUploadResult
+ assign
+ set_document_element
+
+ template: INITIATEMULTIPARTUPLOADRESULT_TEMPLATE
+ once
+ create Result.make
+ end
+
+feature -- Change
+
+ assign_document_element
+ do
+ InitiateMultipartUploadResult := template.InitiateMultipartUploadResult
+ end
+
+end
76 library/documents/InitiateMultipartUploadResult_template.e
@@ -0,0 +1,76 @@
+note
+
+ description: "Generated class"
+
+class
+
+ INITIATEMULTIPARTUPLOADRESULT_TEMPLATE
+
+inherit
+
+ GEDXML_TEMPLATE
+
+create
+
+ make
+
+feature -- Elements
+
+ InitiateMultipartUploadResult: TUPLE [
+ Bucket: STRING
+ Key: STRING
+ UploadId: STRING
+ ]
+
+feature -- Template reuse
+
+ wipe_out
+ do
+ InitiateMultipartUploadResult := Void
+ end
+
+feature -- Element matching
+
+ root_start_matcher: DS_HASH_TABLE [PROCEDURE [ANY, TUPLE []], STRING]
+ once
+ create Result.make (1)
+ Result.put (agent
+ do
+ create InitiateMultipartUploadResult
+ save_matchers
+ start_matchers := InitiateMultipartUploadResult_start_matchers
+ end_matchers := InitiateMultipartUploadResult_end_matchers
+ end, once "InitiateMultipartUploadResult")
+ end
+
+ InitiateMultipartUploadResult_start_matchers: DS_HASH_TABLE [PROCEDURE [ANY, TUPLE []], STRING]
+ once
+ create Result.make (0)
+ ensure
+ not_void: Result /= Void
+ end
+
+ InitiateMultipartUploadResult_end_matchers: DS_HASH_TABLE [PROCEDURE [ANY, TUPLE [STRING]], STRING]
+ once
+ create Result.make (4)
+ Result.put (agent (s: STRING)
+ do
+ InitiateMultipartUploadResult.Bucket := s.twin
+ end, once "Bucket")
+ Result.put (agent (s: STRING)
+ do
+ InitiateMultipartUploadResult.Key := s.twin
+ end, once "Key")
+ Result.put (agent (s: STRING)
+ do
+ InitiateMultipartUploadResult.UploadId := s.twin
+ end, once "UploadId")
+ Result.put (agent (s: STRING)
+ do
+ restore_matchers
+ end, once "InitiateMultipartUploadResult")
+ ensure
+ not_void: Result /= Void
+ end
+
+end
20 library/library.xace
@@ -0,0 +1,20 @@
+<?xml version="1.0"?>
+
+<library name="aws" prefix="aws_">
+
+ <description>
+ description: "Xace file for the Eiffel AWS Library"
+ library: "AWS Eiffel"
+ author: "Berend de Boer [berend@pobox.com]"
+ copyright: "Copyright (c) 2008-2012, Berend de Boer"
+ license: "MIT License (see LICENSE)"
+ </description>
+
+ <cluster name="aws" location="${AWS_EIFFEL}/library">
+ <cluster name="cloudwatch"/>
+ <cluster name="documents"/>
+ <cluster name="s3"/>
+ <cluster name="tools"/>
+ </cluster>
+
+</library>
39 library/s3/s3_access_key.e
@@ -0,0 +1,39 @@
+indexing
+
+ description:
+
+ "Default access keys"
+
+ library: "s3 library"
+ author: "Berend de Boer <berend@pobox.com>"
+ copyright: "Copyright (c) 2008, Berend de Boer"
+ license: "MIT License (see LICENSE)"
+ date: "$Date$"
+ revision: "$Revision$"
+
+
+class
+
+ S3_ACCESS_KEY
+
+
+feature -- Access
+
+ access_key_id: STRING is
+ local
+ env: STDC_ENV_VAR
+ once
+ create env.make (once "S3_ACCESS_KEY_ID")
+ Result := env.value
+ end
+
+ secret_access_key: STRING is
+ local
+ env: STDC_ENV_VAR
+ once
+ create env.make (once "S3_SECRET_ACCESS_KEY")
+ Result := env.value
+ end
+
+
+end
388 library/s3/s3_client.e
@@ -0,0 +1,388 @@
+note
+
+ description:
+
+ "S3 REST interface"
+
+ library: "S3 library"
+ author: "Berend de Boer <berend@pobox.com>"
+ copyright: "Copyright (c) 2008-2011, Berend de Boer"
+ license: "MIT License (see LICENSE)"
+ date: "$Date$"
+ revision: "$Revision$"
+
+
+class
+
+ S3_CLIENT
+
+
+inherit
+
+ EPX_HTTP_11_CLIENT
+ rename
+ make as make_http_11_client
+ redefine
+ append_other_fields
+ end
+
+
+inherit {NONE}
+
+ UC_SHARED_STRING_EQUALITY_TESTER
+
+
+create
+
+ make
+
+
+
+feature {NONE} -- Initialization
+
+ make (an_access_key_id, a_secret_access_key, a_region, a_bucket: STRING)
+ require
+ access_key_has_correct_length: an_access_key_id /= Void and then an_access_key_id.count = 20
+ secret_key_has_correct_length: a_secret_access_key /= Void and then a_secret_access_key.count = 40
+ a_bucket_not_empty: a_bucket /= Void and then not a_bucket.is_empty
+ do
+ region := a_region
+ bucket := a_bucket
+ if region = Void or else region.is_empty then
+ s3_host_name := bucket + ".s3.amazonaws.com"
+ else
+ s3_host_name := bucket + ".s3-" + region + ".amazonaws.com"
+ end
+ make_http_11_client (s3_host_name)
+ access_key_id := an_access_key_id
+ create hasher.make (a_secret_access_key, create {EPX_SHA1_CALCULATION}.make)
+ end
+
+
+feature -- Amazon primitives
+
+ get_object (a_bucket, a_key: STRING)
+ -- Send GET request for an object to S3.
+ require
+ a_bucket_not_empty: a_bucket /= Void and then not a_bucket.is_empty
+ a_key_not_empty: a_key /= Void and then not a_key.is_empty
+ do
+ get (once "/" + a_bucket + once "/" + a_key)
+ end
+
+ put_object (a_bucket, a_key: STRING; a_size: INTEGER)
+ -- Send PUT request for an object to S3.
+ -- Assumes data itself is written straight to `http'
+ require
+ a_bucket_not_empty: a_bucket /= Void and then not a_bucket.is_empty
+ a_key_not_empty: a_key /= Void and then not a_key.is_empty
+ size_not_negative: a_size >= 0
+ local
+ data: EPX_MIME_PART
+ do
+ create data.make_empty
+ data.header.set_content_type (mime_type_binary, mime_subtype_octet_stream, Void)
+ data.header.set_content_length (a_size)
+ send_request (http_method_PUT, once "/" + a_bucket + once "/" + a_key, data)
+ end
+
+
+feature -- Multipart upload
+
+ parts: detachable DS_LINKED_LIST [STRING]
+ -- Keep track of uploaded parts
+
+ multipart_upload_id (an_object_name: STRING): STRING
+ -- A new multipart upload id
+ require
+ an_object_name_not_empty: an_object_name /= Void and then not an_object_name.is_empty
+ not_secure: not is_secure_connection
+ local
+ d: INITIATEMULTIPARTUPLOADRESULT_DOCUMENT
+ do
+ create parts.make
+ post ("/" + an_object_name + "?uploads", Void)
+ read_response
+ if is_response_ok then
+ create d.make_from_string (body.as_string)
+ Result := d.InitiateMultipartUploadResult.UploadId
+ end
+ ensure
+ has_upload_id: is_response_ok implies Result /= Void and then not Result.is_empty
+ end
+
+ begin_part_upload (an_upload_id, an_object_name: STRING; a_size: INTEGER)
+ -- Send upload part request. Afterwards client can use
+ -- `http'.`put_buffer' to write bytes.
+ require
+ an_upload_id_not_empty: an_upload_id /= Void and then not an_upload_id.is_empty
+ an_object_name_not_empty: an_object_name /= Void and then not an_object_name.is_empty
+ size_not_negative: a_size >= 0
+ multipart_upload_started: parts /= Void
+ not_too_many_parts: parts.count <= 10000
+ local
+ data: EPX_MIME_PART
+ do
+ create data.make_empty
+ data.header.set_content_type (mime_type_binary, mime_subtype_octet_stream, Void)
+ data.header.set_content_length (a_size)
+ send_request (http_method_PUT, once "/" + an_object_name + once "?partNumber=" + (parts.count + 1).out + "&uploadId=" + an_upload_id, data)
+ tcp_socket.set_blocking_io (False)
+ end
+
+ end_part_upload
+ local
+ etag: STRING
+ do
+ tcp_socket.set_blocking_io (True)
+ --tcp_socket.shutdown_write
+ read_response
+ if is_response_ok then
+ -- Let's hope Amazon never changes the case of their
+ -- header; that would break us.
+ etag := fields.item (once "Etag").value
+ parts.put_last (etag)
+ close
+ end
+ ensure
+ closed: is_response_ok implies not is_open
+ end
+
+ complete_multipart_upload (an_upload_id, an_object_name: STRING)
+ -- Finish an initiated multi-part upload.
+ require
+ an_upload_id_not_empty: an_upload_id /= Void and then not an_upload_id.is_empty
+ an_object_name_not_empty: an_object_name /= Void and then not an_object_name.is_empty
+ multipart_upload_started: parts /= Void
+ local
+ xml: EPX_XML_WRITER
+ data: EPX_MIME_PART
+ i: INTEGER
+ do
+ create xml.make
+ xml.add_header_utf_8_encoding
+ xml.start_tag (once "CompleteMultipartUpload")
+ from
+ parts.start
+ i := 1
+ until
+ parts.after
+ loop
+ xml.start_tag ("Part")
+ xml.add_tag ("PartNumber", i.out)
+ xml.add_tag ("ETag", parts.item_for_iteration)
+ xml.stop_tag
+ parts.forth
+ i := i + 1
+ end
+ xml.stop_tag
+ create data.make_empty
+ data.header.set_content_type (mime_type_binary, mime_subtype_octet_stream, Void)
+ data.create_singlepart_body
+ data.text_body.append_string (xml.as_string)
+ post ("/" + an_object_name + "?uploadId=" + an_upload_id, data)
+ read_response
+ -- It appears we should actually wait here to read the
+ -- response as even a 200 OK might still become a failure.
+ parts := Void
+ end
+
+
+feature -- Amazon higher level functions
+
+ retrieve_object_header_with_retry (a_bucket, a_key: STRING)
+ -- As S3 fails a lot, this function retries up to `max_retries'
+ -- times to send a request and read the response header.
+ -- Retrieving the body may still fail of course.
+ -- It is advisable that `set_continue_on_error' is called
+ -- beforehand, to be resilient against network errors as well.
+ require
+ a_bucket_not_empty: a_bucket /= Void and then not a_bucket.is_empty
+ a_key_not_empty: a_key /= Void and then not a_key.is_empty
+ local
+ retries: INTEGER
+ done: BOOLEAN
+ do
+ from
+ until
+ done or else
+ retries > max_retries
+ loop
+ get_object (a_bucket, a_key)
+ if http.errno.is_ok then
+ read_response_header
+ if http.errno.is_ok then
+ done := response_code /= 500 and then response_code /= 502 and then response_code /= 503 and then response_code /= 504
+ end
+ end
+ retries := retries + 1
+ end
+ end
+
+
+feature -- Access
+
+ access_key_id: STRING
+ -- Access Key ID (a 20-character, alphanumeric sequence)
+
+ max_retries: INTEGER = 5
+ -- How often should requests be retried
+
+ s3_host_name: STRING
+
+ region: STRING
+ -- Optional region
+
+ bucket: STRING
+ -- Bucket
+
+
+feature {NONE} -- Implementation
+
+ append_other_fields (a_verb, a_path: STRING; a_request_data: EPX_MIME_PART; request: STRING)
+ local
+ now: STDC_TIME
+ date: STRING
+ content_type: STRING
+ uri: UT_URI
+ signature: STRING
+ i: INTEGER
+ parameters: ARRAY [STRING]
+ name_value: STRING
+ name: STRING
+ p: INTEGER
+ first_time: BOOLEAN
+ exclude_acl: BOOLEAN
+ do
+ create uri.make (a_path)
+ if uri.has_query and then uri.query.has_substring ("uploadId=") then
+ exclude_acl := true
+ end
+
+ -- Append Amazon's special signature field
+ if a_request_data /= Void and then a_request_data.header.content_type /= Void then
+ content_type := a_request_data.header.content_type.value
+ end
+ create now.make_from_now
+ now.to_utc
+ date := now.rfc_date_string
+ create signature.make (256)
+ signature.append_string (a_verb)
+ signature.append_character ('%N')
+ -- No MD5
+ signature.append_character ('%N')
+ signature.append_string (content_type)
+ signature.append_character ('%N')
+ signature.append_string (date)
+ if not exclude_acl then
+ signature.append_string (once "%Nx-amz-acl:")
+ signature.append_string (acl)
+ end
+ signature.append_character ('%N')
+ -- Append path
+ signature.append_string ("/" + bucket + uri.path)
+ -- Append sub-resource(s)
+ if uri.has_query then
+ parameters := sh.split_on (uri.query, '&')
+ first_time := true
+ from
+ i := parameters.lower
+ until
+ i > parameters.upper
+ loop
+ name_value := parameters [i]
+ p := name_value.index_of ('=', 1)
+ if p = 0 then
+ name := name_value
+ else
+ name := name_value.substring (1, p - 1)
+ end
+ if sub_resources.has (name) then
+ if first_time then
+ signature.append_character ('?')
+ first_time := false
+ else
+ signature.append_character ('&')
+ end
+ signature.append_string (name_value)
+ end
+ i := i + 1
+ end
+ end
+ if hasher.is_checksum_available then
+ hasher.wipe_out
+ end
+ hasher.put_string (signature)
+ hasher.finalize
+ request.append (once "Date: ")
+ request.append (date)
+ request.append_string (once_new_line)
+ -- ACL header, not applicable for all requests
+ if not exclude_acl then
+ request.append (once "x-amz-acl: ")
+ request.append (acl)
+ request.append_string (once_new_line)
+ end
+ request.append (field_name_authorization)
+ request.append_string (once_colon_space)
+ request.append_string (once "AWS ")
+ request.append_string (access_key_id)
+ request.append_character (':')
+ request.append_string (as_base64 (hasher.binary_checksum))
+ request.append_string (once_new_line)
+ end
+
+ as_base64 (buf: STDC_BUFFER): STRING
+ -- Entire buffer in base64 encoding
+ require
+ buf_not_void: buf /= Void
+ local
+ output: KL_STRING_OUTPUT_STREAM
+ base64: UT_BASE64_ENCODING_OUTPUT_STREAM
+ do
+ create Result.make (hasher.hash_output_length * 2)
+ create output.make (Result)
+ create base64.make (output, False, False)
+ base64.put_string (buf.substring (0, buf.capacity-1))
+ base64.close
+ ensure
+ not_empty: Result /= Void and then not Result.is_empty
+ end
+
+
+feature {NONE} -- Implementation
+
+ acl: STRING = "private"
+
+ hasher: EPX_HMAC_CALCULATION
+
+ sub_resources: DS_HASH_SET [STRING]
+ -- Known sub resources as per http://docs.amazonwebservices.com/AmazonS3/latest/dev/RESTAuthentication.html
+ do
+ create Result.make (14)
+ Result.set_equality_tester (string_equality_tester)
+ Result.put_last ("acl")
+ Result.put_last ("location")
+ Result.put_last ("logging")
+ Result.put_last ("notification")
+ Result.put_last ("partNumber")
+ Result.put_last ("policy")
+ Result.put_last ("requestPayment")
+ Result.put_last ("torrent")
+ Result.put_last ("uploadId")
+ Result.put_last ("uploads")
+ Result.put_last ("versionId")
+ Result.put_last ("versioning")
+ Result.put_last ("versions")
+ Result.put_last ("website")
+ ensure
+ not_void: Result /= Void
+ end
+
+invariant
+
+ access_key_has_correct_length: access_key_id /= Void and then access_key_id.count = 20
+ hasher_not_void: hasher /= Void
+ s3_host_name_not_empty: s3_host_name /= Void and then not s3_host_name.is_empty
+
+end
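
A sketch of the multipart upload flow the features above support, with placeholder credentials, bucket, and object name, and a single fixed-size part for brevity (a real caller streams input and loops over parts, as S3_WRITER below does). Note that `begin_part_upload' switches the socket to non-blocking i/o, so production code must cope with partial writes:

    local
        s3: S3_CLIENT
        id: STRING
        part: STDC_BUFFER
    do
        -- Placeholder credentials; a Void region selects s3.amazonaws.com
        create s3.make ("AKIAXXXXXXXXXXXXXXXX", "0123456789012345678901234567890123456789", Void, "my-bucket")
        id := s3.multipart_upload_id ("my-object")
        -- 5 MB, the minimum S3 accepts for every part but the last
        create part.allocate (5242880)
        -- ... fill `part' with data ...
        s3.begin_part_upload (id, "my-object", part.capacity)
        s3.http.put_buffer (part, 0, part.capacity)
        s3.end_part_upload
        s3.complete_multipart_upload (id, "my-object")
    end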
63 library/s3/s3_verbose_routines.e
@@ -0,0 +1,63 @@
+indexing
+
+ description:
+
+ "S3 library"
+
+ library: "s3 library"
+ author: "Berend de Boer <berend@pobox.com>"
+ copyright: "Copyright (c) 2008, Berend de Boer"
+ license: "MIT License (see LICENSE)"
+ date: "$Date$"
+ revision: "$Revision$"
+
+
+class
+
+ S3_VERBOSE_ROUTINES
+
+
+inherit
+
+ ANY
+
+ ST_FORMATTING_ROUTINES
+ export
+ {NONE} all
+ end
+
+
+feature -- Helpers
+
+ formatted_upload_speed (bytes_send: INTEGER_64; start_time: STDC_TIME): STRING is
+ -- Nicely format average speed.
+ require
+ bytes_send_not_negative: bytes_send >= 0
+ start_time_not_void: start_time /= Void
+ local
+ now: STDC_TIME
+ duration: INTEGER
+ per_sec: DOUBLE
+ unit: STRING
+ do
+ create now.make_from_now
+ duration := now.value - start_time.value
+ if duration = 0 then
+ duration := 1
+ end
+ per_sec := bytes_send / duration
+ if per_sec >= 1024 * 1024 then
+ per_sec := per_sec / (1024 * 1024)
+ unit := once "MB"
+ elseif per_sec >= 1024 then
+ per_sec := per_sec / 1024
+ unit := once "KB"
+ else
+ unit := once "bytes"
+ end
+ Result := format (once "$.2f $s/s", <<double_cell (per_sec), unit>>)
+ ensure
+ not_empty: Result /= Void and then not Result.is_empty
+ end
+
+end
7 library/samples/CompleteMultipartUploadResult.xml
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+ <Location>http://Example-Bucket.s3.amazonaws.com/Example-Object</Location>
+ <Bucket>Example-Bucket</Bucket>
+ <Key>Example-Object</Key>
+ <ETag>"3858f62230ac3c915f300c664312c11f-9"</ETag>
+</CompleteMultipartUploadResult>
6 library/samples/Error.xml
@@ -0,0 +1,6 @@
+<Error>
+ <Code>InternalError</Code>
+ <Message>We encountered an internal error. Please try again.</Message>
+ <RequestId>656c76696e6727732072657175657374</RequestId>
+ <HostId>Uuag1LuByRx9e6j5Onimru9pO4ZVKnJ2Qz7/C1NPcfTWAtRPfTaOFg==</HostId>
+</Error>
6 library/samples/InitiateMultipartUploadResult.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+ <Bucket>example-bucket</Bucket>
+ <Key>example-object</Key>
+ <UploadId>VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA</UploadId>
+</InitiateMultipartUploadResult>
110 library/tools/s3_tool.e
@@ -0,0 +1,110 @@
+note
+
+ description:
+
+ "Base class for s3 based utilities"
+
+ library: "S3 library"
+
+ author: "Berend de Boer <berend@pobox.com>"
+ copyright: "Copyright (c) 2011, Berend de Boer"
+ license: "MIT License (see LICENSE)"
+ date: "$Date$"
+ revision: "$Revision$"
+
+
+deferred class
+
+ S3_TOOL
+
+
+inherit
+
+ S3_ACCESS_KEY
+
+ S3_VERBOSE_ROUTINES
+ export
+ {NONE} all
+ end
+
+ EPX_CURRENT_PROCESS
+
+
+feature {NONE} -- Initialize
+
+ make
+ -- Initialize and print exception if exception occurs.
+ do
+ make_no_rescue
+ rescue
+ if exceptions.is_developer_exception then
+ fd_stderr.put_line (exceptions.developer_exception_name)
+ else
+ fd_stderr.put_string (once "Exception code: ")
+ fd_stderr.put_line (exceptions.exception.out)
+ end
+ exit_with_failure
+ end
+
+
+ make_no_rescue
+ deferred
+ end
+
+
+feature -- Access
+
+ region: AP_STRING_OPTION
+
+ bucket: AP_STRING_OPTION
+
+ verbose: AP_FLAG
+
+ reduced_redundancy: AP_FLAG
+
+
+feature -- Command-line parsing
+
+ new_default_parser (a_description: STRING): AP_PARSER
+ -- Parser with default options
+ require
+ description_not_empty: a_description /= Void and then not a_description.is_empty
+ do
+ create Result.make
+ Result.set_application_description (a_description)
+ Result.set_parameters_description (once "")
+ create bucket.make ('b', "bucket")
+ bucket.set_description ("Bucket name.")
+ bucket.enable_mandatory
+ Result.options.force_last (bucket)
+ create region.make ('r', "region")
+ region.set_description ("Region.")
+ Result.options.force_last (region)
+ create reduced_redundancy.make ('d', "reduced-redundancy")
+ reduced_redundancy.set_description ("Use reduced redundancy storage.")
+ Result.options.force_last (reduced_redundancy)
+ create verbose.make ('v', "verbose")
+ verbose.set_description ("Verbose output like progress.")
+ Result.options.force_last (verbose)
+ ensure
+ not_void: Result /= Void
+ end
+
+ do_parse_arguments (a_parser: AP_PARSER)
+ -- Parse arguments.
+ require
+ not_void: a_parser /= Void
+ do
+ a_parser.parse_arguments
+ if access_key_id.is_empty then
+ fd_stderr.put_line ("Environment variable S3_ACCESS_KEY_ID not set. It should contain your Amazon access key.")
+ a_parser.help_option.display_usage (a_parser)
+ end
+ if secret_access_key.is_empty then
+ fd_stderr.put_line ("Environment variable S3_SECRET_ACCESS_KEY not set. It should contain your Amazon secret access key.")
+ a_parser.help_option.display_usage (a_parser)
+ end
+ end
+
+
+end
25 src/s3cat/build.eant
@@ -0,0 +1,25 @@
+<?xml version="1.0"?>
+
+<project name="s3cat" default="help">
+
+ <description>
+ system: "S3 tools"
+ copyright: "Copyright (c) 2008 by Berend de Boer"
+ license: "MIT License (see LICENSE)"
+ </description>
+
+ <inherit>
+ <parent location="${GOBO}/misc/eiffel.eant">
+ <redefine target="init_system"/>
+ </parent>
+ </inherit>
+
+
+<!-- Implementation -->
+
+ <target name="init_system">
+ <set name="system" value="s3cat"/>
+ <set name="system_dir" value="${S3}/src/${system}"/>
+ </target>
+
+</project>
240 src/s3cat/s3_cat.e
@@ -0,0 +1,240 @@
+indexing
+
+ description:
+
+ "Output object from s3 to stdout"
+
+ library: "s3 tools"
+ author: "Berend de Boer <berend@pobox.com>"
+ copyright: "Copyright (c) 2008, Berend de Boer"
+ license: "MIT License (see LICENSE)"
+ date: "$Date$"
+ revision: "$Revision$"
+
+
+class
+
+ S3_CAT
+
+
+inherit
+
+ S3_ACCESS_KEY
+
+ EPX_CURRENT_PROCESS
+
+ ST_FORMATTING_ROUTINES
+ export
+ {NONE} all
+ end
+
+create
+
+ make,
+ make_no_rescue
+
+
+feature {NONE} -- Initialize
+
+ make is
+ do
+ make_no_rescue
+ rescue
+ if exceptions.is_developer_exception then
+ fd_stderr.put_line (exceptions.developer_exception_name)
+ else
+ fd_stderr.put_string (once "Exception code: ")
+ fd_stderr.put_line (exceptions.exception.out)
+ end
+ exit_with_failure
+ end
+
+ make_no_rescue is
+ local
+ s3: S3_CLIENT
+ reading_parts: BOOLEAN
+ part: INTEGER
+ start: STDC_TIME
+ --total_bytes_received: INTEGER_64
+ bytes_received: INTEGER_64
+ buffer: STDC_BUFFER
+ do
+ parse_arguments
+ create buffer.allocate (buffer_size)
+ create s3.make (access_key_id, secret_access_key)
+ -- Cannot reuse, because we expect an eof in the append code below
+ --s3.set_reuse_connection
+ s3.set_continue_on_error
+ create start.make_from_now
+ s3.retrieve_object_header_with_retry (bucket.parameter, key.parameter)
+ --s3.read_response
+ if s3.response_code = 404 then
+ if verbose.occurrences > 2 then
+ print_http_response_header (s3)
+ end
+ -- Exact name not found, might be a prefix, so check if
+ -- part 0 exists.
+ s3.retrieve_object_header_with_retry (bucket.parameter, key.parameter + format(once ":$020i", <<integer_cell (part)>>))
+ reading_parts := True
+ end
+ print_http_response_header (s3)
+ if s3.response_code = 200 then
+ if verbose.occurrences > 0 then
+ fd_stderr.put_string (once "Downloading " + s3.last_uri)
+ fd_stderr.put_line (" (" + s3.response.header.content_length.length.out + " bytes)")
+ end
+ fd_stdout.put_string (s3.response.text_body.as_string)
+ --fd_stdout.append (s3.http)
+ read_object (s3.http, buffer, s3.response.header.content_length.length)
+ bytes_received := bytes_received + s3.response.header.content_length.length
+ print_download_speed (bytes_received, start)
+ if reading_parts then
+ from
+ part := 1
+ until
+ s3.response_code = 404
+ loop
+ s3.retrieve_object_header_with_retry (bucket.parameter, key.parameter + format(once ":$020i", <<integer_cell (part)>>))
+ print_http_response_header (s3)
+ if s3.response_code = 200 then
+ if verbose.occurrences > 0 then
+ fd_stderr.put_line (once "Downloading " + s3.last_uri)
+ end
+ fd_stdout.put_string (s3.response.text_body.as_string)
+ read_object (s3.http, buffer, s3.response.header.content_length.length)
+ bytes_received := bytes_received + s3.response.header.content_length.length
+ print_download_speed (bytes_received, start)
+ part := part + 1
+ end
+ end
+ end
+ elseif s3.response_code = 403 then
+ fd_stderr.put_line ("Invalid credentials for " + bucket.parameter + "/" + key.parameter)
+ exit_with_failure
+ else
+ fd_stderr.put_line ("Server response: " + s3.response_code.out)
+ fd_stderr.put_line ("No object named " + bucket.parameter + "/" + key.parameter)
+ exit_with_failure
+ end
+ end
+
+
+feature -- Access
+
+ buffer_size: INTEGER is 16384
+
+ bucket: AP_STRING_OPTION
+
+ key: AP_STRING_OPTION
+
+ verbose: AP_FLAG
+
+
+feature {NONE} -- Argument parsing
+
+ parse_arguments is
+ local
+ parser: AP_PARSER
+ do
+ create parser.make
+ parser.set_application_description (once "Output contents of a given S3 object to standard output.")
+ parser.set_parameters_description (once "")
+ create bucket.make ('b', "bucket")
+ bucket.set_description ("Bucket name.")
+ bucket.enable_mandatory
+ parser.options.force_last (bucket)
+ create key.make ('k', "key")
+ key.set_description ("Key name.")
+ key.enable_mandatory
+ parser.options.force_last (key)
+ create verbose.make ('v', "verbose")
+ verbose.set_description ("Verbose output like progress.")
+ parser.options.force_last (verbose)
+
+ parser.parse_arguments
+ if access_key_id.is_empty then
+ fd_stderr.put_line ("Environment variable S3_ACCESS_KEY_ID not set. It should contain your Amazon access key.")
+ parser.help_option.display_usage (parser)
+ end
+ if secret_access_key.is_empty then
+ fd_stderr.put_line ("Environment variable S3_SECRET_ACCESS_KEY not set. It should contain your Amazon secret access key.")
+ parser.help_option.display_usage (parser)
+ end
+ end
+
+
+feature {NONE} -- Implementation
+
+ print_http_response_header (s3: S3_CLIENT) is
+ require
+ s3_not_void: s3 /= Void
+ do
+ if verbose.occurrences > 1 then
+ fd_stderr.put_string (s3.response_code.out)
+ fd_stderr.put_character (' ')
+ fd_stderr.put_line (s3.response_phrase)
+ fd_stderr.put_line (s3.response.header.as_string)
+ end
+ end
+
+ print_download_speed (bytes_received: INTEGER_64; start_time: STDC_TIME) is
+ local
+ now: STDC_TIME
+ duration: INTEGER
+ per_sec: DOUBLE
+ unit: STRING
+ do
+ if verbose.occurrences > 0 then
+ create now.make_from_now
+ duration := now.value - start_time.value
+ if duration = 0 then
+ duration := 1
+ end
+ per_sec := bytes_received / duration
+ if per_sec > 1024 then
+ per_sec := per_sec / 1024
+ unit := once "KB"
+ else
+ unit := once "bytes"
+ end
+ fd_stderr.put_line (format (once "$.2f $s/s", <<double_cell (per_sec), unit>>))
+ end
+ end
+
+ read_object (in: EPX_TEXT_IO_STREAM; buffer: STDC_BUFFER; an_object_size: INTEGER_64) is
+ -- Read a single object of size `an_object_size'.
+ require
+ in_not_void: in /= Void
+ buffer_not_void: buffer /= Void
+ object_size_not_negative: an_object_size >= 0
+ local
+ bytes_to_read: INTEGER_64
+ max_to_read: INTEGER
+ do
+ from
+ bytes_to_read := an_object_size
+ if bytes_to_read > buffer.capacity then
+ max_to_read := buffer.capacity
+ else
+ max_to_read := bytes_to_read.to_integer
+ end
+ in.read_buffer (buffer, 0, max_to_read)
+ variant
+ bytes_to_read
+ until
+ bytes_to_read = 0 or else
+ in.end_of_input or else
+ in.errno.is_not_ok
+ loop
+ fd_stdout.put_buffer (buffer, 0, in.last_read)
+ bytes_to_read := bytes_to_read - in.last_read
+ if bytes_to_read > buffer.capacity then
+ max_to_read := buffer.capacity
+ else
+ max_to_read := bytes_to_read.to_integer
+ end
+ in.read_buffer (buffer, 0, max_to_read)
+ end
+ end
+
+end
37 src/s3cat/system.xace
@@ -0,0 +1,37 @@
+<?xml version="1.0"?>
+<system name="s3cat">
+ <description>
+ system: "s3output"
+ </description>
+
+ <root class="S3_CAT" creation="make_no_rescue" if="${DEBUG}"/>
+ <root class="S3_CAT" creation="make" unless="${DEBUG}"/>
+
+ <option unless="${DEBUG}">
+ <option name="assertion" value="none"/>
+ <option name="finalize" value="true"/>
+ </option>
+ <option if="${DEBUG}">
+ <option name="split" value="true"/>
+ <option name="debug_tag" value="ejax"/>
+ <option name="assertion" value="all"/>
+ <option name="garbage_collector" value="none"/>
+ <option name="finalize" value="false"/>
+ <option name="debug" value="false"/>
+<!-- <option name="debug_tag" value="http_client"/> -->
+ </option>
+
+ <option if="${GOBO_EIFFEL}=se">
+ <option name="high_memory_compiler" value="true"/>
+ </option>
+
+ <cluster name="myself" location="."/>
+
+ <mount location="${S3}/library/library.xace"/>
+ <mount location="${EPOSIX}/src/library.xace"/>
+ <mount location="${EPOSIX}/test_suite/gobo/gobo.xace" if="${DEBUG}"/>
+ <mount location="${GOBO}/library/library.xace" unless="${DEBUG}"/>
+ <mount location="${EPOSIX}/test_suite/gobo/eiffel.xace" if="${DEBUG}"/>
+ <mount location="${GOBO}/library/kernel.xace" unless="${DEBUG}"/>
+
+</system>
25 src/s3ls/build.eant
@@ -0,0 +1,25 @@
+<?xml version="1.0"?>
+
+<project name="s3ls" default="help">
+
+ <description>
+ system: "S3 tools"
+ copyright: "Copyright (c) 2011 by Berend de Boer"
+ license: "MIT License (see LICENSE)"
+ </description>
+
+ <inherit>
+ <parent location="${GOBO}/misc/eiffel.eant">
+ <redefine target="init_system"/>
+ </parent>
+ </inherit>
+
+
+<!-- Implementation -->
+
+ <target name="init_system">
+ <set name="system" value="s3ls"/>
+ <set name="system_dir" value="${S3}/src/${system}"/>
+ </target>
+
+</project>
109 src/s3ls/s3_ls.e
@@ -0,0 +1,109 @@
+note
+
+ description:
+
+ "List contents of bucket"
+
+ library: "s3 tools"
+ author: "Berend de Boer <berend@pobox.com>"
+ copyright: "Copyright (c) 2011, Berend de Boer"
+ license: "MIT License (see LICENSE)"
+ date: "$Date$"
+ revision: "$Revision$"
+
+
+class
+
+ S3_LS
+
+
+inherit
+
+ S3_TOOL
+
+
+create
+
+ make,
+ make_no_rescue
+
+
+feature {NONE} -- Initialize
+
+ make_no_rescue
+ -- Initialize and run.
+ do
+ parse_arguments
+ list_files
+ end
+
+
+feature -- Commands
+
+ list_files
+ local
+ s3: S3_CLIENT
+ query: STRING
+ do
+ create s3.make (access_key_id, secret_access_key)
+ if region.occurrences > 0 then
+ s3.set_region (region.parameter)
+ end
+ query := "?"
+ if max_keys.occurrences > 0 then
+ query.append_string ("max-keys=" + max_keys.parameter.out)
+ end
+ if prefix_option.occurrences > 0 then
+ if query.count > 1 then
+ query.append_character ('&')
+ end
+ query.append_string ("prefix=" + prefix_option.parameter)
+ end
+ if delimiter.occurrences > 0 then
+ if query.count > 1 then
+ query.append_character ('&')
+ end
+ query.append_string ("delimiter=" + delimiter.parameter)
+ end
+ s3.get ("/" + bucket.parameter + query)
+ s3.read_response_with_redirect
+ if not s3.is_response_ok then
+ stdout.put_line ("Response code: " + s3.response_code.out)
+ end
+ stdout.put (s3.body.as_string)
+ end
+
+
+feature -- Access
+
+ delimiter: AP_STRING_OPTION
+ -- Groups keys sharing a prefix up to this delimiter
+
+ max_keys: AP_INTEGER_OPTION
+ -- Whatever you specify, Amazon might still truncate the results
+
+ prefix_option: AP_STRING_OPTION
+ -- Limits results to those starting with this prefix
+
+
+feature {NONE} -- Argument parsing
+
+ parse_arguments
+ local
+ parser: AP_PARSER
+ do
+ parser := new_default_parser (once "s3ls 0.1.0 (c) by Berend de Boer <berend@pobox.com>%NList contents of bucket.")
+ create max_keys.make ('m', "max-keys")
+ max_keys.set_description ("Maximum number of entries to return.")
+ parser.options.force_last (max_keys)
+ create prefix_option.make ('p', "prefix")
+ prefix_option.set_description ("Limit entries to those starting with this prefix.")
+ parser.options.force_last (prefix_option)
+ create delimiter.make ('d', "delimiter")
+ delimiter.set_description ("Group keys sharing a prefix up to this delimiter.")
+ parser.options.force_last (delimiter)
+ do_parse_arguments (parser)
+ end
+
+
+end
33 src/s3ls/system.xace
@@ -0,0 +1,33 @@
+<?xml version="1.0"?>
+<system name="s3ls">
+ <description>
+ system: "s3ls"
+ </description>
+
+ <root class="S3_LS" creation="make_no_rescue" if="${DEBUG}"/>
+ <root class="S3_LS" creation="make" unless="${DEBUG}"/>
+
+ <option unless="${DEBUG}">
+ <option name="assertion" value="none"/>
+ <option name="garbage_collector" value="none"/>
+ <option name="finalize" value="false"/>
+ </option>
+ <option if="${DEBUG}">
+ <option name="split" value="true"/>
+ <option name="debug_tag" value="ejax"/>
+ <option name="assertion" value="all"/>
+ <option name="garbage_collector" value="none"/>
+ <option name="finalize" value="false"/>
+ <option name="debug" value="false"/>
+<!-- <option name="debug_tag" value="http_client"/> -->
+ </option>
+
+ <cluster name="myself" location="${S3}/src/s3ls"/>
+ <cluster name="tools" location="${S3}/library/tools"/>
+
+ <mount location="${S3}/library/library.xace"/>
+ <mount location="${EPOSIX}/src/library.xace"/>
+ <mount location="${GOBO}/library/library.xace"/>
+ <mount location="${GOBO}/library/kernel.xace"/>
+
+</system>
25 src/s3store/build.eant
@@ -0,0 +1,25 @@
+<?xml version="1.0"?>
+
+<project name="s3store" default="help">
+
+ <description>
+ system: "S3 tools"
+ copyright: "Copyright (c) 2008 by Berend de Boer"
+ license: "MIT License (see LICENSE)"
+ </description>
+
+ <inherit>
+ <parent location="${GOBO}/misc/eiffel.eant">
+ <redefine target="init_system"/>
+ </parent>
+ </inherit>
+
+
+<!-- Implementation -->
+
+ <target name="init_system">
+ <set name="system" value="s3store"/>
+ <set name="system_dir" value="${S3}/src/${system}"/>
+ </target>
+
+</project>
138 src/s3store/s3_store.e
@@ -0,0 +1,138 @@
+note
+
+ description:
+
+ "Stream stdout to S3 bucket"
+
+ library: "s3 tools"
+ author: "Berend de Boer <berend@pobox.com>"
+ copyright: "Copyright (c) 2008-2011, Berend de Boer"
+ license: "MIT License (see LICENSE)"
+
+
+class
+
+ S3_STORE
+
+
+inherit
+
+ S3_TOOL
+
+
+inherit {NONE}
+
+ CAPI_TIME
+
+
+create
+
+ make,
+ make_no_rescue
+
+
+feature {NONE} -- Initialize
+
+ make_no_rescue
+ -- Initialize and run.
+ local
+ signal: STDC_SIGNAL
+ do
+ parse_arguments
+ create signal.make (SIGPIPE)
+ signal.set_ignore_action
+ signal.apply
+ copy_input_to_s3
+ if verbose.occurrences > 0 then
+ --fd_stderr.put_line (formatted_upload_speed (bytes_sent, start))
+ end
+ end
+
+
+feature -- Access
+
+ buffer_size: INTEGER = 16384
+
+
+feature {NONE} -- Argument parsing
+
+ key: AP_STRING_OPTION
+
+ part_size: AP_INTEGER_OPTION
+
+ nonblocking_io: AP_FLAG
+
+ parse_arguments
+ local
+ parser: AP_PARSER
+ do
+ parser := new_default_parser (once "s3store 1.0 (c) by Berend de Boer <berend@pobox.com>%NStream standard input to a given S3 object.")
+ create key.make ('k', "key")
+ key.set_description ("Key name.")
+ key.enable_mandatory
+ parser.options.force_last (key)
+ create part_size.make ('p', "part-size")
+ part_size.set_description ("Part size in MB; smaller parts mean writing to S3 starts earlier.")
+ parser.options.force_last (part_size)
+ create nonblocking_io.make ('n', "non-blocking-io")
+ nonblocking_io.set_description ("If your S3 write speed is slower than your input speed, non-blocking i/o might give you better performance.")
+ parser.options.force_last (nonblocking_io)
+ do_parse_arguments (parser)
+ end
+
+
+feature -- Writing
+
+ copy_input_to_s3
+ -- Read from stdin, dump to bucket.
+ local
+ writer: S3_WRITER
+ r: STRING
+ start: EPX_TIME
+ last_speed_update: INTEGER
+ now: INTEGER
+ do
+ if region.was_found then
+ r := region.parameter
+ end
+ create writer.make (access_key_id, secret_access_key, r, bucket.parameter, key.parameter, verbose.occurrences)
+ writer.set_verbose (verbose.occurrences)
+ if part_size.was_found then
+ writer.set_part_size (part_size.parameter * 1024 * 1024)
+ end
+ if nonblocking_io.occurrences > 0 then
+ fd_stdin.set_blocking_io (False)
+ end
+ create start.make_from_now
+ last_speed_update := posix_time
+ from
+ until
+ fd_stdin.end_of_input
+ loop
+ writer.write (fd_stdin)
+ if verbose.occurrences > 0 then
+ now := posix_time
+ if now - last_speed_update > 3 then
+ fd_stderr.put_line (formatted_upload_speed (writer.total_bytes_written, start))
+ last_speed_update := posix_time
+ end
+ end
+ end
+ writer.close
+ check
+ as_much_written_as_read: writer.total_bytes_read = writer.total_bytes_written
+ end
+ if verbose.occurrences > 0 then
+ fd_stderr.put_line (formatted_upload_speed (writer.total_bytes_written, start))
+ fd_stderr.put_line ("Finished, wrote a total of " + writer.total_bytes_written.out + " bytes.")
+ fd_stderr.put_line ("Statistics:")
+ fd_stderr.put_line (" non-blocking reads : " + writer.number_of_nonblocking_reads.out)
+ fd_stderr.put_line (" blocking reads : " + writer.number_of_blocking_reads.out)
+ fd_stderr.put_line (" output buffer underflows: " + writer.number_of_output_buffer_underflows.out)
+ fd_stderr.put_line (" output buffer overflows : " + writer.number_of_output_buffer_overflows.out)
+ fd_stderr.put_line (" s3 retries : " + writer.number_of_s3_retries.out)
+ end
+ end
+
+
+end
424 src/s3store/s3_writer.e
@@ -0,0 +1,424 @@
+note
+
+ description:
+
+ "Class that can do packet size writes to S3"
+
+ library: "s3 tools"
+ author: "Berend de Boer <berend@pobox.com>"
+ copyright: "Copyright (c) 2009, Berend de Boer"
+ license: "MIT License (see LICENSE)"
+
+
+class
+
+ S3_WRITER
+
+
+inherit {NONE}
+
+ EPX_CURRENT_PROCESS
+
+ KL_IMPORTED_STRING_ROUTINES
+
+
+create
+
+ make
+
+
+feature {NONE} -- Initialization
+
+ make (an_access_key_id, a_secret_access_key, a_region, a_bucket, a_key: STRING; a_verbose: INTEGER)
+ require
+ access_key_has_correct_length: an_access_key_id /= Void and then an_access_key_id.count = 20
+ secret_key_has_correct_length: a_secret_access_key /= Void and then a_secret_access_key.count = 40
+ bucket_not_empty: a_bucket /= Void and then not a_bucket.is_empty
+ key_not_empty: a_key /= Void and then not a_key.is_empty
+ do
+ set_part_size (67108864)
+ create s3.make (an_access_key_id, a_secret_access_key, a_region, a_bucket)
+ set_verbose (a_verbose)
+ key := a_key
+ -- No error handling right now, will build retry when needed
+ s3.set_continue_on_error
+ display_waiting := true
+ end
+
+
+feature -- Status
+
+ something_read,
+ something_written: BOOLEAN
+
+
+feature -- Access
+
+ s3: S3_CLIENT
+
+ key: STRING
+
+ upload_id: detachable STRING
+
+ part_size: INTEGER
+ -- Size of parts to upload; default is 64MB
+
+ total_bytes_read,
+ total_bytes_written: INTEGER_64
+
+ verbose: INTEGER
+
+
+feature -- Counters
+
+ number_of_nonblocking_reads,
+ number_of_blocking_reads,
+ number_of_output_buffer_underflows,
+ number_of_output_buffer_overflows,
+ number_of_s3_retries: INTEGER
+
+feature -- Change
+
+ set_part_size (a_part_size: INTEGER)
+ require
+ minimum_part_size_is_5mb: a_part_size >= 5242880
+ local
+ buffer: EPX_PARTIAL_BUFFER
+ do
+ deallocate_buffers
+ part_size := a_part_size
+ create buffer.allocate (part_size)
+ create ring_buffer.make (buffer)
+ current_input_buffer := ring_buffer
+ current_output_buffer := ring_buffer
+ ring_buffer.put_right (create {DS_LINKABLE [EPX_PARTIAL_BUFFER]}.make (create {EPX_PARTIAL_BUFFER}.allocate (part_size)))
+ ring_buffer.right.put_right (create {DS_LINKABLE [EPX_PARTIAL_BUFFER]}.make (create {EPX_PARTIAL_BUFFER}.allocate (part_size)))
+ ring_buffer.right.right.put_right (ring_buffer)
+ ensure
+ definition: part_size = a_part_size
+ end
+
+ set_verbose (a_verbose: INTEGER)
+ do
+ verbose := a_verbose
+ ensure
+ definition: verbose = a_verbose
+ end
+
+
+feature -- Writing
+
+ close
+ do
+ if verbose > 0 then
+ fd_stderr.put_line ("All input processed, finishing uploads.")
+ end
+ from
+ if current_output_buffer = current_input_buffer then
+ current_input_buffer := current_input_buffer.right
+ flush_output_buffer
+ else
+ flush_output_buffer
+ current_input_buffer := current_input_buffer.right
+ end
+ until
+ current_output_buffer = current_input_buffer
+ loop
+ flush_output_buffer
+ end
+
+ -- Reset
+ current_input_buffer := ring_buffer
+ current_output_buffer := ring_buffer
+ output_bytes_position := 0
+
+ if upload_id /= Void then
+ s3.complete_multipart_upload (upload_id, key)
+ if not s3.is_response_ok then
+ fd_stderr.put_line ("Failed to complete multi-part upload.")
+ if verbose > 0 then
+ fd_stderr.put (s3.body.as_string)
+ end
+ exit_with_failure
+ end
+ upload_id := Void
+ end
+ end
+
+ write (a_stream: ABSTRACT_DESCRIPTOR)
+ -- Read all available input from `a_stream' into our temporary storage, and
+ -- pump out as much as possible to S3.
+ require
+ stream_not_void: a_stream /= Void
+ open: a_stream.is_open_read
+ local
+ bytes_to_read: INTEGER
+ input_buffer: EPX_PARTIAL_BUFFER
+ do
+ something_written := false
+ -- First try if network likes some more
+ --write_output_buffer
+
+ -- Read as much from `a_stream' as possible
+ number_of_nonblocking_reads := number_of_nonblocking_reads + 1
+ input_buffer := current_input_buffer.item
+ bytes_to_read := input_buffer.capacity - input_buffer.count
+ a_stream.read_buffer (input_buffer, input_buffer.count, bytes_to_read)
+ something_read := a_stream.last_read > 0
+ if not something_read and then not something_written and then not a_stream.end_of_input then
+ -- Nothing to read, and nothing written, just block till
+ -- we have a full buffer.
+ -- Note that we only get here when non-blocking i/o has
+ -- been enabled.
+ if verbose > 2 then
+ fd_stderr.put_line ("Nothing to read or write, read input in blocking mode")
+ end
+ fd_stdin.set_blocking_io (True)
+ number_of_blocking_reads := number_of_blocking_reads + 1
+ a_stream.read_buffer (input_buffer, input_buffer.count, bytes_to_read)
+ something_read := a_stream.last_read > 0
+ if verbose > 2 then
+ fd_stderr.put_line ("Read " + a_stream.last_read.out + " bytes in blocking mode")
+ end
+ fd_stdin.set_blocking_io (False)
+ end
+ input_buffer.set_count (input_buffer.count + a_stream.last_read)
+ total_bytes_read := total_bytes_read + a_stream.last_read
+ if input_buffer.count = input_buffer.capacity then
+ if current_input_buffer.right = current_output_buffer then
+ -- No more intermediate storage, must empty a buffer to S3 first
+ number_of_output_buffer_overflows := number_of_output_buffer_overflows + 1
+ if verbose > 1 then
+ fd_stderr.put_line ("No more buffer space, force flushing to S3")
+ end
+ flush_output_buffer
+ end
+ if verbose > 1 then
+ fd_stderr.put_line (once "Moving to next input buffer")
+ end
+ current_input_buffer := current_input_buffer.right
+ input_buffer := current_input_buffer.item
+ input_buffer.set_count (0)
+ end
+
+ write_output_buffer
+ end
+
+
+feature {NONE} -- Implementation
+
+ ring_buffer: DS_LINKABLE [EPX_PARTIAL_BUFFER]
+
+ current_input_buffer: like ring_buffer
+
+ current_output_buffer: like ring_buffer
+
+ output_bytes_position: INTEGER
+
+ display_waiting: BOOLEAN
+
+ open_s3
+ -- Initiate part upload.
+ -- If multipart upload has not yet started, start it.
+ require
+ not_open: not s3.is_open
+ do
+ assert_multipart_upload_started
+ s3.begin_part_upload (upload_id, key, current_output_buffer.item.count)
+ if verbose > 1 then
+ fd_stderr.put_line ("Part " + (s3.parts.count + 1).out + " upload started.")
+ end
+ ensure
+ s3_open: s3.is_open
+ end
+
+ assert_multipart_upload_started
+ -- Make sure we have an upload_id.
+ do
+ if upload_id = Void then
+ if verbose > 1 then
+ fd_stderr.put_line ("Initiating multipart upload.")
+ end
+ upload_id := s3.multipart_upload_id (key)
+ if not s3.is_response_ok then
+ fd_stderr.put_line ("Failed to initiate multi-part upload.")
+ if verbose > 0 then
+ fd_stderr.put (s3.body.as_string)
+ end
+ exit_with_failure
+ end
+ -- What if fails?
+ if verbose > 1 then
+ fd_stderr.put_line ("Multipart upload initiated.")
+ end
+ end
+ ensure
+ upload_id_set: upload_id /= Void and then not upload_id.is_empty
+ end
+
+ write_output_buffer
+ -- Write as much of the output buffer to S3 as network will
+ -- accept. If entire buffer could be written, finish upload
+ -- of part, and move output buffer pointer to next one in the
+ -- ring.
+ -- Note that in case of restart, `total_bytes_written' might
+ -- actually have become smaller.
+ local
+ output_buffer: EPX_PARTIAL_BUFFER
+ bytes_to_write: INTEGER
+ do
+ -- We can only write if we have a completed input buffer,
+ -- because we must know the entire size of the part upload in
+ -- advance.
+ if current_output_buffer /= current_input_buffer then
+
+ -- Write as much as possible without blocking
+ output_buffer := current_output_buffer.item
+ bytes_to_write := output_buffer.count - output_bytes_position
+ if bytes_to_write > 0 then
+ if not s3.is_open then
+ open_s3
+ end
+ do_write_output_buffer
+ display_waiting := true
+
+ if output_bytes_position = output_buffer.count then
+ part_finished
+ next_output_buffer
+ end
+ end
+ else
+ -- Else we need to wait for more input
+ number_of_output_buffer_underflows := number_of_output_buffer_underflows + 1
+ if verbose > 1 and then display_waiting then
+ fd_stderr.put_line ("Nothing to write to S3, waiting for more input")
+ display_waiting := false
+ end
+ end
+ end
+
+ do_write_output_buffer
+ -- Single write from output buffer to S3.
+ require
+ open: s3.is_open
+ something_to_write: current_output_buffer.item.count > output_bytes_position
+ not_reading_and_writing_to_same_buffer: current_output_buffer /= current_input_buffer
+ local
+ output_buffer: EPX_PARTIAL_BUFFER
+ bytes_to_write: INTEGER
+ http: EPX_TEXT_IO_STREAM
+ do
+ output_buffer := current_output_buffer.item
+ bytes_to_write := output_buffer.count - output_bytes_position