diff --git a/README.md b/README.md
index dd83733..1f3a1f7 100644
--- a/README.md
+++ b/README.md
@@ -44,41 +44,50 @@ Or install it yourself as:
 require 'filestack'
 ```
 Initialize the client using your API key, and security if you are using it.
+
 ```ruby
 client = FilestackClient.new('YOUR_API_KEY', security: security_object)
 ```
 
 ### Uploading
-Filestack uses multipart uploading by default, which is faster for larger files. This can be turned off by passing in ```multipart: false```. Multipart is disabled when uploading external URLs.
 ```ruby
-filelink = client.upload(filepath: '/path/to/file')
-
-filelink = client.upload(filepath: '/path/to/file', multipart: false)
+filelink = client.upload(filepath: '/path/to/localfile')
 
 # OR
 
-filelink = client.upload(external_url: 'http://someurl.com')
+filelink = client.upload(external_url: 'http://domain.com/image.png')
 ```
 
 To upload a local and an external file with query parameters:
 ```ruby
-filelink = client.upload(filepath: '/path/to/file', options: {mimetype: 'image/png'})
+filelink = client.upload(filepath: '/path/to/localfile', options: { mimetype: 'image/png' })
 
-filelink = client.upload(external_url: 'http://someurl.com/image.png', options: {mimetype: 'image/jpeg'})
+filelink = client.upload(external_url: 'http://domain.com/image.png', options: { mimetype: 'image/jpeg' })
 ```
 
 To store a file on `dropbox`, `azure`, `gcs` or `rackspace`, you must have the chosen provider configured in the developer portal to enable this feature. By default the file is stored on `s3`. You can add more details of the storage in `options`.
 
 ```ruby
-filelink = client.upload(filepath: '/path/to/file', storage: 'dropbox', options: {path: 'folder_name/'})
+filelink = client.upload(filepath: '/path/to/file', storage: 's3', options: { path: 'folder_name/', container: 'container_name', location: 's3', region: 'region_name' })
+
+filelink = client.upload(external_url: 'http://someurl.com/image.png', options: { location: 'dropbox', path: 'folder_name' })
+```
+
+### Workflows
+Workflows allow you to wire up conditional logic and image processing to enforce business processes, automate ingest, and save valuable development time. To trigger workflow jobs on each upload:
+
+```ruby
+filelink = client.upload(filepath: '/path/to/file', options: { workflows: ["workflow_id_1", "workflow_id_2"] })
+
+# OR
+
-filelink = client.upload(external_url: 'http://someurl.com/image.png', storage: 'dropbox', options: {path: 'folder_name/'})
+filelink = client.upload(external_url: 'http://someurl.com/image.png', options: { workflows: ["workflow_id_1"] })
 ```
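Every successful `upload` call above resolves to a `FilestackFilelink`. As a quick orientation for the code changes below, here is a minimal sketch of what the returned object exposes; the handle value is illustrative, borrowed from the mocked responses in the test suite rather than a real upload:

```ruby
filelink = client.upload(filepath: '/path/to/file')

filelink.handle # => "somehandle" (illustrative)
filelink.url    # => "https://cdn.filestackcontent.com/somehandle"
```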
 
 ### Security
 If security is enabled on your account, or if you are using certain actions that require security (delete, overwrite and certain transformations), you will need to create a security object and pass it into the client on instantiation.
 
 ```ruby
-security = FilestackSecurity.new('YOUR_APP_SECRET', options: {call: %w[read store pick]})
+security = FilestackSecurity.new('YOUR_APP_SECRET', options: {call: %w[read store pick runWorkflow]})
 
 client = FilestackClient.new('YOUR_API_KEY', security: security)
 ```
diff --git a/lib/filestack/config.rb b/lib/filestack/config.rb
index 2c1582a..370932c 100644
--- a/lib/filestack/config.rb
+++ b/lib/filestack/config.rb
@@ -7,11 +7,6 @@ class FilestackConfig
   CDN_URL = 'https://cdn.filestackcontent.com'.freeze
   PROCESS_URL = 'https://process.filestackapi.com'.freeze
 
-  MULTIPART_START_URL = 'https://upload.filestackapi.com/multipart/start'.freeze
-  MULTIPART_UPLOAD_URL = 'https://upload.filestackapi.com/multipart/upload'.freeze
-  MULTIPART_COMMIT_URL = 'https://upload.filestackapi.com/multipart/commit'.freeze
-  MULTIPART_COMPLETE_URL = 'https://upload.filestackapi.com/multipart/complete'.freeze
-
   MULTIPART_PARAMS = %w[
     store_location store_region store_container
     store_path store_access
@@ -22,10 +17,28 @@ class FilestackConfig
   VERSION = Filestack::Ruby::VERSION
   HEADERS = {
     'User-Agent' => "filestack-ruby #{VERSION}",
-    'Filestack-Source' => "Ruby-#{VERSION}"
+    'Filestack-Source' => "Ruby-#{VERSION}",
+    'Content-Type' => "application/json",
+    'Accept-Encoding' => "application/json"
   }.freeze
 
   INTELLIGENT_ERROR_MESSAGES = ['BACKEND_SERVER', 'BACKEND_NETWORK', 'S3_SERVER', 'S3_NETWORK']
+
+  def self.multipart_start_url
+    "https://upload.filestackapi.com/multipart/start"
+  end
+
+  def self.multipart_upload_url(base_url)
+    "https://#{base_url}/multipart/upload"
+  end
+
+  def self.multipart_commit_url(base_url)
+    "https://#{base_url}/multipart/commit"
+  end
+
+  def self.multipart_complete_url(base_url)
+    "https://#{base_url}/multipart/complete"
+  end
 end
 
 class TransformConfig
diff --git a/lib/filestack/models/filestack_client.rb b/lib/filestack/models/filestack_client.rb
index c656734..89e8793 100644
--- a/lib/filestack/models/filestack_client.rb
+++ b/lib/filestack/models/filestack_client.rb
@@ -26,26 +26,19 @@ def initialize(apikey, security: nil)
   # Upload a local file or external url
   # @param [String] filepath The path of a local file
   # @param [String] external_url An external URL
-  # @param [Bool] multipart Switch for miltipart
-  #   (Default: true)
   # @param [Hash] options User-supplied upload options
   #
   # return [Filestack::FilestackFilelink]
-  def upload(filepath: nil, external_url: nil, multipart: true, options: {}, storage: 's3', intelligent: false, timeout: 60)
-    if filepath && external_url
-      return 'You cannot upload a URL and file at the same time'
-    end
-    response = if filepath && multipart
+  def upload(filepath: nil, external_url: nil, options: {}, intelligent: false, timeout: 60, storage: 'S3')
+    return 'You cannot upload a URL and file at the same time' if filepath && external_url
+
+    response = if filepath
                  multipart_upload(@apikey, filepath, @security, options, timeout, storage, intelligent: intelligent)
                else
-                 send_upload(
-                   @apikey,
-                   filepath: filepath,
-                   external_url: external_url,
-                   options: options,
-                   security: @security,
-                   storage: storage
-                 )
+                 send_upload(@apikey,
+                             external_url: external_url,
+                             options: options,
+                             security: @security)
               end
     FilestackFilelink.new(response['handle'], security: @security, apikey: @apikey)
   end
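With the `multipart` switch removed, `upload` now branches purely on which argument is present. A hedged sketch of the two call shapes under the new signature (the API key and URLs are placeholders):

```ruby
client = FilestackClient.new('YOUR_API_KEY')

# Local files always take the multipart path; `intelligent: true` opts into
# intelligent ingestion (sent as `fii` in the start/upload parameters).
filelink = client.upload(filepath: '/path/to/file', intelligent: true)

# External URLs go through send_upload and the CDN store task instead.
filelink = client.upload(external_url: 'http://domain.com/image.png')
```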
diff --git a/lib/filestack/utils/multipart_upload_utils.rb b/lib/filestack/utils/multipart_upload_utils.rb
index 993ce0e..6b557d4 100644
--- a/lib/filestack/utils/multipart_upload_utils.rb
+++ b/lib/filestack/utils/multipart_upload_utils.rb
@@ -23,16 +23,6 @@ def get_file_info(file)
     [filename, filesize, mimetype.to_s]
   end
 
-  def multipart_options(options)
-    [:region, :container, :path, :access].each do |key|
-      if options.has_key?(key)
-        options[:"store_#{key}"] = options[key]
-        options.delete(key)
-      end
-    end
-    return options
-  end
-
   # Send start response to multipart endpoint
   #
   # @param [String] apikey Filestack API key
@@ -51,23 +41,21 @@ def multipart_start(apikey, filename, filesize, mimetype, security, storage, opt
       filename: filename,
       mimetype: mimetype,
       size: filesize,
-      store_location: storage,
-      file: Tempfile.new(filename),
-      multipart: intelligent
+      store: { location: storage },
+      fii: intelligent
     }
-    options = multipart_options(options)
-    params = params.merge!(options) if options
+    params[:store].merge!(options) if options
 
     unless security.nil?
       params[:policy] = security.policy
       params[:signature] = security.signature
     end
 
-    response = Typhoeus.post(
-      FilestackConfig::MULTIPART_START_URL, body: params,
-      headers: FilestackConfig::HEADERS
-    )
+    response = Typhoeus.post(FilestackConfig.multipart_start_url,
+                             body: params.to_json,
+                             headers: FilestackConfig::HEADERS)
+
     if response.code == 200
       JSON.parse(response.body)
     else
@@ -93,7 +81,7 @@ def create_upload_jobs(apikey, filename, filepath, filesize, start_response, sto
     seek_point = 0
     while seek_point < filesize
       part_info = {
-        seek: seek_point,
+        seek_point: seek_point,
         filepath: filepath,
         filename: filename,
         apikey: apikey,
@@ -104,10 +92,10 @@ def create_upload_jobs(apikey, filename, filepath, filesize, start_response, sto
         upload_id: start_response['upload_id'],
         location_url: start_response['location_url'],
         start_response: start_response,
-        store_location: storage
+        store: { location: storage }
       }
-      options = multipart_options(options)
-      part_info = part_info.merge!(options) if options
+
+      part_info[:store].merge!(options) if options
 
       if seek_point + FilestackConfig::DEFAULT_CHUNK_SIZE > filesize
         size = filesize - (seek_point)
@@ -135,9 +123,9 @@
   # multipart uploads
   #
   # @return [Typhoeus::Response]
-  def upload_chunk(job, apikey, filepath, options)
+  def upload_chunk(job, apikey, filepath, options, storage)
     file = File.open(filepath)
-    file.seek(job[:seek])
+    file.seek(job[:seek_point])
     chunk = file.read(FilestackConfig::DEFAULT_CHUNK_SIZE)
     md5 = Digest::MD5.new
@@ -150,14 +138,14 @@ def upload_chunk(job, apikey, filepath, options)
       uri: job[:uri],
       region: job[:region],
       upload_id: job[:upload_id],
-      store_location: job[:store_location],
+      store: { location: storage },
       file: Tempfile.new(job[:filename])
     }
     data = data.merge!(options) if options
-    fs_response = Typhoeus.post(
-      FilestackConfig::MULTIPART_UPLOAD_URL, body: data,
-      headers: FilestackConfig::HEADERS
-    ).body
+
+    fs_response = Typhoeus.post(FilestackConfig.multipart_upload_url(job[:location_url]),
+                                body: data.to_json,
+                                headers: FilestackConfig::HEADERS).body
     fs_response = JSON.parse(fs_response)
     Typhoeus.put(
       fs_response['url'], headers: fs_response['headers'], body: chunk
     )
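`upload_chunk` now builds its endpoint from the job's `location_url` through the new `FilestackConfig` helpers rather than the fixed `MULTIPART_UPLOAD_URL` constant. A small sketch, using a hypothetical host as returned by `multipart/start`:

```ruby
location_url = 'upload-region.filestackapi.com' # hypothetical value from multipart/start

FilestackConfig.multipart_upload_url(location_url)
# => "https://upload-region.filestackapi.com/multipart/upload"
FilestackConfig.multipart_commit_url(location_url)
# => "https://upload-region.filestackapi.com/multipart/commit"
```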
@@ -172,17 +160,17 @@
   # multipart uploads
   #
   # @return [Array] Array of parts/etags strings
-  def run_uploads(jobs, apikey, filepath, options)
+  def run_uploads(jobs, apikey, filepath, options, storage)
     bar = ProgressBar.new(jobs.length)
     results = Parallel.map(jobs, in_threads: 4) do |job|
       response = upload_chunk(
-        job, apikey, filepath, options
+        job, apikey, filepath, options, storage
       )
       if response.code == 200
         bar.increment!
         part = job[:part]
         etag = response.headers[:etag]
-        "#{part}:#{etag}"
+        { part_number: part, etag: etag }
       end
     end
     results
@@ -213,17 +201,14 @@ def multipart_complete(apikey, filename, filesize, mimetype, start_response, par
       filename: filename,
       size: filesize,
       mimetype: mimetype,
-      store_location: storage,
-      file: Tempfile.new(filename)
+      store: { location: storage },
     }
-    options = multipart_options(options)
-    data.merge!(options) if options
-    data.merge!(intelligent ? { multipart: intelligent } : { parts: parts_and_etags.join(';') })
+    data[:store].merge!(options) if options
+    data.merge!(intelligent ? { fii: intelligent } : { parts: parts_and_etags })
 
-    Typhoeus.post(
-      FilestackConfig::MULTIPART_COMPLETE_URL, body: data,
-      headers: FilestackConfig::HEADERS
-    )
+    Typhoeus.post(FilestackConfig.multipart_complete_url(start_response['location_url']),
+                  body: data.to_json,
+                  headers: FilestackConfig::HEADERS)
   end
 
 # Run entire multipart process through with file and options
@@ -238,27 +223,24 @@ def multipart_complete(apikey, filename, filesize, mimetype, start_response, par
   # @return [Hash]
   def multipart_upload(apikey, filepath, security, options, timeout, storage, intelligent: false)
     filename, filesize, mimetype = get_file_info(filepath)
+
     start_response = multipart_start(
       apikey, filename, filesize, mimetype, security, storage, options, intelligent
     )
-    unless start_response['upload_type'].nil?
-      intelligent_enabled = ((start_response['upload_type'].include? 'intelligent_ingestion')) && intelligent
-    end
-
     jobs = create_upload_jobs(
       apikey, filename, filepath, filesize, start_response, storage, options
     )
 
-    if intelligent_enabled
+    if intelligent
       state = IntelligentState.new
-      run_intelligent_upload_flow(jobs, state)
+      run_intelligent_upload_flow(jobs, state, storage)
       response_complete = multipart_complete(
         apikey, filename, filesize, mimetype, start_response,
         nil, options, storage, intelligent
       )
     else
-      parts_and_etags = run_uploads(jobs, apikey, filepath, options)
+      parts_and_etags = run_uploads(jobs, apikey, filepath, options, storage)
       response_complete = multipart_complete(
         apikey, filename, filesize, mimetype, start_response,
         parts_and_etags, options, storage
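The parts bookkeeping changes shape along the way: `run_uploads` used to return `"part:etag"` strings that `multipart_complete` joined with `;`, and it now returns hashes that go out as JSON. Illustratively (the etag values are made up):

```ruby
# old wire format, joined as "1:etag_1;2:etag_2"
["1:etag_1", "2:etag_2"]

# new wire format, serialized by data.to_json in multipart_complete
[{ part_number: 1, etag: 'etag_1' }, { part_number: 2, etag: 'etag_2' }]
```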
- base = "#{FilestackConfig::API_URL}/store/#{storage}?key=#{apikey}" + def send_upload(apikey, external_url: nil, security: nil, options: nil) + base = "#{FilestackConfig::CDN_URL}/#{apikey}/#{build_store_task(options)}" if security policy = security.policy signature = security.signature - base = "#{base}&signature=#{signature}&policy=#{policy}" + base = "#{base}/security=s:#{signature},p:#{policy}" end - response = Typhoeus.post(base, body: data, headers: FilestackConfig::HEADERS) + response = Typhoeus.post("#{base}/#{external_url}", headers: FilestackConfig::HEADERS) if response.code == 200 response_body = JSON.parse(response.body) @@ -193,7 +199,7 @@ def change_offset(working_offset, state) # @param [IntelligentState] state An IntelligentState object # # @return [Array] - def run_intelligent_upload_flow(jobs, state) + def run_intelligent_upload_flow(jobs, state, storage) bar = ProgressBar.new(jobs.length) generator = create_intelligent_generator(jobs) working_offset = FilestackConfig::DEFAULT_OFFSET_SIZE @@ -201,7 +207,7 @@ def run_intelligent_upload_flow(jobs, state) batch = get_generator_batch(generator) # run parts Parallel.map(batch, in_threads: 4) do |part| - state = run_intelligent_uploads(part, state) + state = run_intelligent_uploads(part, state, storage) # condition: a chunk has failed but we have not reached the maximum retries while bad_state(state) # condition: timeout to S3, requiring offset size to be changed @@ -213,7 +219,7 @@ def run_intelligent_upload_flow(jobs, state) sleep(state.backoff) end state.add_retry - state = run_intelligent_uploads(part, state) + state = run_intelligent_uploads(part, state, storage) end raise "Upload has failed. Please try again later." unless state.ok bar.increment! @@ -269,13 +275,14 @@ def create_upload_job_chunks(jobs, state, apikey, filename, filepath, filesize, # multipart_start # # @return [Dict] - def chunk_job(job, state, apikey, filename, filepath, filesize, start_response) + def chunk_job(job, state, apikey, filename, filepath, filesize, start_response, storage) offset = 0 - seek_point = job[:seek] + seek_point = job[:seek_point] chunk_list = [] + while (offset < FilestackConfig::DEFAULT_CHUNK_SIZE) && (seek_point + offset) < filesize chunk_list.push( - seek: seek_point, + seek_point: seek_point, filepath: filepath, filename: filename, apikey: apikey, @@ -285,7 +292,7 @@ def chunk_job(job, state, apikey, filename, filepath, filesize, start_response) region: start_response['region'], upload_id: start_response['upload_id'], location_url: start_response['location_url'], - store_location: job[:store_location], + store: { location: storage }, offset: offset ) offset += state.offset @@ -300,15 +307,15 @@ def chunk_job(job, state, apikey, filename, filepath, filesize, start_response) # @param [IntelligentState] state An IntelligentState object # # @return [IntelligentState] - def run_intelligent_uploads(part, state) + def run_intelligent_uploads(part, state, storage) failed = false chunks = chunk_job( part, state, part[:apikey], part[:filename], part[:filepath], - part[:filesize], part[:start_response] + part[:filesize], part[:start_response], storage ) Parallel.map(chunks, in_threads: 3) do |chunk| begin - upload_chunk_intelligently(chunk, state, part[:apikey], part[:filepath], part[:options]) + upload_chunk_intelligently(chunk, state, part[:apikey], part[:filepath], part[:options], storage) rescue => e state.error_type = e.message failed = true @@ -322,6 +329,7 @@ def run_intelligent_uploads(part, state) else state.ok = true end + 
@@ -300,15 +307,15 @@
   # @param [IntelligentState] state An IntelligentState object
   #
   # @return [IntelligentState]
-  def run_intelligent_uploads(part, state)
+  def run_intelligent_uploads(part, state, storage)
     failed = false
     chunks = chunk_job(
       part, state, part[:apikey], part[:filename], part[:filepath],
-      part[:filesize], part[:start_response]
+      part[:filesize], part[:start_response], storage
     )
     Parallel.map(chunks, in_threads: 3) do |chunk|
       begin
-        upload_chunk_intelligently(chunk, state, part[:apikey], part[:filepath], part[:options])
+        upload_chunk_intelligently(chunk, state, part[:apikey], part[:filepath], part[:options], storage)
       rescue => e
         state.error_type = e.message
         failed = true
@@ -322,6 +329,7 @@
     else
       state.ok = true
     end
+
     commit_params = {
       apikey: part[:apikey],
       uri: part[:uri],
@@ -329,12 +337,14 @@
       upload_id: part[:upload_id],
       size: part[:filesize],
       part: part[:part],
-      location_url: part[:location_url],
-      store_location: part[:store_location],
-      file: Tempfile.new(part[:filename])
+      location_url: part[:start_response]['location_url'],
+      store: { location: storage }
     }
-    response = Typhoeus.post(FilestackConfig::MULTIPART_COMMIT_URL, body: commit_params,
-                             headers: FilestackConfig::HEADERS)
+
+    response = Typhoeus.post(FilestackConfig.multipart_commit_url(commit_params[:location_url]),
+                             body: commit_params.to_json,
+                             headers: FilestackConfig::HEADERS)
+
     if response.code == 200
       state.reset
     else
@@ -354,9 +364,10 @@
   # multipart uploads
   #
   # @return [Typhoeus::Response]
-  def upload_chunk_intelligently(job, state, apikey, filepath, options)
+  def upload_chunk_intelligently(job, state, apikey, filepath, options, storage)
     file = File.open(filepath)
-    file.seek(job[:seek] + job[:offset])
+    file.seek(job[:seek_point] + job[:offset])
+
     chunk = file.read(state.offset)
     md5 = Digest::MD5.new
     md5 << chunk
@@ -368,17 +379,17 @@ def upload_chunk_intelligently(job, state, apikey, filepath, options)
       uri: job[:uri],
       region: job[:region],
       upload_id: job[:upload_id],
-      store_location: job[:store_location],
+      store: { location: storage },
       offset: job[:offset],
-      file: Tempfile.new(job[:filename]),
-      'multipart' => 'true'
+      fii: true
     }
     data = data.merge!(options) if options
-    fs_response = Typhoeus.post(
-      FilestackConfig::MULTIPART_UPLOAD_URL, body: data,
-      headers: FilestackConfig::HEADERS
-    )
+
+    fs_response = Typhoeus.post(FilestackConfig.multipart_upload_url(job[:location_url]),
+                                body: data.to_json,
+                                headers: FilestackConfig::HEADERS)
+
     # POST to multipart/upload
     begin
       unless fs_response.code == 200
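The per-part commit in the intelligent path follows the same conventions as the other multipart calls: JSON body, region-specific host. Roughly what `run_intelligent_uploads` posts per committed part (the host and values are hypothetical, and the hash is abridged):

```ruby
Typhoeus.post(
  FilestackConfig.multipart_commit_url('upload-region.filestackapi.com'),
  body: { apikey: 'YOUR_API_KEY', size: 10_000, part: 1,
          store: { location: 's3' } }.to_json,
  headers: FilestackConfig::HEADERS
)
```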
diff --git a/spec/filestack/ruby_spec.rb b/spec/filestack/ruby_spec.rb
index 4b76522..0814101 100644
--- a/spec/filestack/ruby_spec.rb
+++ b/spec/filestack/ruby_spec.rb
@@ -53,22 +53,21 @@ def code
     @test_filename = 'calvinandhobbes.jpg'
     @test_filesize = 10000
     @test_mimetype = 'image/jpeg'
+    @storage = 's3'
     @start_response = {
       'uri' => 'uri',
       'region' => 'region',
       'upload_id' => 'upload_id',
       'location_url' => 'location_url',
-      'upload_type' => 'not_intelligent'
     }
     @intelligent_start_response = {
       'uri' => 'uri',
       'region' => 'region',
       'upload_id' => 'upload_id',
       'location_url' => 'location_url',
-      'upload_type' => 'intelligent_ingestion'
     }
     @job = {
-      seek: 0,
+      seek_point: 0,
       filepath: @test_filepath,
       filename: @test_filename,
       apikey: @test_apikey,
@@ -76,7 +75,7 @@ def code
       uri: @start_response[:uri],
       region: @start_response[:region],
       upload_id: @start_response[:upload_id],
-      location_url: @start_response[:location_url]
+      store: { location: @storage }
     }
     @response = GeneralResponse.new(@start_response)
     @test_client = FilestackClient.new(@test_apikey)
@@ -121,40 +120,42 @@ def code
     expect(@test_secure_filelink.url)
   end
 
-  it 'FilestackFilelink uploads without multipart' do
+  it 'FilestackFilelink uploads' do
     class UploadResponse
       def code
         200
       end
 
       def body
-        {'url' => 'https://cdn.filestackcontent.com/somehandle'}.to_json
+        { handle: 'somehandle',
+          url: 'https://cdn.filestackcontent.com/somehandle' }.to_json
       end
+
     end
     allow(Typhoeus).to receive(:post)
       .and_return(UploadResponse.new)
-    filelink = @test_secure_client.upload(filepath: @test_filepath, multipart: false)
+    filelink = @test_secure_client.upload(filepath: @test_filepath)
     expect(filelink.handle).to eq('somehandle')
   end
 
-  it 'FilestackFilelink uploads external without multipart' do
+  it 'FilestackFilelink uploads external' do
     class UploadResponse
       def code
         200
       end
 
       def body
-        {'url' => 'https://cdn.filestackcontent.com/somehandle'}.to_json
+        { url: 'https://cdn.filestackcontent.com/somehandle' }.to_json
       end
     end
     allow(Typhoeus).to receive(:post)
       .and_return(UploadResponse.new)
-    filelink = @test_secure_client.upload(external_url: @test_filepath, multipart: false)
+    filelink = @test_secure_client.upload(external_url: @test_filepath)
     expect(filelink.handle).to eq('somehandle')
   end
 
   it 'Does not upload when both url and filepath are present' do
-    bad = @test_secure_client.upload(filepath: @test_filepath, external_url: 'someurl', multipart: false)
+    bad = @test_secure_client.upload(filepath: @test_filepath, external_url: 'someurl')
     expect(bad).to eq('You cannot upload a URL and file at the same time')
   end
 
@@ -194,7 +195,7 @@
   it 'returns the correct create_upload_jobs array' do
     jobs = create_upload_jobs(
       @test_apikey, @test_filename, @test_filepath,
-      @test_filesize, @start_response, 's3', {}
+      @test_filesize, @start_response, @storage, {}
     )
     expect(jobs[0][:filepath]).to eq(@test_filepath)
   end
@@ -213,7 +214,7 @@
       .and_return(@response)
 
     response = MultipartUploadUtils.upload_chunk(
-      @job, @test_apikey, @test_filepath, nil
+      @job, @test_apikey, @test_filepath, nil, @storage
     )
     expect(response.body).to eq(@response.body)
   end
@@ -234,20 +235,20 @@ def code
       jobs.push(@job)
     end
     part = @job[:part]
-    result_string = "#{part}:someetag"
+    result = { part_number: part, etag: "someetag" }
     allow(MultipartUploadUtils).to receive(:upload_chunk)
       .and_return(HeadersResponse.new)
     target_results = []
     2.times do
-      target_results.push(result_string)
+      target_results.push(result)
     end
 
     results = MultipartUploadUtils.run_uploads(
-      jobs, @test_apikey, @test_filepath, nil
+      jobs, @test_apikey, @test_filepath, nil, @storage
     )
     2.times do |i|
-      expect(results[i]).to eq(result_string)
+      expect(results[i]).to eq(result)
     end
   end
 
@@ -255,7 +256,7 @@
     allow(Typhoeus).to receive(:post).and_return(@response)
     response = MultipartUploadUtils.multipart_complete(
       @test_apikey, @test_filename, @test_filesize, @test_mimetype,
-      @start_response, %w[somepartsandetags somepartsandetags], {}, 's3'
+      @start_response, %w[somepartsandetags somepartsandetags], {}, @storage
     )
     expect(response.body).to eq(@response.body)
   end
@@ -268,7 +269,7 @@
     allow_any_instance_of(MultipartUploadUtils).to receive(:multipart_complete)
       .and_return(GeneralResponse.new(@start_response))
     response = MultipartUploadUtils.multipart_upload(
-      @test_apikey, @test_filepath, nil, {}, 60, 's3', intelligent: false
+      @test_apikey, @test_filepath, nil, {}, 60, @storage, intelligent: false
     )
     expect(response.to_json).to eq(@response.body)
   end
@@ -324,12 +325,12 @@ def code
     state = IntelligentState.new
     filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath)
     jobs = create_upload_jobs(
-      @test_apikey, filename, @test_filepath, filesize, @start_response, 's3', {}
+      @test_apikey, filename, @test_filepath, filesize, @start_response, @storage, {}
     )
     allow(IntelligentUtils).to receive(:run_intelligent_uploads)
       .and_return(state)
-    IntelligentUtils.run_intelligent_upload_flow(jobs, state)
+    IntelligentUtils.run_intelligent_upload_flow(jobs, state, @storage)
     expect(true)
   end
 
@@ -338,26 +339,26 @@ def code
   it 'runs intelligent upload flow with failure' do
     state = IntelligentState.new
     filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath)
     state.ok = false
     jobs = MultipartUploadUtils.create_upload_jobs(
-      @test_apikey, filename, @test_filepath, filesize, @start_response, 's3', {}
+      @test_apikey, filename, @test_filepath, filesize, @start_response, @storage, {}
     )
     allow(IntelligentUtils).to receive(:run_intelligent_uploads)
       .and_return(state)
-    expect {IntelligentUtils.run_intelligent_upload_flow(jobs, state)}.to raise_error(RuntimeError)
+    expect {IntelligentUtils.run_intelligent_upload_flow(jobs, state, @storage)}.to raise_error(RuntimeError)
   end
 
   it 'runs intelligent uploads without error' do
     state = IntelligentState.new
     filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath)
     jobs = create_upload_jobs(
-      @test_apikey, filename, @test_filepath, filesize, @start_response, 's3', {}
+      @test_apikey, filename, @test_filepath, filesize, @start_response, @storage, {}
     )
     allow(IntelligentUtils).to receive(:upload_chunk_intelligently)
       .and_return(state)
     allow(Typhoeus).to receive(:post)
       .and_return(@response)
-    state = IntelligentUtils.run_intelligent_uploads(jobs[0], state)
+    state = IntelligentUtils.run_intelligent_uploads(jobs[0], state, @storage)
     expect(state.ok)
   end
 
@@ -365,12 +366,12 @@
     state = IntelligentState.new
     filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath)
     jobs = create_upload_jobs(
-      @test_apikey, filename, @test_filepath, filesize, @start_response, 's3', {}
+      @test_apikey, filename, @test_filepath, filesize, @start_response, @storage, {}
     )
     allow(IntelligentUtils).to receive(:upload_chunk_intelligently)
       .and_raise('FAILURE')
-    state = IntelligentUtils.run_intelligent_uploads(jobs[0], state)
+    state = IntelligentUtils.run_intelligent_uploads(jobs[0], state, @storage)
     expect(state.ok).to eq(false)
     expect(state.error_type).to eq('FAILURE')
   end
 
@@ -379,66 +380,66 @@ def code
     state = IntelligentState.new
     filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath)
     jobs = create_upload_jobs(
-      @test_apikey, filename, @test_filepath, filesize, @start_response, 's3', {}
+      @test_apikey, filename, @test_filepath, filesize, @start_response, @storage, {}
     )
     state.ok = false
     state.error_type = 'BACKEND_SERVER'
     allow_any_instance_of(IntelligentUtils).to receive(:run_intelligent_uploads)
       .and_return(state)
-    expect {IntelligentUtils.run_intelligent_upload_flow(jobs, state)}.to raise_error(RuntimeError)
+    expect {IntelligentUtils.run_intelligent_upload_flow(jobs, state, @storage)}.to raise_error(RuntimeError)
   end
 
   it 'retries upon network failure' do
     state = IntelligentState.new
     filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath)
     jobs = create_upload_jobs(
-      @test_apikey, filename, @test_filepath, filesize, @start_response, 's3', {}
+      @test_apikey, filename, @test_filepath, filesize, @start_response, @storage, {}
     )
     state.ok = false
     state.error_type = 'S3_NETWORK'
     allow_any_instance_of(IntelligentUtils).to receive(:run_intelligent_uploads)
       .and_return(state)
-    expect {IntelligentUtils.run_intelligent_upload_flow(jobs, state)}.to raise_error(RuntimeError)
+    expect {IntelligentUtils.run_intelligent_upload_flow(jobs, state, @storage)}.to raise_error(RuntimeError)
   end
 
   it 'retries upon server failure' do
     state = IntelligentState.new
     filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath)
     jobs = create_upload_jobs(
-      @test_apikey, filename, @test_filepath, filesize, @start_response, 's3', {}
+      @test_apikey, filename, @test_filepath, filesize, @start_response, @storage, {}
     )
     state.ok = false
     state.error_type = 'S3_SERVER'
     allow_any_instance_of(IntelligentUtils).to receive(:run_intelligent_uploads)
       .and_return(state)
-    expect {IntelligentUtils.run_intelligent_upload_flow(jobs, state)}.to raise_error(RuntimeError)
+    expect {IntelligentUtils.run_intelligent_upload_flow(jobs, state, @storage)}.to raise_error(RuntimeError)
   end
 
   it 'retries upon backend network failure' do
     state = IntelligentState.new
     filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath)
     jobs = create_upload_jobs(
-      @test_apikey, filename, @test_filepath, filesize, @start_response, 's3', {}
+      @test_apikey, filename, @test_filepath, filesize, @start_response, @storage, {}
    )
     state.ok = false
     state.error_type = 'BACKEND_NETWORK'
     allow_any_instance_of(IntelligentUtils).to receive(:run_intelligent_uploads)
       .and_return(state)
-    expect {IntelligentUtils.run_intelligent_upload_flow(jobs, state)}.to raise_error(RuntimeError)
+    expect {IntelligentUtils.run_intelligent_upload_flow(jobs, state, @storage)}.to raise_error(RuntimeError)
   end
 
   it 'runs intelligent uploads with 400 error' do
     state = IntelligentState.new
     filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath)
     jobs = create_upload_jobs(
-      @test_apikey, filename, @test_filepath, filesize, @start_response, 's3', {}
+      @test_apikey, filename, @test_filepath, filesize, @start_response, @storage, {}
     )
     allow(IntelligentUtils).to receive(:upload_chunk_intelligently)
       .and_return(true)
     allow(Typhoeus).to receive(:post)
       .and_return(Response.new(400))
-    state = IntelligentUtils.run_intelligent_uploads(jobs[0], state)
+    state = IntelligentUtils.run_intelligent_uploads(jobs[0], state, @storage)
     expect(state.ok).to eq(false)
   end
 
@@ -459,7 +460,7 @@ def code
     state = IntelligentState.new
     filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath)
     jobs = create_upload_jobs(
-      @test_apikey, filename, @test_filepath, filesize, @start_response, 's3', {}
+      @test_apikey, filename, @test_filepath, filesize, @start_response, @storage, {}
     )
 
     allow(Typhoeus).to receive(:post)
@@ -467,7 +468,7 @@ def code
       .and_return(@response)
     allow(Typhoeus).to receive(:put)
       .and_return(@response)
     jobs[0][:offset] = 0
-    response = IntelligentUtils.upload_chunk_intelligently(jobs[0], state, @test_apikey, @test_filepath, {})
+    response = IntelligentUtils.upload_chunk_intelligently(jobs[0], state, @test_apikey, @test_filepath, {}, @storage)
     expect(response.code).to eq(200)
   end
 
@@ -488,7 +489,7 @@ def code
     state = IntelligentState.new
     filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath)
     jobs = create_upload_jobs(
-      @test_apikey, filename, @test_filepath, filesize, @start_response, 's3', {}
+      @test_apikey, filename, @test_filepath, filesize, @start_response, @storage, {}
     )
 
     allow(Typhoeus).to receive(:post)
@@ -496,7 +497,7 @@ def code
     allow(Typhoeus).to receive(:put)
       .and_return(Response.new(400))
     jobs[0][:offset] = 0
-    expect {IntelligentUtils.upload_chunk_intelligently(jobs[0], state, @test_apikey, @test_filepath, {})}.to raise_error(RuntimeError)
+    expect {IntelligentUtils.upload_chunk_intelligently(jobs[0], state, @test_apikey, @test_filepath, {}, @storage)}.to raise_error(RuntimeError)
   end