From 277613c73109ff284123a57bc7f39815098f2b62 Mon Sep 17 00:00:00 2001 From: Gabriela Date: Tue, 29 Sep 2020 20:59:31 +0200 Subject: [PATCH] Add IO object upload --- README.md | 26 +++- lib/filestack/config.rb | 2 + lib/filestack/models/filestack_client.rb | 20 +-- lib/filestack/utils/multipart_upload_utils.rb | 64 ++++++--- lib/filestack/utils/utils.rb | 22 ++- spec/filestack/ruby_spec.rb | 126 +++++++++++------- 6 files changed, 168 insertions(+), 92 deletions(-) diff --git a/README.md b/README.md index 1f3a1f7..5e32ef7 100644 --- a/README.md +++ b/README.md @@ -55,13 +55,33 @@ filelink = client.upload(filepath: '/path/to/localfile') # OR filelink = client.upload(external_url: 'http://domain.com/image.png') + +# OR + +file = StringIO.new +filelink = client.upload(io: file) ``` -To upload a local and an external file with query parameters: +To upload a local file, an IO object or an external file with the following optional options: + ```ruby -filelink = client.upload(filepath: '/path/to/localfile', options: { mimetype: 'image/png' }) +options = { + filename: 'string', + location: 'string', + path: 'string', + container: 'string', + mimetype: 'string', + region: 'string', + workflows: ['workflow-id-1', 'workflow-id-2'], + upload_tags: { + key: 'value', + key2: 'value' + } +} + +filelink = client.upload(filepath: '/path/to/localfile', options: { mimetype: 'image/png', filename: 'custom_filename.png' }) -filelink = client.upload(external_url: 'http://domain.com/image.png', options: { mimetype: 'image/jpeg' }) +filelink = client.upload(external_url: 'http://domain.com/image.png', options: { mimetype: 'image/jpeg', filename: 'custom_filename.png' }) ``` To store file on `dropbox`, `azure`, `gcs` or `rackspace`, you must have the chosen provider configured in the developer portal to enable this feature. By default the file is stored on `s3`. You can add more details of the storage in `options`. 
diff --git a/lib/filestack/config.rb b/lib/filestack/config.rb index 370932c..9ce05c5 100644 --- a/lib/filestack/config.rb +++ b/lib/filestack/config.rb @@ -22,6 +22,8 @@ class FilestackConfig 'Accept-Encoding' => "application/json" }.freeze + DEFAULT_UPLOAD_MIMETYPE = 'application/octet-stream' + INTELLIGENT_ERROR_MESSAGES = ['BACKEND_SERVER', 'BACKEND_NETWORK', 'S3_SERVER', 'S3_NETWORK'] def self.multipart_start_url diff --git a/lib/filestack/models/filestack_client.rb b/lib/filestack/models/filestack_client.rb index 89e8793..87025e8 100644 --- a/lib/filestack/models/filestack_client.rb +++ b/lib/filestack/models/filestack_client.rb @@ -23,23 +23,25 @@ def initialize(apikey, security: nil) @security = security end - # Upload a local file or external url + # Upload a local file, external url or IO object # @param [String] filepath The path of a local file # @param [String] external_url An external URL + # @param [StringIO] io The IO object # @param [Hash] options User-supplied upload options + # @param [Boolean] intelligent Upload file using Filestack Intelligent Ingestion + # @param [String] storage Default storage to be used for uploads # # return [Filestack::FilestackFilelink] - def upload(filepath: nil, external_url: nil, options: {}, intelligent: false, timeout: 60, storage: 'S3') - return 'You cannot upload a URL and file at the same time' if filepath && external_url + def upload(filepath: nil, external_url: nil, io: nil, options: {}, intelligent: false, timeout: 60, storage: 'S3') + return 'You cannot upload a URL and file at the same time' if (filepath || io) && external_url - response = if filepath - multipart_upload(@apikey, filepath, @security, options, timeout, storage, intelligent: intelligent) + response = if external_url + send_upload(@apikey, external_url, @security, options) else - send_upload(@apikey, - external_url: external_url, - options: options, - security: @security) + return 'You cannot upload IO object and file at the same time' if io && 
filepath + multipart_upload(@apikey, filepath, io, @security, options, timeout, storage, intelligent) end + FilestackFilelink.new(response['handle'], security: @security, apikey: @apikey) end # Transform an external URL diff --git a/lib/filestack/utils/multipart_upload_utils.rb b/lib/filestack/utils/multipart_upload_utils.rb index 6b557d4..979e351 100644 --- a/lib/filestack/utils/multipart_upload_utils.rb +++ b/lib/filestack/utils/multipart_upload_utils.rb @@ -13,13 +13,22 @@ include IntelligentUtils # Includes all the utility functions for Filestack multipart uploads module MultipartUploadUtils - def get_file_info(file) - filename = File.basename(file) + + def get_file_attributes(file, options = {}) + filename = options[:filename] || File.basename(file) + mimetype = options[:mimetype] || MimeMagic.by_magic(File.open(file)) || FilestackConfig::DEFAULT_UPLOAD_MIMETYPE filesize = File.size(file) - mimetype = MimeMagic.by_magic(File.open(file)) - if mimetype.nil? - mimetype = 'application/octet-stream' - end + + [filename, filesize, mimetype.to_s] + end + + def get_io_attributes(io, options = {}) + filename = options[:filename] || 'unnamed_file' + mimetype = options[:mimetype] || FilestackConfig::DEFAULT_UPLOAD_MIMETYPE + + io.seek(0, IO::SEEK_END) + filesize = io.tell + [filename, filesize, mimetype.to_s] end @@ -31,8 +40,10 @@ def get_file_info(file) # @param [String] mimetype Mimetype of incoming file # @param [FilestackSecurity] security Security object with # policy/signature + # @param [String] storage Default storage to be used for uploads # @param [Hash] options User-defined options for # multipart uploads + # @param [Bool] intelligent Upload file using Filestack Intelligent Ingestion # # @return [Typhoeus::Response] def multipart_start(apikey, filename, filesize, mimetype, security, storage, options = {}, intelligent) @@ -67,22 +78,21 @@ def multipart_start(apikey, filename, filesize, mimetype, security, storage, opt # # @param [String] apikey Filestack API 
key # @param [String] filename Name of incoming file - # @param [String] filepath Local path to file # @param [Int] filesize Size of incoming file # @param [Typhoeus::Response] start_response Response body from # multipart_start + # @param [String] storage Default storage to be used for uploads # @param [Hash] options User-defined options for # multipart uploads # # @return [Array] - def create_upload_jobs(apikey, filename, filepath, filesize, start_response, storage, options) + def create_upload_jobs(apikey, filename, filesize, start_response, storage, options) jobs = [] part = 1 seek_point = 0 while seek_point < filesize part_info = { seek_point: seek_point, - filepath: filepath, filename: filename, apikey: apikey, part: part, @@ -92,7 +102,7 @@ def create_upload_jobs(apikey, filename, filepath, filesize, start_response, sto upload_id: start_response['upload_id'], location_url: start_response['location_url'], start_response: start_response, - store: { location: storage } + store: { location: storage }, } part_info[:store].merge!(options) if options @@ -116,15 +126,16 @@ def create_upload_jobs(apikey, filename, filepath, filesize, start_response, sto # @param [Hash] job Hash of options needed # to upload a chunk # @param [String] apikey Filestack API key - # @param [String] location_url Location url given back + # @param [String] filepath Location url given back # from endpoint - # @param [String] filepath Local path to file + # @param [StringIO] io The IO object # @param [Hash] options User-defined options for # multipart uploads + # @param [String] storage Default storage to be used for uploads # # @return [Typhoeus::Response] - def upload_chunk(job, apikey, filepath, options, storage) - file = File.open(filepath) + def upload_chunk(job, apikey, filepath, io, options, storage) + file = filepath ? 
File.open(filepath) : io file.seek(job[:seek_point]) chunk = file.read(FilestackConfig::DEFAULT_CHUNK_SIZE) @@ -139,7 +150,6 @@ def upload_chunk(job, apikey, filepath, options, storage) region: job[:region], upload_id: job[:upload_id], store: { location: storage }, - file: Tempfile.new(job[:filename]) } data = data.merge!(options) if options @@ -158,13 +168,14 @@ def upload_chunk(job, apikey, filepath, options, storage) # @param [String] filepath Local path to file # @param [Hash] options User-defined options for # multipart uploads + # @param [String] storage Default storage to be used for uploads # # @return [Array] Array of parts/etags strings - def run_uploads(jobs, apikey, filepath, options, storage) + def run_uploads(jobs, apikey, filepath, io, options, storage) bar = ProgressBar.new(jobs.length) results = Parallel.map(jobs, in_threads: 4) do |job| response = upload_chunk( - job, apikey, filepath, options, storage + job, apikey, filepath, io, options, storage ) if response.code == 200 bar.increment! 
@@ -190,6 +201,8 @@ def run_uploads(jobs, apikey, filepath, options, storage) # part numbers # @param [Hash] options User-defined options for # multipart uploads + # @param [String] storage Default storage to be used for uploads + # @param [Boolean] intelligent Upload file using Filestack Intelligent Ingestion # # @return [Typhoeus::Response] def multipart_complete(apikey, filename, filesize, mimetype, start_response, parts_and_etags, options, storage, intelligent = false) @@ -215,32 +228,39 @@ def multipart_complete(apikey, filename, filesize, mimetype, start_response, par # # @param [String] apikey Filestack API key # @param [String] filename Name of incoming file + # @param [StringIO] io The IO object # @param [FilestackSecurity] security Security object with # policy/signature # @param [Hash] options User-defined options for # multipart uploads + # @param [String] storage Default storage to be used for uploads + # @param [Boolean] intelligent Upload file using Filestack Intelligent Ingestion # # @return [Hash] - def multipart_upload(apikey, filepath, security, options, timeout, storage, intelligent: false) - filename, filesize, mimetype = get_file_info(filepath) + def multipart_upload(apikey, filepath, io, security, options, timeout, storage, intelligent = false) + filename, filesize, mimetype = if filepath + get_file_attributes(filepath, options) + else + get_io_attributes(io, options) + end start_response = multipart_start( apikey, filename, filesize, mimetype, security, storage, options, intelligent ) jobs = create_upload_jobs( - apikey, filename, filepath, filesize, start_response, storage, options + apikey, filename, filesize, start_response, storage, options ) if intelligent state = IntelligentState.new - run_intelligent_upload_flow(jobs, state, storage) + run_intelligent_upload_flow(jobs, filepath, io, state, storage) response_complete = multipart_complete( apikey, filename, filesize, mimetype, start_response, nil, options, storage, intelligent ) else - 
parts_and_etags = run_uploads(jobs, apikey, filepath, options, storage) + parts_and_etags = run_uploads(jobs, apikey, filepath, io, options, storage) response_complete = multipart_complete( apikey, filename, filesize, mimetype, start_response, parts_and_etags, options, storage diff --git a/lib/filestack/utils/utils.rb b/lib/filestack/utils/utils.rb index 10b369b..429220b 100644 --- a/lib/filestack/utils/utils.rb +++ b/lib/filestack/utils/utils.rb @@ -88,7 +88,7 @@ def build_store_task(options = {}) # @param [Hash] options User-defined options for # multipart uploads # @return [Hash] - def send_upload(apikey, external_url: nil, security: nil, options: nil) + def send_upload(apikey, external_url = nil, security = nil, options = nil) base = "#{FilestackConfig::CDN_URL}/#{apikey}/#{build_store_task(options)}" if security @@ -199,7 +199,7 @@ def change_offset(working_offset, state) # @param [IntelligentState] state An IntelligentState object # # @return [Array] - def run_intelligent_upload_flow(jobs, state, storage) + def run_intelligent_upload_flow(jobs, filepath, io, state, storage) bar = ProgressBar.new(jobs.length) generator = create_intelligent_generator(jobs) working_offset = FilestackConfig::DEFAULT_OFFSET_SIZE @@ -207,7 +207,7 @@ def run_intelligent_upload_flow(jobs, state, storage) batch = get_generator_batch(generator) # run parts Parallel.map(batch, in_threads: 4) do |part| - state = run_intelligent_uploads(part, state, storage) + state = run_intelligent_uploads(part, filepath, io, state, storage) # condition: a chunk has failed but we have not reached the maximum retries while bad_state(state) # condition: timeout to S3, requiring offset size to be changed @@ -219,7 +219,7 @@ def run_intelligent_upload_flow(jobs, state, storage) sleep(state.backoff) end state.add_retry - state = run_intelligent_uploads(part, state, storage) + state = run_intelligent_uploads(part, filepath, io, state, storage) end raise "Upload has failed. Please try again later." 
unless state.ok bar.increment! @@ -275,7 +275,7 @@ def create_upload_job_chunks(jobs, state, apikey, filename, filepath, filesize, # multipart_start # # @return [Dict] - def chunk_job(job, state, apikey, filename, filepath, filesize, start_response, storage) + def chunk_job(job, state, apikey, filename, filesize, start_response, storage) offset = 0 seek_point = job[:seek_point] chunk_list = [] @@ -283,7 +283,6 @@ def chunk_job(job, state, apikey, filename, filepath, filesize, start_response, while (offset < FilestackConfig::DEFAULT_CHUNK_SIZE) && (seek_point + offset) < filesize chunk_list.push( seek_point: seek_point, - filepath: filepath, filename: filename, apikey: apikey, part: job[:part], @@ -307,15 +306,14 @@ def chunk_job(job, state, apikey, filename, filepath, filesize, start_response, # @param [IntelligentState] state An IntelligentState object # # @return [IntelligentState] - def run_intelligent_uploads(part, state, storage) + def run_intelligent_uploads(part, filepath, io, state, storage) failed = false chunks = chunk_job( - part, state, part[:apikey], part[:filename], part[:filepath], - part[:filesize], part[:start_response], storage + part, state, part[:apikey], part[:filename], part[:filesize], part[:start_response], storage ) Parallel.map(chunks, in_threads: 3) do |chunk| begin - upload_chunk_intelligently(chunk, state, part[:apikey], part[:filepath], part[:options], storage) + upload_chunk_intelligently(chunk, state, part[:apikey], filepath, io, part[:options], storage) rescue => e state.error_type = e.message failed = true @@ -364,8 +362,8 @@ def run_intelligent_uploads(part, state, storage) # multipart uploads # # @return [Typhoeus::Response] - def upload_chunk_intelligently(job, state, apikey, filepath, options, storage) - file = File.open(filepath) + def upload_chunk_intelligently(job, state, apikey, filepath, io, options, storage) + file = filepath ? 
File.open(filepath) : io file.seek(job[:seek_point] + job[:offset]) chunk = file.read(state.offset) diff --git a/spec/filestack/ruby_spec.rb b/spec/filestack/ruby_spec.rb index 644ced7..fdf4fdc 100644 --- a/spec/filestack/ruby_spec.rb +++ b/spec/filestack/ruby_spec.rb @@ -49,9 +49,11 @@ def code @test_apikey = 'YOUR_API_KEY' @test_secret = 'YOUR_SECRET' @test_filepath = __dir__ + '/../../test-files/calvinandhobbes.jpg' + @test_io = StringIO.new(File.open(@test_filepath).read) @test_download = __dir__ + '/../../test-files/test' @test_filename = 'calvinandhobbes.jpg' @test_filesize = 10000 + @test_io_size = 16542 @test_mimetype = 'image/jpeg' @storage = 's3' @start_response = { @@ -84,6 +86,7 @@ def code @test_secure_client = FilestackClient.new(@test_apikey, security: @test_security) @test_secure_filelink = FilestackFilelink.new(@test_apikey, security: @test_security) @test_transform = Transform.new(apikey: @test_apikey, handle: @test_handle, security: @test_security) + @options = { filename: "filename.png" } end it 'has a version number' do @@ -154,9 +157,32 @@ def body expect(filelink.handle).to eq('somehandle') end - it 'Does not upload when both url and filepath are present' do - bad = @test_secure_client.upload(filepath: @test_filepath, external_url: 'someurl') - expect(bad).to eq('You cannot upload a URL and file at the same time') + it 'FilestackFilelink uploads io object' do + class UploadResponse + def code + 200 + end + + def body + { handle: 'somehandle', + url: 'https://cdn.filestackcontent.com/somehandle' }.to_json + end + + end + allow(Typhoeus).to receive(:post) + .and_return(UploadResponse.new) + filelink = @test_secure_client.upload(io: @test_io) + expect(filelink.handle).to eq('somehandle') + end + + it 'does not upload when both url and filepath are present' do + response = @test_secure_client.upload(filepath: @test_filepath, external_url: 'someurl') + expect(response).to eq('You cannot upload a URL and file at the same time') + end + + it 'does 
not upload when both io object and filepath are present' do + response = @test_secure_client.upload(filepath: @test_filepath, io: @test_io) + expect(response).to eq('You cannot upload IO object and file at the same time') end it 'zips corectly' do @@ -169,18 +195,27 @@ def body ## MULTIPART TESTING # ###################### - it 'returns the right file info' do + it 'returns the right file attributes' do allow(File).to receive(:basename).and_return(@test_filename) allow(File).to receive(:size).and_return(@test_filesize) filename, filesize, mimetype = - MultipartUploadUtils.get_file_info(@test_filepath) + MultipartUploadUtils.get_file_attributes(@test_filepath) expect(filename).to eq(@test_filename) expect(filesize).to eq(@test_filesize) expect(mimetype).to eq(@test_mimetype) end + it 'returns the right IO object attributes' do + filename, filesize, mimetype = + MultipartUploadUtils.get_io_attributes(@test_io, @options) + + expect(filename).to eq(@options[:filename]) + expect(filesize).to eq(@test_io_size) + expect(mimetype).to eq(FilestackConfig::DEFAULT_UPLOAD_MIMETYPE) + end + it 'returns the correct multipart_start response' do allow(Typhoeus).to receive(:post) .and_return(@response) @@ -194,10 +229,9 @@ def body it 'returns the correct create_upload_jobs array' do jobs = create_upload_jobs( - @test_apikey, @test_filename, @test_filepath, - @test_filesize, @start_response, @storage, {} + @test_apikey, @test_filename, @test_filesize, @start_response, @storage, {} ) - expect(jobs[0][:filepath]).to eq(@test_filepath) + expect(jobs[0][:filesize]).to eq(@test_filesize) end it 'returns correct upload_chunk response' do @@ -214,7 +248,7 @@ def body .and_return(@response) response = MultipartUploadUtils.upload_chunk( - @job, @test_apikey, @test_filepath, nil, @storage + @job, @test_apikey, @test_filepath, nil, nil, @storage ) expect(response.body).to eq(@response.body) end @@ -245,7 +279,7 @@ def code end results = MultipartUploadUtils.run_uploads( - jobs, @test_apikey, 
@test_filepath, nil, @storage + jobs, @test_apikey, @test_filepath, nil, nil, @storage ) 2.times do |i| expect(results[i]).to eq(result) @@ -261,7 +295,7 @@ def code expect(response.body).to eq(@response.body) end - it 'Multipart upload returns the correct response' do + it 'multipart_upload returns the correct response' do allow_any_instance_of(MultipartUploadUtils).to receive(:multipart_start) .and_return(@start_response) allow_any_instance_of(MultipartUploadUtils).to receive(:run_uploads) @@ -269,7 +303,7 @@ def code allow_any_instance_of(MultipartUploadUtils).to receive(:multipart_complete) .and_return(GeneralResponse.new(@start_response)) response = MultipartUploadUtils.multipart_upload( - @test_apikey, @test_filepath, nil, {}, 60, @storage, intelligent: false + @test_apikey, @test_filepath, nil, nil, {}, 60, @storage, false ) expect(response.to_json).to eq(@response.body) end @@ -305,7 +339,7 @@ def code allow_any_instance_of(IntelligentUtils).to receive(:run_intelligent_upload_flow) .and_return(true) allow_any_instance_of(MultipartUploadUtils).to receive(:multipart_complete) - .and_return(GeneralResponse.new({'handle' => 'somehandle'}, 202)) + .and_return(GeneralResponse.new({ 'handle' => 'somehandle' }, 202)) expect{@test_client.upload(filepath: @test_filepath, intelligent: true, timeout: 1)}.to raise_error(RuntimeError) end @@ -323,123 +357,123 @@ def code it 'runs intelligent upload flow without failure' do state = IntelligentState.new - filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath) + filename, filesize, mimetype = MultipartUploadUtils.get_file_attributes(@test_filepath) jobs = create_upload_jobs( - @test_apikey, filename, @test_filepath, filesize, @start_response, @storage, {} + @test_apikey, filename, filesize, @start_response, @storage, {} ) allow(IntelligentUtils).to receive(:run_intelligent_uploads) .and_return(state) - IntelligentUtils.run_intelligent_upload_flow(jobs, state, @storage) + 
IntelligentUtils.run_intelligent_upload_flow(jobs, @test_filepath, nil, state, @storage) expect(true) end it 'runs intelligent upload flow with failure' do state = IntelligentState.new - filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath) + filename, filesize, mimetype = MultipartUploadUtils.get_file_attributes(@test_filepath) state.ok = false jobs = MultipartUploadUtils.create_upload_jobs( - @test_apikey, filename, @test_filepath, filesize, @start_response, @storage, {} + @test_apikey, filename, filesize, @start_response, @storage, {} ) allow(IntelligentUtils).to receive(:run_intelligent_uploads) .and_return(state) - expect {IntelligentUtils.run_intelligent_upload_flow(jobs, state, @storage)}.to raise_error(RuntimeError) + expect {IntelligentUtils.run_intelligent_upload_flow(jobs, @test_filepath, nil, state, @storage)}.to raise_error(RuntimeError) end it 'runs intelligent uploads without error' do state = IntelligentState.new - filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath) + filename, filesize, mimetype = MultipartUploadUtils.get_file_attributes(@test_filepath) jobs = create_upload_jobs( - @test_apikey, filename, @test_filepath, filesize, @start_response, @storage, {} + @test_apikey, filename, filesize, @start_response, @storage, {} ) allow(IntelligentUtils).to receive(:upload_chunk_intelligently) .and_return(state) allow(Typhoeus).to receive(:post) .and_return(@response) - state = IntelligentUtils.run_intelligent_uploads(jobs[0], state, @storage) + state = IntelligentUtils.run_intelligent_uploads(jobs[0], @test_filepath, nil, state, @storage) expect(state.ok) end it 'runs intelligent uploads with failure error' do state = IntelligentState.new - filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath) + filename, filesize, mimetype = MultipartUploadUtils.get_file_attributes(@test_filepath) jobs = create_upload_jobs( - @test_apikey, filename, @test_filepath, filesize, 
@start_response, @storage, {} + @test_apikey, filename, filesize, @start_response, @storage, {} ) allow(IntelligentUtils).to receive(:upload_chunk_intelligently) .and_raise('FAILURE') - state = IntelligentUtils.run_intelligent_uploads(jobs[0], state, @storage) + state = IntelligentUtils.run_intelligent_uploads(jobs[0], @test_filepath, nil, state, @storage) expect(state.ok).to eq(false) expect(state.error_type).to eq('FAILURE') end it 'retries upon failure' do state = IntelligentState.new - filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath) + filename, filesize, mimetype = MultipartUploadUtils.get_file_attributes(@test_filepath) jobs = create_upload_jobs( - @test_apikey, filename, @test_filepath, filesize, @start_response, @storage, {} + @test_apikey, filename, filesize, @start_response, @storage, {} ) state.ok = false state.error_type = 'BACKEND_SERVER' allow_any_instance_of(IntelligentUtils).to receive(:run_intelligent_uploads) .and_return(state) - expect {IntelligentUtils.run_intelligent_upload_flow(jobs, state, @storage)}.to raise_error(RuntimeError) + expect {IntelligentUtils.run_intelligent_upload_flow(jobs, @test_filepath, nil, state, @storage)}.to raise_error(RuntimeError) end it 'retries upon network failure' do state = IntelligentState.new - filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath) + filename, filesize, mimetype = MultipartUploadUtils.get_file_attributes(@test_filepath) jobs = create_upload_jobs( - @test_apikey, filename, @test_filepath, filesize, @start_response, @storage, {} + @test_apikey, filename, filesize, @start_response, @storage, {} ) state.ok = false state.error_type = 'S3_NETWORK' allow_any_instance_of(IntelligentUtils).to receive(:run_intelligent_uploads) .and_return(state) - expect {IntelligentUtils.run_intelligent_upload_flow(jobs, state, @storage)}.to raise_error(RuntimeError) + expect {IntelligentUtils.run_intelligent_upload_flow(jobs, @test_filepath, nil, state, 
@storage)}.to raise_error(RuntimeError) end it 'retries upon server failure' do state = IntelligentState.new - filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath) + filename, filesize, mimetype = MultipartUploadUtils.get_file_attributes(@test_filepath) jobs = create_upload_jobs( - @test_apikey, filename, @test_filepath, filesize, @start_response, @storage, {} + @test_apikey, filename, filesize, @start_response, @storage, {} ) state.ok = false state.error_type = 'S3_SERVER' allow_any_instance_of(IntelligentUtils).to receive(:run_intelligent_uploads) .and_return(state) - expect {IntelligentUtils.run_intelligent_upload_flow(jobs, state, @storage)}.to raise_error(RuntimeError) + expect {IntelligentUtils.run_intelligent_upload_flow(jobs, @test_filepath, nil, state, @storage)}.to raise_error(RuntimeError) end it 'retries upon backend network failure' do state = IntelligentState.new - filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath) + filename, filesize, mimetype = MultipartUploadUtils.get_file_attributes(@test_filepath) jobs = create_upload_jobs( - @test_apikey, filename, @test_filepath, filesize, @start_response, @storage, {} + @test_apikey, filename, filesize, @start_response, @storage, {} ) state.ok = false state.error_type = 'BACKEND_NETWORK' allow_any_instance_of(IntelligentUtils).to receive(:run_intelligent_uploads) .and_return(state) - expect {IntelligentUtils.run_intelligent_upload_flow(jobs, state, @storage)}.to raise_error(RuntimeError) + expect {IntelligentUtils.run_intelligent_upload_flow(jobs, @test_filepath, nil, state, @storage)}.to raise_error(RuntimeError) end it 'runs intelligent uploads with 400 error' do state = IntelligentState.new - filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath) + filename, filesize, mimetype = MultipartUploadUtils.get_file_attributes(@test_filepath) jobs = create_upload_jobs( - @test_apikey, filename, @test_filepath, filesize, 
@start_response, @storage, {} + @test_apikey, filename, filesize, @start_response, @storage, {} ) allow(IntelligentUtils).to receive(:upload_chunk_intelligently) .and_return(true) allow(Typhoeus).to receive(:post) .and_return(Response.new(400)) - state = IntelligentUtils.run_intelligent_uploads(jobs[0], state, @storage) + state = IntelligentUtils.run_intelligent_uploads(jobs[0], @test_filepath, nil, state, @storage) expect(state.ok).to eq(false) end @@ -458,9 +492,9 @@ def code end state = IntelligentState.new - filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath) + filename, filesize, mimetype = MultipartUploadUtils.get_file_attributes(@test_filepath) jobs = create_upload_jobs( - @test_apikey, filename, @test_filepath, filesize, @start_response, @storage, {} + @test_apikey, filename, filesize, @start_response, @storage, {} ) allow(Typhoeus).to receive(:post) @@ -468,7 +502,7 @@ def code allow(Typhoeus).to receive(:put) .and_return(@response) jobs[0][:offset] = 0 - response = IntelligentUtils.upload_chunk_intelligently(jobs[0], state, @test_apikey, @test_filepath, {}, @storage) + response = IntelligentUtils.upload_chunk_intelligently(jobs[0], state, @test_apikey, @test_filepath, nil, {}, @storage) expect(response.code).to eq(200) end @@ -487,9 +521,9 @@ def code end state = IntelligentState.new - filename, filesize, mimetype = MultipartUploadUtils.get_file_info(@test_filepath) + filename, filesize, mimetype = MultipartUploadUtils.get_file_attributes(@test_filepath) jobs = create_upload_jobs( - @test_apikey, filename, @test_filepath, filesize, @start_response, @storage, {} + @test_apikey, filename, filesize, @start_response, @storage, {} ) allow(Typhoeus).to receive(:post) @@ -497,7 +531,7 @@ def code allow(Typhoeus).to receive(:put) .and_return(Response.new(400)) jobs[0][:offset] = 0 - expect {IntelligentUtils.upload_chunk_intelligently(jobs[0], state, @test_apikey, @test_filepath, {}, @storage)}.to raise_error(RuntimeError) + expect 
{IntelligentUtils.upload_chunk_intelligently(jobs[0], state, @test_apikey, @test_filepath, nil, {}, @storage)}.to raise_error(RuntimeError) end