
Commit

Update cloud objectstore to adhere to the cloudbridge v1 interface.
VJalili authored and nsoranzo committed Sep 10, 2018
1 parent 64bb339 commit ec320d2
Showing 1 changed file with 16 additions and 16 deletions.
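
The change is a mechanical API migration: bucket handles move from the removed conn.object_store accessor to cloudbridge v1's conn.storage.buckets service, and per-key operations move onto the bucket's objects subservice. Below is a minimal sketch of the bucket-level pattern, assuming conn is an already-configured cloudbridge v1 provider (how it is constructed is outside this diff); the helper name get_or_create_bucket is illustrative and not part of the Galaxy code. Only calls that appear in the diff itself are used.

import logging

log = logging.getLogger(__name__)


def get_or_create_bucket(conn, bucket_name):
    # `conn` is assumed to be a configured cloudbridge v1 provider.
    bucket = conn.storage.buckets.get(bucket_name)  # returns None if the bucket does not exist
    if bucket is None:
        log.debug("Bucket not found, creating a bucket with handle '%s'", bucket_name)
        bucket = conn.storage.buckets.create(bucket_name)
    return bucket
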
lib/galaxy/objectstore/cloud.py (32 changes: 16 additions & 16 deletions)
@@ -171,10 +171,10 @@ def __clean_cache(self, file_list, delete_this_much):
 
     def _get_bucket(self, bucket_name):
         try:
-            bucket = self.conn.object_store.get(bucket_name)
+            bucket = self.conn.storage.buckets.get(bucket_name)
             if bucket is None:
                 log.debug("Bucket not found, creating a bucket with handle '%s'", bucket_name)
-                bucket = self.conn.object_store.create(bucket_name)
+                bucket = self.conn.storage.buckets.create(bucket_name)
             log.debug("Using cloud ObjectStore with bucket '%s'", bucket.name)
             return bucket
         except Exception:
@@ -239,7 +239,7 @@ def _get_transfer_progress(self):
 
     def _get_size_in_cloud(self, rel_path):
         try:
-            obj = self.bucket.get(rel_path)
+            obj = self.bucket.objects.get(rel_path)
             if obj:
                 return obj.size
         except Exception:
@@ -252,13 +252,13 @@ def _key_exists(self, rel_path):
             # A hackish way of testing if the rel_path is a folder vs a file
             is_dir = rel_path[-1] == '/'
             if is_dir:
-                keyresult = self.bucket.list(prefix=rel_path)
+                keyresult = self.bucket.objects.list(prefix=rel_path)
                 if len(keyresult) > 0:
                     exists = True
                 else:
                     exists = False
             else:
-                exists = True if self.bucket.get(rel_path) is not None else False
+                exists = True if self.bucket.objects.get(rel_path) is not None else False
         except Exception:
             log.exception("Trouble checking existence of S3 key '%s'", rel_path)
             return False
@@ -288,7 +288,7 @@ def _transfer_cb(self, complete, total):
     def _download(self, rel_path):
         try:
             log.debug("Pulling key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
-            key = self.bucket.get(rel_path)
+            key = self.bucket.objects.get(rel_path)
             # Test if cache is large enough to hold the new file
             if self.cache_size > 0 and key.size > self.cache_size:
                 log.critical("File %s is larger (%s) than the cache size (%s). Cannot download.",
@@ -322,27 +322,27 @@ def _push_to_os(self, rel_path, source_file=None, from_string=None):
         try:
             source_file = source_file if source_file else self._get_cache_path(rel_path)
             if os.path.exists(source_file):
-                if os.path.getsize(source_file) == 0 and (self.bucket.get(rel_path) is not None):
+                if os.path.getsize(source_file) == 0 and (self.bucket.objects.get(rel_path) is not None):
                     log.debug("Wanted to push file '%s' to S3 key '%s' but its size is 0; skipping.", source_file,
                               rel_path)
                     return True
                 if from_string:
-                    if not self.bucket.get(rel_path):
-                        created_obj = self.bucket.create_object(rel_path)
+                    if not self.bucket.objects.get(rel_path):
+                        created_obj = self.bucket.objects.create(rel_path)
                         created_obj.upload(source_file)
                     else:
-                        self.bucket.get(rel_path).upload(source_file)
+                        self.bucket.objects.get(rel_path).upload(source_file)
                     log.debug("Pushed data from string '%s' to key '%s'", from_string, rel_path)
                 else:
                     start_time = datetime.now()
                     log.debug("Pushing cache file '%s' of size %s bytes to key '%s'", source_file,
                               os.path.getsize(source_file), rel_path)
                     self.transfer_progress = 0  # Reset transfer progress counter
-                    if not self.bucket.get(rel_path):
-                        created_obj = self.bucket.create_object(rel_path)
+                    if not self.bucket.objects.get(rel_path):
+                        created_obj = self.bucket.objects.create(rel_path)
                         created_obj.upload_from_file(source_file)
                     else:
-                        self.bucket.get(rel_path).upload_from_file(source_file)
+                        self.bucket.objects.get(rel_path).upload_from_file(source_file)
 
                     end_time = datetime.now()
                     log.debug("Pushed cache file '%s' to key '%s' (%s bytes transfered in %s sec)",
@@ -468,7 +468,7 @@ def delete(self, obj, entire_dir=False, **kwargs):
             # but requires iterating through each individual key in S3 and deleing it.
             if entire_dir and extra_dir:
                 shutil.rmtree(self._get_cache_path(rel_path))
-                results = self.bucket.list(prefix=rel_path)
+                results = self.bucket.objects.list(prefix=rel_path)
                 for key in results:
                     log.debug("Deleting key %s", key.name)
                     key.delete()
@@ -478,7 +478,7 @@ def delete(self, obj, entire_dir=False, **kwargs):
                 os.unlink(self._get_cache_path(rel_path))
             # Delete from S3 as well
             if self._key_exists(rel_path):
-                key = self.bucket.get(rel_path)
+                key = self.bucket.objects.get(rel_path)
                 log.debug("Deleting key %s", key.name)
                 key.delete()
                 return True
@@ -566,7 +566,7 @@ def get_object_url(self, obj, **kwargs):
         if self.exists(obj, **kwargs):
             rel_path = self._construct_path(obj, **kwargs)
             try:
-                key = self.bucket.get(rel_path)
+                key = self.bucket.objects.get(rel_path)
                 return key.generate_url(expires_in=86400)  # 24hrs
             except Exception:
                 log.exception("Trouble generating URL for dataset '%s'", rel_path)
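
For reference, the per-key calls touched above all follow the same bucket.objects pattern. A small sketch of the object-level operations, assuming bucket was obtained as in the sketch before the diff; the helper names (push_string, delete_prefix, presigned_url) are illustrative only and are not part of lib/galaxy/objectstore/cloud.py. Only calls that appear in this diff are used.

import logging

log = logging.getLogger(__name__)


def push_string(bucket, rel_path, data):
    # Create the key if it is missing, then upload the payload
    # (the same get/create/upload sequence used by _push_to_os).
    obj = bucket.objects.get(rel_path)
    if obj is None:
        obj = bucket.objects.create(rel_path)
    obj.upload(data)


def delete_prefix(bucket, prefix):
    # Delete every key under a pseudo-folder, as delete() does when
    # removing an entire directory.
    for obj in bucket.objects.list(prefix=prefix):
        log.debug("Deleting key %s", obj.name)
        obj.delete()


def presigned_url(bucket, rel_path, expires_in=86400):
    # Temporary download URL, as in get_object_url() (24 hours by default).
    obj = bucket.objects.get(rel_path)
    return obj.generate_url(expires_in=expires_in) if obj else None
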
