diff --git a/cron/build_chrom_db.py b/cron/build_chrom_db.py
index ce472d257ee1..118ecd53cb40 100644
--- a/cron/build_chrom_db.py
+++ b/cron/build_chrom_db.py
@@ -17,8 +17,8 @@
import os
import sys
+import requests
from six.moves.urllib.parse import urlencode
-from six.moves.urllib.request import urlopen
import parse_builds
@@ -36,8 +36,8 @@ def getchrominfo(url, db):
"hgta_regionType": "",
"position": "",
"hgta_doTopSubmit": "get info"})
- page = urlopen(URL)
- for line in page:
+ page = requests.get(URL).text
+ for line in page.split('\n'):
line = line.rstrip( "\r\n" )
if line.startswith("#"):
continue
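
The hunk above swaps urlopen for requests.get(...).text and iterates over split lines. A minimal sketch of the same fetch pattern with the error handling requests makes cheap; the helper name and timeout are illustrative, not part of the patch:

    import requests

    def fetch_tabular(url, params=None):
        # raise_for_status() turns HTTP-level failures into exceptions
        # instead of silently iterating over an error page.
        resp = requests.get(url, params=params, timeout=30)
        resp.raise_for_status()
        for line in resp.text.splitlines():
            line = line.rstrip("\r\n")
            if line and not line.startswith("#"):
                yield line.split("\t")
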
diff --git a/cron/parse_builds.py b/cron/parse_builds.py
index c30197184f83..243440a331bb 100644
--- a/cron/parse_builds.py
+++ b/cron/parse_builds.py
@@ -9,18 +9,17 @@
import sys
import xml.etree.ElementTree as ElementTree
-from six.moves.urllib.request import urlopen
+import requests
def getbuilds(url):
try:
- page = urlopen(url)
+ text = requests.get(url).text
except:
print("#Unable to open " + url)
print("?\tunspecified (?)")
sys.exit(1)
- text = page.read()
try:
tree = ElementTree.fromstring(text)
except:
diff --git a/cron/parse_builds_3_sites.py b/cron/parse_builds_3_sites.py
index b22cd449bf96..4ae43e52765e 100644
--- a/cron/parse_builds_3_sites.py
+++ b/cron/parse_builds_3_sites.py
@@ -6,7 +6,7 @@
import xml.etree.ElementTree as ElementTree
-from six.moves.urllib.request import urlopen
+import requests
sites = ['http://genome.ucsc.edu/cgi-bin/',
'http://archaea.ucsc.edu/cgi-bin/',
@@ -20,11 +20,11 @@ def main():
trackurl = sites[i] + "hgTracks?"
builds = []
try:
- page = urlopen(site)
+ text = requests.get(site).text
except:
print("#Unable to connect to " + site)
continue
- text = page.read()
+
try:
tree = ElementTree.fromstring(text)
except:
diff --git a/lib/galaxy/datatypes/binary.py b/lib/galaxy/datatypes/binary.py
index 87882101b50b..c4436faa8a68 100644
--- a/lib/galaxy/datatypes/binary.py
+++ b/lib/galaxy/datatypes/binary.py
@@ -285,10 +285,8 @@ def merge(split_files, output_file):
def _is_coordinate_sorted( self, file_name ):
"""See if the input BAM file is sorted from the header information."""
- params = [ "samtools", "view", "-H", file_name ]
- output = subprocess.Popen( params, stderr=subprocess.PIPE, stdout=subprocess.PIPE ).communicate()[0]
- # find returns -1 if string is not found
- return output.find( "SO:coordinate" ) != -1 or output.find( "SO:sorted" ) != -1
+ output = subprocess.check_output(["samtools", "view", "-H", file_name])
+ return 'SO:coordinate' in output or 'SO:sorted' in output
def dataset_content_needs_grooming( self, file_name ):
"""See if file_name is a sorted BAM file"""
@@ -313,8 +311,7 @@ def dataset_content_needs_grooming( self, file_name ):
return False
index_name = tempfile.NamedTemporaryFile( prefix="bam_index" ).name
stderr_name = tempfile.NamedTemporaryFile( prefix="bam_index_stderr" ).name
- command = 'samtools index %s %s' % ( file_name, index_name )
- proc = subprocess.Popen( args=command, shell=True, stderr=open( stderr_name, 'wb' ) )
+ proc = subprocess.Popen(['samtools', 'index', file_name, index_name], stderr=open(stderr_name, 'wb'))
proc.wait()
stderr = open( stderr_name ).read().strip()
if stderr:
@@ -357,8 +354,8 @@ def groom_dataset_content( self, file_name ):
tmp_sorted_dataset_file_name_prefix = os.path.join( tmp_dir, 'sorted' )
stderr_name = tempfile.NamedTemporaryFile( dir=tmp_dir, prefix="bam_sort_stderr" ).name
samtools_created_sorted_file_name = "%s.bam" % tmp_sorted_dataset_file_name_prefix # samtools accepts a prefix, not a filename; it always adds .bam to the prefix
- command = "samtools sort %s %s" % ( file_name, tmp_sorted_dataset_file_name_prefix )
- proc = subprocess.Popen( args=command, shell=True, cwd=tmp_dir, stderr=open( stderr_name, 'wb' ) )
+ proc = subprocess.Popen(['samtools', 'sort', file_name, tmp_sorted_dataset_file_name_prefix],
+ cwd=tmp_dir, stderr=open(stderr_name, 'wb'))
exit_code = proc.wait()
# Did sort succeed?
stderr = open( stderr_name ).read().strip()
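
Two things are worth noting about the list-form subprocess calls above: the arguments bypass /bin/sh, so filenames containing spaces or shell metacharacters are passed through literally, and on Python 3 check_output returns bytes, so the substring checks would need a decode. A hedged sketch of the header check with both made explicit (assumes samtools on PATH):

    import subprocess

    def is_coordinate_sorted(path):
        # No shell involved: each list element arrives as one argv entry.
        header = subprocess.check_output(["samtools", "view", "-H", path])
        header = header.decode("utf-8", errors="replace")  # bytes on Python 3
        return "SO:coordinate" in header or "SO:sorted" in header
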
diff --git a/lib/galaxy/datatypes/converters/interval_to_coverage.py b/lib/galaxy/datatypes/converters/interval_to_coverage.py
index e58b3ec764ba..e4494491f35b 100644
--- a/lib/galaxy/datatypes/converters/interval_to_coverage.py
+++ b/lib/galaxy/datatypes/converters/interval_to_coverage.py
@@ -133,8 +133,9 @@ def close(self):
# Sort through a tempfile first
temp_file = tempfile.NamedTemporaryFile(mode="r")
environ['LC_ALL'] = 'POSIX'
- commandline = "sort -f -n -k %d -k %d -k %d -o %s %s" % (chr_col_1 + 1, start_col_1 + 1, end_col_1 + 1, temp_file.name, in_fname)
- subprocess.check_call(commandline, shell=True)
+ subprocess.check_call([
+ 'sort', '-f', '-n', '-k', str(chr_col_1 + 1), '-k', str(start_col_1 + 1), '-k', str(end_col_1 + 1), '-o', temp_file.name, in_fname
+ ])
coverage = CoverageWriter( out_stream=open(out_fname, "a"),
chromCol=chr_col_2, positionCol=position_col_2,
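
Every element of a list-form argv must already be a string; integers such as the 1-based column indexes above raise TypeError inside subprocess. A tiny illustrative helper (hypothetical, not part of the patch) that makes the conversion explicit:

    import subprocess

    def check_call_str(*args, **kwargs):
        # Coerce each argv element to str so callers can pass column numbers.
        return subprocess.check_call([str(a) for a in args], **kwargs)

    # check_call_str('sort', '-n', '-k', 2, '-o', 'out.txt', 'in.txt')
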
diff --git a/lib/galaxy/datatypes/converters/lped_to_pbed_converter.py b/lib/galaxy/datatypes/converters/lped_to_pbed_converter.py
index fc7ef10adbc3..6548aad1b8c6 100644
--- a/lib/galaxy/datatypes/converters/lped_to_pbed_converter.py
+++ b/lib/galaxy/datatypes/converters/lped_to_pbed_converter.py
@@ -72,9 +72,9 @@ def rgConv(inpedfilepath, outhtmlname, outfilepath, plink):
if not missval:
print('### lped_to_pbed_converter.py cannot identify missing value in %s' % pedf)
missval = '0'
- cl = '%s --noweb --file %s --make-bed --out %s --missing-genotype %s' % (plink, inpedfilepath, outroot, missval)
- p = subprocess.Popen(cl, shell=True, cwd=outfilepath)
- p.wait() # run plink
+ subprocess.check_call([plink, '--noweb', '--file', inpedfilepath,
+ '--make-bed', '--out', outroot,
+ '--missing-genotype', missval], cwd=outfilepath)
def main():
diff --git a/lib/galaxy/datatypes/converters/pbed_ldreduced_converter.py b/lib/galaxy/datatypes/converters/pbed_ldreduced_converter.py
index 02a6541fe428..9a913b877be6 100644
--- a/lib/galaxy/datatypes/converters/pbed_ldreduced_converter.py
+++ b/lib/galaxy/datatypes/converters/pbed_ldreduced_converter.py
@@ -41,8 +41,7 @@ def pruneLD(plinktasks=[], cd='./', vclbase=[]):
for task in plinktasks: # each is a list
vcl = vclbase + task
with open(plog, 'w') as sto:
- x = subprocess.Popen(' '.join(vcl), shell=True, stdout=sto, stderr=sto, cwd=cd)
- x.wait()
+ subprocess.check_call(vcl, stdout=sto, stderr=sto, cwd=cd)
try:
lplog = open(plog, 'r').readlines()
lplog = [elem for elem in lplog if elem.find('Pruning SNP') == -1]
diff --git a/lib/galaxy/datatypes/converters/pbed_to_lped_converter.py b/lib/galaxy/datatypes/converters/pbed_to_lped_converter.py
index ed45a204db1f..dc4bec51cb32 100644
--- a/lib/galaxy/datatypes/converters/pbed_to_lped_converter.py
+++ b/lib/galaxy/datatypes/converters/pbed_to_lped_converter.py
@@ -40,9 +40,7 @@ def rgConv(inpedfilepath, outhtmlname, outfilepath, plink):
"""
basename = os.path.split(inpedfilepath)[-1] # get basename
outroot = os.path.join(outfilepath, basename)
- cl = '%s --noweb --bfile %s --recode --out %s ' % (plink, inpedfilepath, outroot)
- p = subprocess.Popen(cl, shell=True, cwd=outfilepath)
- p.wait() # run plink
+ subprocess.check_call([plink, '--noweb', '--bfile', inpedfilepath, '--recode', '--out', outroot], cwd=outfilepath)
def main():
diff --git a/lib/galaxy/datatypes/converters/sam_to_bam.py b/lib/galaxy/datatypes/converters/sam_to_bam.py
index 6fe14fed0554..1aad981fd9bc 100644
--- a/lib/galaxy/datatypes/converters/sam_to_bam.py
+++ b/lib/galaxy/datatypes/converters/sam_to_bam.py
@@ -68,8 +68,10 @@ def __main__():
# convert to SAM
unsorted_bam_filename = os.path.join( tmp_dir, 'unsorted.bam' )
unsorted_stderr_filename = os.path.join( tmp_dir, 'unsorted.stderr' )
- cmd = "samtools view -bS '%s' > '%s'" % ( input_filename, unsorted_bam_filename )
- proc = subprocess.Popen( args=cmd, stderr=open( unsorted_stderr_filename, 'wb' ), shell=True, cwd=tmp_dir )
+ proc = subprocess.Popen(['samtools', 'view', '-bS', input_filename],
+ stdout=open(unsorted_bam_filename, 'wb'),
+ stderr=open(unsorted_stderr_filename, 'wb'),
+ cwd=tmp_dir)
return_code = proc.wait()
if return_code:
stderr_target = sys.stderr
@@ -90,10 +92,13 @@ def __main__():
# samtools changed sort command arguments (starting from version 1.3)
samtools_version = LooseVersion(_get_samtools_version())
if samtools_version < LooseVersion('1.0'):
- cmd = "samtools sort -o '%s' '%s' > '%s'" % ( unsorted_bam_filename, sorting_prefix, output_filename )
+ sort_args = ['-o', unsorted_bam_filename, sorting_prefix]
else:
- cmd = "samtools sort -T '%s' '%s' > '%s'" % ( sorting_prefix, unsorted_bam_filename, output_filename )
- proc = subprocess.Popen( args=cmd, stderr=open( sorted_stderr_filename, 'wb' ), shell=True, cwd=tmp_dir )
+ sort_args = ['-T', sorting_prefix, unsorted_bam_filename]
+ proc = subprocess.Popen(['samtools', 'sort'] + sort_args,
+ stdout=open(output_filename, 'wb'),
+ stderr=open(sorted_stderr_filename, 'wb'),
+ cwd=tmp_dir)
return_code = proc.wait()
if return_code:
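
The shell redirections (> '%s') become explicit stdout= handles. A sketch of the same idea with context managers so the files are closed promptly; the patch's inline open(...) calls rely on garbage collection instead (a stylistic observation, not a claim about the PR):

    import subprocess

    def run_to_files(argv, stdout_path, stderr_path, cwd=None):
        # Equivalent of "argv > stdout_path 2> stderr_path" without a shell.
        with open(stdout_path, 'wb') as out, open(stderr_path, 'wb') as err:
            return subprocess.call(argv, stdout=out, stderr=err, cwd=cwd)
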
diff --git a/lib/galaxy/datatypes/sequence.py b/lib/galaxy/datatypes/sequence.py
index d1660145064b..20aef31458ca 100644
--- a/lib/galaxy/datatypes/sequence.py
+++ b/lib/galaxy/datatypes/sequence.py
@@ -8,6 +8,7 @@
import os
import re
import string
+import subprocess
import sys
from cgi import escape
@@ -662,8 +663,7 @@ def process_split_file(data):
else:
commands = Sequence.get_split_commands_sequential(is_gzip(input_name), input_name, output_name, start_sequence, sequence_count)
for cmd in commands:
- if 0 != os.system(cmd):
- raise Exception("Executing '%s' failed" % cmd)
+ subprocess.check_call(cmd, shell=True)
return True
process_split_file = staticmethod(process_split_file)
diff --git a/lib/galaxy/datatypes/tabular.py b/lib/galaxy/datatypes/tabular.py
index b52a1e491a09..97d4e0551be9 100644
--- a/lib/galaxy/datatypes/tabular.py
+++ b/lib/galaxy/datatypes/tabular.py
@@ -9,6 +9,7 @@
import logging
import os
import re
+import shutil
import subprocess
import sys
import tempfile
@@ -519,15 +520,12 @@ def merge( split_files, output_file):
Multiple SAM files may each have headers. Since the headers should all be the same, remove
the headers from files 1-n, keeping them in the first file only
"""
- cmd = 'mv %s %s' % ( split_files[0], output_file )
- result = os.system(cmd)
- if result != 0:
- raise Exception('Result %s from %s' % (result, cmd))
+ shutil.move(split_files[0], output_file)
+
if len(split_files) > 1:
- cmd = 'egrep -v -h "^@" %s >> %s' % ( ' '.join(split_files[1:]), output_file )
- result = os.system(cmd)
- if result != 0:
- raise Exception('Result %s from %s' % (result, cmd))
+ cmd = ['egrep', '-v', '-h', '^@'] + split_files[1:]
+ with open(output_file, 'ab') as out_fh:
+     subprocess.check_call(cmd, stdout=out_fh)
+
merge = staticmethod(merge)
# Dataproviders
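
The original list-plus-shell=True form here was subtly broken: with shell=True, only the first list element is the shell command; the remaining elements become the shell's positional parameters, so '>>' was never interpreted as a redirect. A one-liner that demonstrates the trap on POSIX:

    import subprocess

    # Runs: /bin/sh -c 'echo first' ignored  -> prints "first", drops the rest.
    subprocess.call(['echo first', 'ignored'], shell=True)
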
diff --git a/lib/galaxy/datatypes/text.py b/lib/galaxy/datatypes/text.py
index a1dc9388d65c..0be154d9d53b 100644
--- a/lib/galaxy/datatypes/text.py
+++ b/lib/galaxy/datatypes/text.py
@@ -10,6 +10,8 @@
import subprocess
import tempfile
+from six.moves import shlex_quote
+
from galaxy.datatypes.data import get_file_peek, Text
from galaxy.datatypes.metadata import MetadataElement, MetadataParameter
from galaxy.datatypes.sniff import get_headers
@@ -144,13 +146,12 @@ def _display_data_trusted(self, trans, dataset, preview=False, filename=None, to
ofilename = ofile_handle.name
ofile_handle.close()
try:
- cmd = 'ipython nbconvert --to html --template full %s --output %s' % (dataset.file_name, ofilename)
- log.info("Calling command %s" % cmd)
- subprocess.call(cmd, shell=True)
+ cmd = ['ipython', 'nbconvert', '--to', 'html', '--template', 'full', dataset.file_name, '--output', ofilename]
+ subprocess.check_call(cmd)
ofilename = '%s.html' % ofilename
- except:
+ except (subprocess.CalledProcessError, OSError):
ofilename = dataset.file_name
- log.exception( 'Command "%s" failed. Could not convert the IPython Notebook to HTML, defaulting to plain text.' % cmd )
+ log.exception('Command "%s" failed. Could not convert the IPython Notebook to HTML, defaulting to plain text.', ' '.join(map(shlex_quote, cmd)))
return open( ofilename )
def set_meta( self, dataset, **kwd ):
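
shlex_quote renders an argv list back into a string a user could paste into a shell, which is only needed for the log message since the command itself never touches a shell. A small sketch:

    from six.moves import shlex_quote

    def loggable(cmd):
        # Arguments with spaces come back quoted instead of splitting apart.
        return ' '.join(shlex_quote(c) for c in cmd)

    # loggable(['ipython', 'nbconvert', '--output', 'my file.html'])
    # -> "ipython nbconvert --output 'my file.html'"
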
diff --git a/lib/galaxy/external_services/actions.py b/lib/galaxy/external_services/actions.py
index 9e4ed1a14e32..fe6a74aaeb8f 100644
--- a/lib/galaxy/external_services/actions.py
+++ b/lib/galaxy/external_services/actions.py
@@ -1,6 +1,6 @@
# Contains actions that are used in External Services
import logging
-from urllib import urlopen
+import requests
from galaxy.web import url_for
from galaxy.util.template import fill_template
from result_handlers.basic import ExternalServiceActionResultHandler
@@ -104,7 +104,7 @@ def __init__( self, name, param_dict, url, method, target ): # display_handler
@property
def content( self ):
if self._content is None:
- self._content = urlopen( self.url ).read()
+ self._content = requests.get(self.url).text
return self._content
diff --git a/lib/galaxy/jobs/deferred/pacific_biosciences_smrt_portal.py b/lib/galaxy/jobs/deferred/pacific_biosciences_smrt_portal.py
index 6d03ee8e110c..07dd58b7ed92 100644
--- a/lib/galaxy/jobs/deferred/pacific_biosciences_smrt_portal.py
+++ b/lib/galaxy/jobs/deferred/pacific_biosciences_smrt_portal.py
@@ -2,14 +2,14 @@
Module for managing jobs in Pacific Bioscience's SMRT Portal and automatically transferring files
produced by SMRT Portal.
"""
-import json
import logging
from string import Template
-from six.moves.urllib.request import urlopen
+import requests
from .data_transfer import DataTransfer
+
log = logging.getLogger( __name__ )
__all__ = ( 'SMRTPortalPlugin', )
@@ -87,9 +87,9 @@ def check_job( self, job ):
if job.params[ 'type' ] == 'init_transfer':
if self._missing_params( job.params, [ 'smrt_host', 'smrt_job_id' ] ):
return self.job_states.INVALID
- url = 'http://' + job.params[ 'smrt_host' ] + self.api_path + '/Jobs/' + job.params[ 'smrt_job_id' ] + '/Status'
- r = urlopen( url )
- status = json.loads( r.read() )
+ url = 'http://' + job.params['smrt_host'] + self.api_path + '/Jobs/' + job.params['smrt_job_id'] + '/Status'
+ r = requests.get(url)
+ status = r.json()
# TODO: error handling: unexpected json or bad response, bad url, etc.
if status[ 'Code' ] == 'Completed':
log.debug( "SMRT Portal job '%s' is Completed. Initiating transfer." % job.params[ 'smrt_job_id' ] )
diff --git a/lib/galaxy/jobs/runners/pulsar.py b/lib/galaxy/jobs/runners/pulsar.py
index ff0ed03ea38f..57d378dffacc 100644
--- a/lib/galaxy/jobs/runners/pulsar.py
+++ b/lib/galaxy/jobs/runners/pulsar.py
@@ -7,6 +7,7 @@
import errno
import logging
import os
+import subprocess
from distutils.version import LooseVersion
from time import sleep
@@ -217,7 +218,7 @@ def __init_pulsar_app( self, pulsar_conf_path ):
else:
log.info("Loading Pulsar app configuration from %s" % pulsar_conf_path)
with open(pulsar_conf_path, "r") as f:
- conf.update(yaml.load(f) or {})
+ conf.update(yaml.safe_load(f) or {})
if "job_metrics_config_file" not in conf:
conf["job_metrics"] = self.app.job_metrics
if "staging_directory" not in conf:
@@ -382,8 +383,7 @@ def __prepare_input_files_locally(self, job_wrapper):
prepare_input_files_cmds = getattr(job_wrapper, 'prepare_input_files_cmds', None)
if prepare_input_files_cmds is not None:
for cmd in prepare_input_files_cmds: # run the commands to stage the input files
- if 0 != os.system(cmd):
- raise Exception('Error running file staging command: %s' % cmd)
+ subprocess.check_call(cmd, shell=True)
job_wrapper.prepare_input_files_cmds = None # prevent them from being used in-line
def _populate_parameter_defaults( self, job_destination ):
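
The yaml.load to yaml.safe_load changes in this patch all close the same hole: the default loader constructs arbitrary Python objects from !!python/... tags, so a hostile document can execute code. safe_load restricts input to plain scalars, lists, and dicts:

    import yaml

    # Fine with safe_load: plain data only.
    assert yaml.safe_load("greeting: hello\ncount: 2\n") == {"greeting": "hello", "count": 2}

    # Rejected by safe_load (would call os.system under plain yaml.load):
    # yaml.safe_load("!!python/object/apply:os.system ['true']")  # raises ConstructorError
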
diff --git a/lib/galaxy/jobs/runners/util/job_script/__init__.py b/lib/galaxy/jobs/runners/util/job_script/__init__.py
index 1bcc391fc6b6..3a29ac2cc436 100644
--- a/lib/galaxy/jobs/runners/util/job_script/__init__.py
+++ b/lib/galaxy/jobs/runners/util/job_script/__init__.py
@@ -1,3 +1,4 @@
+import logging
import os
import subprocess
import time
@@ -8,6 +9,7 @@
from galaxy.util import unicodify
+log = logging.getLogger(__name__)
DEFAULT_SHELL = '/bin/bash'
DEFAULT_JOB_FILE_TEMPLATE = Template(
@@ -118,12 +120,13 @@ def _handle_script_integrity(path, config):
sleep_amt = getattr(config, "check_job_script_integrity_sleep", DEFAULT_INTEGRITY_SLEEP)
for i in range(count):
try:
- proc = subprocess.Popen([path], shell=True, env={"ABC_TEST_JOB_SCRIPT_INTEGRITY_XYZ": "1"})
- proc.wait()
- if proc.returncode == 42:
+ returncode = subprocess.call([path], env={"ABC_TEST_JOB_SCRIPT_INTEGRITY_XYZ": "1"})
+ if returncode == 42:
script_integrity_verified = True
break
+ log.debug("Script integrity error: returncode was %d", returncode)
+
# Else we will sync and wait to see if the script becomes
# executable.
try:
@@ -131,11 +134,13 @@ def _handle_script_integrity(path, config):
# These have occurred both in Docker containers and on EC2 clusters
# under high load.
subprocess.check_call(INTEGRITY_SYNC_COMMAND)
- except Exception:
- pass
- time.sleep(sleep_amt)
- except Exception:
- pass
+ except Exception as e:
+ log.debug("Error syncing the filesystem: %s", unicodify(e))
+
+ except Exception as exc:
+ log.debug("Script not available yet: %s", unicodify(exc))
+
+ time.sleep(sleep_amt)
if not script_integrity_verified:
raise Exception("Failed to write job script, could not verify job script integrity.")
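
The restructured loop above logs each failure and, crucially, sleeps after every attempt rather than only inside the inner try. The shape of the retry, reduced to a sketch (names hypothetical):

    import time

    def verify_with_retries(check, attempts, sleep_amt):
        for _ in range(attempts):
            try:
                if check():
                    return True
            except Exception:
                pass  # the patch logs the error here rather than swallowing it silently
            time.sleep(sleep_amt)  # back off after every failed attempt
        return False
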
diff --git a/lib/galaxy/jobs/runners/util/kill.py b/lib/galaxy/jobs/runners/util/kill.py
index 52022466552c..4abce160eb2e 100644
--- a/lib/galaxy/jobs/runners/util/kill.py
+++ b/lib/galaxy/jobs/runners/util/kill.py
@@ -1,7 +1,7 @@
import os
+import subprocess
from platform import system
from time import sleep
-from subprocess import Popen
try:
from psutil import Process, NoSuchProcess
@@ -41,8 +41,8 @@ def _stock_kill_pid(pid):
def __kill_windows(pid):
try:
- Popen("taskkill /F /T /PID %i" % pid, shell=True)
- except Exception:
+ subprocess.check_call(['taskkill', '/F', '/T', '/PID', str(pid)])
+ except subprocess.CalledProcessError:
pass
diff --git a/lib/galaxy/jobs/transfer_manager.py b/lib/galaxy/jobs/transfer_manager.py
index ccd5c88e67fb..2d2eb162af86 100644
--- a/lib/galaxy/jobs/transfer_manager.py
+++ b/lib/galaxy/jobs/transfer_manager.py
@@ -9,6 +9,8 @@
import subprocess
import threading
+from six.moves import shlex_quote
+
from galaxy.util import listify, sleeper
from galaxy.util.json import jsonrpc_request, validate_jsonrpc_response
@@ -22,8 +24,8 @@ class TransferManager( object ):
def __init__( self, app ):
self.app = app
self.sa_session = app.model.context.current
- self.command = 'python %s' % os.path.abspath( os.path.join( os.getcwd(), 'scripts', 'transfer.py' ) )
- if app.config.get_bool( 'enable_job_recovery', True ):
+ self.command = ['python', os.path.abspath(os.path.join(os.getcwd(), 'scripts', 'transfer.py'))]
+ if app.config.get_bool('enable_job_recovery', True):
# Only one Galaxy server process should be able to recover jobs! (otherwise you'll have nasty race conditions)
self.running = True
self.sleeper = sleeper.Sleeper()
@@ -67,9 +69,9 @@ def run( self, transfer_jobs ):
# The transfer script should daemonize fairly quickly - if this is
# not the case, this process will need to be moved to a
# non-blocking method.
- cmd = '%s %s' % ( self.command, tj.id )
- log.debug( 'Transfer command is: %s' % cmd )
- p = subprocess.Popen( cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
+ cmd = self.command + [str(tj.id)]
+ log.debug('Transfer command is: %s', ' '.join(map(shlex_quote, cmd)))
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
p.wait()
output = p.stdout.read( 32768 )
if p.returncode != 0:
diff --git a/lib/galaxy/managers/citations.py b/lib/galaxy/managers/citations.py
index ef83443d67aa..9e0613648483 100644
--- a/lib/galaxy/managers/citations.py
+++ b/lib/galaxy/managers/citations.py
@@ -1,6 +1,6 @@
import functools
import os
-import urllib2
+import requests
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
@@ -47,10 +47,8 @@ def __init__( self, config ):
def _raw_get_bibtex( self, doi ):
dx_url = "http://dx.doi.org/" + doi
headers = {'Accept': 'text/bibliography; style=bibtex, application/x-bibtex'}
- req = urllib2.Request(dx_url, data="", headers=headers)
- response = urllib2.urlopen(req)
- bibtex = response.read()
- return bibtex
+ response = requests.get(dx_url, headers=headers)
+ return response.text
def get_bibtex( self, doi ):
createfunc = functools.partial(self._raw_get_bibtex, doi)
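
The BibTeX fetch works by DOI content negotiation: the Accept header, not the URL, selects the representation. A standalone sketch of the same request with the error handling the patched code omits (timeout value illustrative):

    import requests

    def bibtex_for(doi):
        response = requests.get(
            "http://dx.doi.org/" + doi,
            headers={"Accept": "text/bibliography; style=bibtex, application/x-bibtex"},
            timeout=30,
        )
        response.raise_for_status()
        return response.text
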
diff --git a/lib/galaxy/objectstore/s3.py b/lib/galaxy/objectstore/s3.py
index 919d9d19ca40..ca87266cfe3c 100644
--- a/lib/galaxy/objectstore/s3.py
+++ b/lib/galaxy/objectstore/s3.py
@@ -18,6 +18,7 @@
safe_relpath,
string_as_bool,
umask_fix_perms,
+ which,
)
from galaxy.util.sleeper import Sleeper
@@ -66,10 +67,9 @@ def __init__(self, config, config_xml):
self.cache_monitor_thread.start()
log.info("Cache cleaner manager started")
# Test if 'axel' is available for parallel download and pull the key into cache
- try:
- subprocess.call('axel')
+ if which('axel'):
self.use_axel = True
- except OSError:
+ else:
self.use_axel = False
def _configure_connection(self):
@@ -340,7 +340,7 @@ def _download(self, rel_path):
log.debug("Parallel pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
ncores = multiprocessing.cpu_count()
url = key.generate_url(7200)
- ret_code = subprocess.call("axel -a -n %s '%s'" % (ncores, url))
+ ret_code = subprocess.call(['axel', '-a', '-n', str(ncores), url])
if ret_code == 0:
return True
else:
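
Probing for axel by invoking it actually runs the program; checking PATH is side-effect free, which is what galaxy.util.which does. A generic, dependency-free sketch of such a helper (not the Galaxy implementation):

    import os

    def which(program):
        for directory in os.environ.get('PATH', '').split(os.pathsep):
            candidate = os.path.join(directory, program)
            if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
                return candidate
        return None
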
diff --git a/lib/galaxy/tools/data/__init__.py b/lib/galaxy/tools/data/__init__.py
index f13d4c9b1b4b..91493d9ad0c0 100644
--- a/lib/galaxy/tools/data/__init__.py
+++ b/lib/galaxy/tools/data/__init__.py
@@ -15,7 +15,8 @@
from glob import glob
from tempfile import NamedTemporaryFile
-from urllib2 import urlopen
+
+import requests
from galaxy import util
from galaxy.util.odict import odict
@@ -293,7 +294,7 @@ def configure_and_load( self, config_element, tool_data_path, from_shed_config=F
if filename:
tmp_file = NamedTemporaryFile( prefix='TTDT_URL_%s-' % self.name )
try:
- tmp_file.write( urlopen( filename, timeout=url_timeout ).read() )
+ tmp_file.write(requests.get(filename, timeout=url_timeout).content)
except Exception as e:
log.error( 'Error loading Data Table URL "%s": %s', filename, e )
continue
diff --git a/lib/galaxy/tools/imp_exp/unpack_tar_gz_archive.py b/lib/galaxy/tools/imp_exp/unpack_tar_gz_archive.py
index 932779c0e438..46e12d837566 100644
--- a/lib/galaxy/tools/imp_exp/unpack_tar_gz_archive.py
+++ b/lib/galaxy/tools/imp_exp/unpack_tar_gz_archive.py
@@ -11,10 +11,11 @@
import optparse
import tarfile
import tempfile
-import urllib2
import math
from base64 import b64decode
+import requests
+
# Set max size of archive/file that will be handled to be 100 GB. This is
# arbitrary and should be adjusted as needed.
MAX_SIZE = 100 * math.pow( 2, 30 )
@@ -25,18 +26,16 @@ def url_to_file( url, dest_file ):
Transfer a file from a remote URL to a temporary file.
"""
try:
- url_reader = urllib2.urlopen( url )
+ url_reader = requests.get(url, stream=True)
CHUNK = 10 * 1024 # 10k
total = 0
fp = open( dest_file, 'wb')
- while True:
- chunk = url_reader.read( CHUNK )
- if not chunk:
- break
- fp.write( chunk )
- total += CHUNK
- if total > MAX_SIZE:
- break
+ for chunk in url_reader.iter_content(chunk_size=CHUNK):
+ if chunk:
+ fp.write(chunk)
+ total += len(chunk)
+ if total > MAX_SIZE:
+ break
fp.close()
return dest_file
except Exception as e:
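
iter_content may yield chunks smaller than the requested size (the last one almost always is), so counting len(chunk) keeps the 100 GB cap byte-accurate. A self-contained sketch of the capped streaming download:

    import requests

    MAX_SIZE = 100 * 2 ** 30  # same cap as the script

    def url_to_file(url, dest_file, chunk_size=10 * 1024):
        total = 0
        reader = requests.get(url, stream=True, timeout=60)
        reader.raise_for_status()
        with open(dest_file, 'wb') as fp:
            for chunk in reader.iter_content(chunk_size=chunk_size):
                if chunk:  # skip keep-alive chunks
                    fp.write(chunk)
                    total += len(chunk)
                    if total > MAX_SIZE:
                        break
        return dest_file
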
diff --git a/lib/galaxy/tools/toolbox/parser.py b/lib/galaxy/tools/toolbox/parser.py
index b81e6421cb6b..0900de11fdab 100644
--- a/lib/galaxy/tools/toolbox/parser.py
+++ b/lib/galaxy/tools/toolbox/parser.py
@@ -59,7 +59,7 @@ class YamlToolConfSource(ToolConfSource):
def __init__(self, config_filename):
with open(config_filename, "r") as f:
- as_dict = yaml.load(f)
+ as_dict = yaml.safe_load(f)
self.as_dict = as_dict
def parse_tool_path(self):
diff --git a/lib/galaxy/tours/__init__.py b/lib/galaxy/tours/__init__.py
index c497f2de284d..c2f517fa5195 100644
--- a/lib/galaxy/tours/__init__.py
+++ b/lib/galaxy/tours/__init__.py
@@ -68,7 +68,7 @@ def _load_tour_from_path(self, tour_path):
tour_id = os.path.splitext(filename)[0]
try:
with open(tour_path) as handle:
- conf = yaml.load(handle)
+ conf = yaml.safe_load(handle)
tour = tour_loader(conf)
self.tours[tour_id] = tour_loader(conf)
log.info("Loaded tour '%s'" % tour_id)
diff --git a/lib/galaxy/util/plugin_config.py b/lib/galaxy/util/plugin_config.py
index c03633692cc9..8095d279fe9c 100644
--- a/lib/galaxy/util/plugin_config.py
+++ b/lib/galaxy/util/plugin_config.py
@@ -76,4 +76,4 @@ def __read_yaml(path):
raise ImportError("Attempting to read YAML configuration file - but PyYAML dependency unavailable.")
with open(path, "rb") as f:
- return yaml.load(f)
+ return yaml.safe_load(f)
diff --git a/lib/galaxy/web/base/interactive_environments.py b/lib/galaxy/web/base/interactive_environments.py
index f9fdcf6fba3b..d6b09e45daea 100644
--- a/lib/galaxy/web/base/interactive_environments.py
+++ b/lib/galaxy/web/base/interactive_environments.py
@@ -93,7 +93,7 @@ def load_allowed_images(self):
raise Exception("[{0}] Could not find allowed_images.yml, or image tag in {0}.ini file for ".format(self.attr.viz_id))
with open(fn, 'r') as handle:
- self.allowed_images = [x['image'] for x in yaml.load(handle)]
+ self.allowed_images = [x['image'] for x in yaml.safe_load(handle)]
if len(self.allowed_images) == 0:
raise Exception("No allowed images specified for " + self.attr.viz_id)
diff --git a/lib/galaxy/web/proxy/__init__.py b/lib/galaxy/web/proxy/__init__.py
index 8db25a730b6f..993bc487965b 100644
--- a/lib/galaxy/web/proxy/__init__.py
+++ b/lib/galaxy/web/proxy/__init__.py
@@ -7,9 +7,10 @@
from galaxy.util.lazy_process import LazyProcess, NoOpLazyProcess
from galaxy.util import sqlite
from galaxy.util import unique_id
-import urllib2
import time
+import requests
+
log = logging.getLogger( __name__ )
@@ -244,20 +245,16 @@ def handle_requests(self, authentication, proxy_requests, route_name, container_
'ContainerIds': container_ids,
}
- req = urllib2.Request(self.api_url)
- req.add_header('Content-Type', 'application/json')
-
# Sometimes it takes our poor little proxy a second or two to get
# going, so if this fails, re-call ourselves with an increased timeout.
try:
- urllib2.urlopen(req, json.dumps(values))
- except urllib2.URLError as err:
- log.debug(err)
+ requests.post(self.api_url, headers={'Content-Type': 'application/json'}, data=json.dumps(values))
+ except requests.exceptions.ConnectionError as err:
+ log.exception(err)
if sleep > 5:
excp = "Could not contact proxy after %s seconds" % sum(range(sleep + 1))
raise Exception(excp)
time.sleep(sleep)
self.handle_requests(authentication, proxy_requests, route_name, container_ids, sleep=sleep + 1)
- pass
# TODO: MQ driven proxy?
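
urllib2.urlopen(req, data) always issues a POST, which is why the replacement above uses requests.post rather than requests.get. Recent requests versions can also serialize the body and set the header in one step via json=; a sketch (endpoint illustrative):

    import requests

    def notify_proxy(api_url, container_ids):
        # json= sets Content-Type: application/json and serializes the payload.
        return requests.post(api_url, json={'ContainerIds': container_ids}, timeout=5)
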
diff --git a/lib/galaxy/webapps/galaxy/controllers/async.py b/lib/galaxy/webapps/galaxy/controllers/async.py
index c0dca96de903..6eee8972a1b0 100644
--- a/lib/galaxy/webapps/galaxy/controllers/async.py
+++ b/lib/galaxy/webapps/galaxy/controllers/async.py
@@ -2,9 +2,13 @@
Upload class
"""
+from __future__ import absolute_import
+
import logging
import urllib
+import requests
+
from galaxy import jobs, web
from galaxy.util import Params
from galaxy.util.hash_util import hmac_new
@@ -131,8 +135,7 @@ def index(self, trans, tool_id=None, data_secret=None, **kwd):
url = "%s%s%s" % ( url, url_join_char, urllib.urlencode( params.flatten() ) )
log.debug("connecting to -> %s" % url)
trans.log_event( "Async connecting to -> %s" % url )
- text = urllib.urlopen(url).read(-1)
- text = text.strip()
+ text = requests.get(url).text.strip()
if not text.endswith('OK'):
raise Exception( text )
data.state = data.blurb = data.states.RUNNING
diff --git a/lib/galaxy/webapps/galaxy/controllers/library_common.py b/lib/galaxy/webapps/galaxy/controllers/library_common.py
index 680bb46e823c..87252bd143b6 100644
--- a/lib/galaxy/webapps/galaxy/controllers/library_common.py
+++ b/lib/galaxy/webapps/galaxy/controllers/library_common.py
@@ -1,4 +1,7 @@
+from __future__ import absolute_import
+
import glob
+import json
import logging
import operator
import os
@@ -8,10 +11,9 @@
import tarfile
import tempfile
import urllib
-import urllib2
import zipfile
-from json import dumps, loads
+import requests
from markupsafe import escape
from sqlalchemy import and_, false
from sqlalchemy.orm import eagerload_all
@@ -554,7 +556,7 @@ def __ok_to_edit_metadata( ldda_id ):
if len(em_string):
payload = None
try:
- payload = loads(em_string)
+ payload = json.loads(em_string)
except Exception:
message = 'Invalid JSON input'
status = 'error'
@@ -1121,8 +1123,8 @@ def upload_dataset( self, trans, cntrller, library_id, folder_id, replace_datase
json_file_path = upload_common.create_paramfile( trans, uploaded_datasets )
data_list = [ ud.data for ud in uploaded_datasets ]
job_params = {}
- job_params['link_data_only'] = dumps( kwd.get( 'link_data_only', 'copy_files' ) )
- job_params['uuid'] = dumps( kwd.get( 'uuid', None ) )
+ job_params['link_data_only'] = json.dumps(kwd.get('link_data_only', 'copy_files'))
+ job_params['uuid'] = json.dumps(kwd.get('uuid', None))
job, output = upload_common.create_job( trans, tool_params, tool, json_file_path, data_list, folder=library_bunch.folder, job_params=job_params )
trans.sa_session.add( job )
trans.sa_session.flush()
@@ -2760,9 +2762,7 @@ def lucene_search( trans, cntrller, search_term, search_url, **kwd ):
message = escape( kwd.get( 'message', '' ) )
status = kwd.get( 'status', 'done' )
full_url = "%s/find?%s" % ( search_url, urllib.urlencode( { "kwd" : search_term } ) )
- response = urllib2.urlopen( full_url )
- ldda_ids = loads( response.read() )[ "ids" ]
- response.close()
+ ldda_ids = requests.get(full_url).json()['ids']
lddas = [ trans.sa_session.query( trans.app.model.LibraryDatasetDatasetAssociation ).get( ldda_id ) for ldda_id in ldda_ids ]
return status, message, get_sorted_accessible_library_items( trans, cntrller, lddas, 'name' )
diff --git a/lib/galaxy/webapps/galaxy/controllers/root.py b/lib/galaxy/webapps/galaxy/controllers/root.py
index 09eb7b432f74..494f68d5fc0d 100644
--- a/lib/galaxy/webapps/galaxy/controllers/root.py
+++ b/lib/galaxy/webapps/galaxy/controllers/root.py
@@ -1,10 +1,12 @@
"""
Contains the main interface in the Universe class
"""
+from __future__ import absolute_import
+
import cgi
import os
-import urllib
+import requests
from paste.httpexceptions import HTTPNotFound, HTTPBadGateway
from galaxy import web
@@ -480,8 +482,8 @@ def welcome( self, trans ):
def bucket_proxy( self, trans, bucket=None, **kwd):
if bucket:
trans.response.set_content_type( 'text/xml' )
- b_list_xml = urllib.urlopen('http://s3.amazonaws.com/%s/' % bucket)
- return b_list_xml.read()
+ b_list_xml = requests.get('http://s3.amazonaws.com/%s/' % bucket)
+ return b_list_xml.text
raise Exception("You must specify a bucket")
# ---- Debug methods ----------------------------------------------------
diff --git a/lib/galaxy/webapps/galaxy/controllers/visualization.py b/lib/galaxy/webapps/galaxy/controllers/visualization.py
index f0195cf1c896..2b0998973d51 100644
--- a/lib/galaxy/webapps/galaxy/controllers/visualization.py
+++ b/lib/galaxy/webapps/galaxy/controllers/visualization.py
@@ -1041,7 +1041,7 @@ def gie_list( self, trans, **kwargs ):
continue
with open( image_file, 'r' ) as handle:
- self.gie_image_map[gie] = yaml.load( handle )
+ self.gie_image_map[gie] = yaml.safe_load(handle)
return trans.fill_template_mako(
"visualization/gie.mako",
diff --git a/lib/galaxy/webapps/galaxy/controllers/workflow.py b/lib/galaxy/webapps/galaxy/controllers/workflow.py
index 37150705caa6..8e832ff34837 100644
--- a/lib/galaxy/webapps/galaxy/controllers/workflow.py
+++ b/lib/galaxy/webapps/galaxy/controllers/workflow.py
@@ -1,10 +1,12 @@
+from __future__ import absolute_import
+
import base64
import httplib
import json
import logging
import os
import sgmllib
-import urllib2
+import requests
from sqlalchemy import and_
from sqlalchemy.sql import expression
@@ -914,7 +916,7 @@ def import_workflow( self, trans, cntrller='workflow', **kwd ):
# Load workflow from external URL
# NOTE: blocks the web thread.
try:
- workflow_data = urllib2.urlopen( url ).read()
+ workflow_data = requests.get(url).text
except Exception as e:
message = "Failed to open URL: %s
Exception: %s" % ( escape( url ), escape( str( e ) ) )
status = 'error'
diff --git a/lib/galaxy/webapps/reports/controllers/system.py b/lib/galaxy/webapps/reports/controllers/system.py
index 419701a9a72e..72c614b92379 100644
--- a/lib/galaxy/webapps/reports/controllers/system.py
+++ b/lib/galaxy/webapps/reports/controllers/system.py
@@ -1,5 +1,6 @@
import logging
import os
+import subprocess
from datetime import datetime, timedelta
from decimal import Decimal
@@ -148,12 +149,11 @@ def dataset_info( self, trans, **kwd ):
message=message )
def get_disk_usage( self, file_path ):
- df_cmd = 'df -h ' + file_path
is_sym_link = os.path.islink( file_path )
file_system = disk_size = disk_used = disk_avail = disk_cap_pct = mount = None
- df_file = os.popen( df_cmd )
- while True:
- df_line = df_file.readline()
+ df_output = subprocess.check_output(['df', '-h', file_path])
+
+ for df_line in df_output.splitlines():
df_line = df_line.strip()
if df_line:
df_line = df_line.lower()
@@ -176,7 +176,6 @@ def get_disk_usage( self, file_path ):
pass
else:
break # EOF
- df_file.close()
return ( file_system, disk_size, disk_used, disk_avail, disk_cap_pct, mount )
@web.expose
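
check_output returns the whole df(1) output as a single blob, so the loop must iterate splitlines() rather than characters. Reduced to a sketch that grabs the data row (assumes the mount line is not wrapped):

    import subprocess

    def disk_usage(path):
        out = subprocess.check_output(['df', '-h', path])
        lines = out.decode('utf-8', errors='replace').splitlines()
        # line 0 is the header; line 1 holds filesystem, size, used, avail, use%, mount
        return lines[1].split() if len(lines) > 1 else None
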
diff --git a/lib/galaxy/webapps/tool_shed/controllers/upload.py b/lib/galaxy/webapps/tool_shed/controllers/upload.py
index 1d005c7f7f29..84aa5bafb56c 100644
--- a/lib/galaxy/webapps/tool_shed/controllers/upload.py
+++ b/lib/galaxy/webapps/tool_shed/controllers/upload.py
@@ -3,7 +3,8 @@
import shutil
import tarfile
import tempfile
-import urllib
+
+import requests
from galaxy import util
from galaxy import web
@@ -74,7 +75,7 @@ def upload( self, trans, **kwd ):
elif url:
valid_url = True
try:
- stream = urllib.urlopen( url )
+ stream = requests.get(url, stream=True)
except Exception as e:
valid_url = False
message = 'Error uploading file via http: %s' % str( e )
@@ -83,11 +84,9 @@ def upload( self, trans, **kwd ):
if valid_url:
fd, uploaded_file_name = tempfile.mkstemp()
uploaded_file = open( uploaded_file_name, 'wb' )
- while 1:
- chunk = stream.read( util.CHUNK_SIZE )
- if not chunk:
- break
- uploaded_file.write( chunk )
+ for chunk in stream.iter_content(chunk_size=util.CHUNK_SIZE):
+ if chunk:
+ uploaded_file.write(chunk)
uploaded_file.flush()
uploaded_file_filename = url.split( '/' )[ -1 ]
isempty = os.path.getsize( os.path.abspath( uploaded_file_name ) ) == 0
diff --git a/lib/tool_shed/capsule/capsule_manager.py b/lib/tool_shed/capsule/capsule_manager.py
index de198432d0b8..3b5e3a62ea4a 100644
--- a/lib/tool_shed/capsule/capsule_manager.py
+++ b/lib/tool_shed/capsule/capsule_manager.py
@@ -7,7 +7,7 @@
import threading
from time import gmtime, strftime
-from six.moves.urllib.request import urlopen
+import requests
from sqlalchemy import and_, false
import tool_shed.repository_types.util as rt_util
@@ -809,24 +809,20 @@ def upload_capsule( self, **kwd ):
uploaded_file=None,
capsule_file_name=None )
if url:
- valid_url = True
try:
- stream = urlopen( url )
+ stream = requests.get(url, stream=True)
except Exception as e:
- valid_url = False
return_dict['error_message'] = 'Error importing file via http: %s' % str( e )
return_dict['status'] = 'error'
return return_dict
- if valid_url:
- fd, uploaded_file_name = tempfile.mkstemp()
- uploaded_file = open( uploaded_file_name, 'wb' )
- while 1:
- chunk = stream.read( CHUNK_SIZE )
- if not chunk:
- break
- uploaded_file.write( chunk )
- uploaded_file.flush()
- uploaded_file_filename = url.split( '/' )[ -1 ]
+
+ fd, uploaded_file_name = tempfile.mkstemp()
+ uploaded_file = open( uploaded_file_name, 'wb' )
+ for chunk in stream.iter_content(chunk_size=CHUNK_SIZE):
+ if chunk:
+ uploaded_file.write(chunk)
+ uploaded_file.flush()
+ uploaded_file_filename = url.split( '/' )[ -1 ]
elif file_data not in ( '', None ):
uploaded_file = file_data.file
uploaded_file_name = uploaded_file.name
diff --git a/scripts/data_libraries/build_lucene_index.py b/scripts/data_libraries/build_lucene_index.py
index 5ffec9c9d752..04fd89af0d37 100644
--- a/scripts/data_libraries/build_lucene_index.py
+++ b/scripts/data_libraries/build_lucene_index.py
@@ -14,7 +14,8 @@
import os
import sys
import urllib
-import urllib2
+
+import requests
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'lib')))
@@ -39,9 +40,7 @@ def main( ini_file ):
def build_index( search_url, dataset_file ):
url = "%s/index?%s" % ( search_url, urllib.urlencode( { "docfile": dataset_file } ) )
- request = urllib2.Request( url )
- request.get_method = lambda: "PUT"
- urllib2.urlopen( request )
+ requests.put(url)
def create_dataset_file( dataset_iter ):
diff --git a/scripts/edam_mapping.py b/scripts/edam_mapping.py
index f82685a91fd6..f483db8c1f5e 100644
--- a/scripts/edam_mapping.py
+++ b/scripts/edam_mapping.py
@@ -16,9 +16,10 @@
import os
import sys
-import urllib2
from xml import etree
+import requests
+
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, 'lib')))
import galaxy.model
@@ -35,7 +36,7 @@
if not os.path.exists("/tmp/edam.owl"):
- open("/tmp/edam.owl", "w").write( urllib2.urlopen( EDAM_OWL_URL ).read() )
+ open("/tmp/edam.owl", "w").write(requests.get(EDAM_OWL_URL).text)
owl_xml_tree = etree.ElementTree.parse("/tmp/edam.owl")
diff --git a/scripts/microbes/harvest_bacteria.py b/scripts/microbes/harvest_bacteria.py
index 9b0bfb8eb0f0..de985bc67ef0 100644
--- a/scripts/microbes/harvest_bacteria.py
+++ b/scripts/microbes/harvest_bacteria.py
@@ -8,9 +8,9 @@
import sys
import time
from ftplib import FTP
-from urllib2 import urlopen
from urllib import urlretrieve
+import requests
from BeautifulSoup import BeautifulSoup
from util import get_bed_from_genbank, get_bed_from_glimmer3, get_bed_from_GeneMarkHMM, get_bed_from_GeneMark
@@ -26,7 +26,7 @@
# number, name, chroms, kingdom, group, genbank, refseq, info_url, ftp_url
def iter_genome_projects( url="http://www.ncbi.nlm.nih.gov/genomes/lproks.cgi?view=1", info_url_base="http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?db=genomeprj&cmd=Retrieve&dopt=Overview&list_uids=" ):
- for row in BeautifulSoup( urlopen( url ) ).findAll( name='tr', bgcolor=["#EEFFDD", "#E8E8DD"] ):
+ for row in BeautifulSoup(requests.get(url).text).findAll(name='tr', bgcolor=["#EEFFDD", "#E8E8DD"]):
row = str( row ).replace( "\n", "" ).replace( "\r", "" )
fields = row.split( "</td>" )
@@ -65,7 +65,7 @@ def get_chroms_by_project_id( org_num, base_url="http://www.ncbi.nlm.nih.gov/ent
html_count += 1
url = "%s%s" % ( base_url, org_num )
try:
- html = urlopen( url )
+ html = requests.get(url).text
except:
print "GENOME PROJECT FAILED:", html_count, "org:", org_num, url
html = None
diff --git a/scripts/tool_shed/api/export.py b/scripts/tool_shed/api/export.py
index 0d18e2b6abc4..6057a1c3dfe0 100644
--- a/scripts/tool_shed/api/export.py
+++ b/scripts/tool_shed/api/export.py
@@ -11,7 +11,8 @@
import os
import sys
import tempfile
-import urllib2
+
+import requests
sys.path.insert( 1, os.path.join( os.path.dirname( __file__ ), os.pardir, os.pardir, os.pardir, 'lib' ) )
from tool_shed.util import basic_util
@@ -98,24 +99,11 @@ def main( options ):
download_url = export_dict[ 'download_url' ]
download_dir = os.path.abspath( options.download_dir )
file_path = os.path.join( download_dir, repositories_archive_filename )
- src = None
- dst = None
- try:
- src = urllib2.urlopen( download_url )
- dst = open( file_path, 'wb' )
- while True:
- chunk = src.read( CHUNK_SIZE )
+ src = requests.get(download_url, stream=True)
+ with open(file_path, 'wb') as dst:
+ for chunk in src.iter_content(chunk_size=CHUNK_SIZE):
if chunk:
- dst.write( chunk )
- else:
- break
- except:
- raise
- finally:
- if src:
- src.close()
- if dst:
- dst.close()
+ dst.write(chunk)
print "Successfully exported revision ", options.changeset_revision, " of repository ", options.name, " owned by ", options.owner
print "to location ", file_path
else:
diff --git a/scripts/transfer.py b/scripts/transfer.py
index 3ac085b982a9..fe1c123b2a57 100644
--- a/scripts/transfer.py
+++ b/scripts/transfer.py
@@ -215,6 +215,7 @@ def transfer( app, transfer_job_id ):
def http_transfer( transfer_job ):
"""Plugin" for handling http(s) transfers."""
url = transfer_job.params['url']
+ assert url.startswith('http://') or url.startswith('https://')
try:
f = urllib2.urlopen( url )
except urllib2.URLError as e:
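
The added assert only admits http(s) URLs before they reach urlopen. An equivalent check that raises a catchable error instead of AssertionError (which disappears under python -O) might look like:

    from six.moves.urllib.parse import urlparse

    def require_http_url(url):
        if urlparse(url).scheme not in ('http', 'https'):
            raise ValueError('unsupported URL scheme: %r' % url)
        return url
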
diff --git a/test/api/test_workflows.py b/test/api/test_workflows.py
index a8d3476c1c63..34453fde4a19 100644
--- a/test/api/test_workflows.py
+++ b/test/api/test_workflows.py
@@ -178,7 +178,7 @@ def read_test_data(test_dict):
)
if jobs_descriptions is None:
assert source_type != "path"
- jobs_descriptions = yaml.load( has_workflow )
+ jobs_descriptions = yaml.safe_load(has_workflow)
test_data = jobs_descriptions.get("test_data", {})
diff --git a/test/api/workflows_format_2/converter.py b/test/api/workflows_format_2/converter.py
index 05a9a319c497..c35ec2797c06 100644
--- a/test/api/workflows_format_2/converter.py
+++ b/test/api/workflows_format_2/converter.py
@@ -32,7 +32,7 @@
def yaml_to_workflow(has_yaml, galaxy_interface, workflow_directory):
"""Convert a Format 2 workflow into standard Galaxy format from supplied stream."""
- as_python = yaml.load(has_yaml)
+ as_python = yaml.safe_load(has_yaml)
return python_to_workflow(as_python, galaxy_interface, workflow_directory)
@@ -109,7 +109,7 @@ def _python_to_workflow(as_python, conversion_context):
run_action_path = run_action["@import"]
runnable_path = os.path.join(conversion_context.workflow_directory, run_action_path)
with open(runnable_path, "r") as f:
- runnable_description = yaml.load(f)
+ runnable_description = yaml.safe_load(f)
run_action = runnable_description
run_class = run_action["class"]
diff --git a/test/api/workflows_format_2/main.py b/test/api/workflows_format_2/main.py
index 774585684948..c1e13ede3e1e 100644
--- a/test/api/workflows_format_2/main.py
+++ b/test/api/workflows_format_2/main.py
@@ -20,7 +20,7 @@ def convert_and_import_workflow(has_workflow, **kwds):
if workflow_directory is None:
workflow_directory = os.path.dirname(has_workflow)
with open(workflow_path, "r") as f:
- has_workflow = yaml.load(f)
+ has_workflow = yaml.safe_load(f)
if workflow_directory is not None:
workflow_directory = os.path.abspath(workflow_directory)
diff --git a/test/unit/workflows/workflow_support.py b/test/unit/workflows/workflow_support.py
index e21887b4f405..beb71bc53512 100644
--- a/test/unit/workflows/workflow_support.py
+++ b/test/unit/workflows/workflow_support.py
@@ -74,7 +74,7 @@ def get_tool_id( self, tool_id ):
def yaml_to_model(has_dict, id_offset=100):
if isinstance(has_dict, str):
- has_dict = yaml.load(has_dict)
+ has_dict = yaml.safe_load(has_dict)
workflow = model.Workflow()
workflow.steps = []