pep8 clean-up

1 parent 2ea40bd commit bd9fb5766b45ee12f6cf5ce513db0501868c778d @jtriley committed Jul 17, 2012
@@ -142,7 +142,7 @@ def parse_qacct(self, string, dtnow):
else:
end = self.qacct_to_datetime_tuple(l[13:len(l)])
if l.find('==========') != -1:
- if qd != None:
+ if qd is not None:
self.max_job_id = job_id
hash = {'queued': qd, 'start': start, 'end': end}
self.jobstats[job_id % self.jobstat_cachesize] = hash
@@ -256,7 +256,7 @@ def avg_job_duration(self):
count = 0
total_seconds = 0
for job in self.jobstats:
- if job != None:
+ if job is not None:
delta = job['end'] - job['start']
total_seconds += delta.seconds
count += 1
@@ -269,7 +269,7 @@ def avg_wait_time(self):
count = 0
total_seconds = 0
for job in self.jobstats:
- if job != None:
+ if job is not None:
delta = job['start'] - job['queued']
total_seconds += delta.seconds
count += 1
@@ -410,7 +410,7 @@ def __init__(self, interval=60, max_nodes=None, wait_time=900,
self.plot_stats = plot_stats
self.plot_output_dir = plot_output_dir
if plot_stats:
- assert self.visualizer != None
+ assert self.visualizer is not None
@property
def visualizer(self):
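
Note: PEP 8 (pycodestyle rule E711) asks for identity tests against None, as in the hunks above, rather than equality tests, because None is a singleton and the equality operators can be overridden. A minimal illustration with an invented class, not code from StarCluster:

    class Chameleon(object):
        # Hypothetical class that hijacks both equality operators.
        def __eq__(self, other):
            return True
        def __ne__(self, other):
            return False

    c = Chameleon()
    print(c != None)        # False: the equality operator lies about None
    print(c is not None)    # True: the identity check cannot be overridden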
@@ -270,8 +270,8 @@ def list_clusters(self, cluster_groups=None, show_ssh_status=False):
d = devices.get(dev)
vol_id = d.volume_id
status = d.status
- print ' %s on %s:%s (status: %s)' % \
- (vol_id, node_id, dev, status)
+ print(' %s on %s:%s (status: %s)' %
+ (vol_id, node_id, dev, status))
else:
print 'EBS volumes: N/A'
spot_reqs = cl.spot_requests
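
Note: with its single argument wrapped in parentheses, the print call above behaves the same under the Python 2 print statement and the Python 3 print function, and the open parenthesis lets the long format string wrap without a trailing backslash. A small sketch with made-up values:

    vol_id, node_id, dev, status = 'vol-12345', 'node001', '/dev/sdz', 'attached'
    # One parenthesised expression: identical output on Python 2 and 3,
    # and the open paren allows a clean multi-line continuation.
    print('    %s on %s:%s (status: %s)' %
          (vol_id, node_id, dev, status))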
@@ -290,7 +290,7 @@ def list_clusters(self, cluster_groups=None, show_ssh_status=False):
print 'Cluster nodes:'
for node in nodes:
nodeline = " %7s %s %s %s" % (node.alias, node.state,
- node.id, node.dns_name)
+ node.id, node.dns_name)
if node.spot_id:
nodeline += ' (spot %s)' % node.spot_id
if show_ssh_status:
@@ -319,31 +319,31 @@ def run_plugin(self, plugin_name, cluster_tag):
class Cluster(object):
def __init__(self,
- ec2_conn=None,
- spot_bid=None,
- cluster_tag=None,
- cluster_description=None,
- cluster_size=None,
- cluster_user=None,
- cluster_shell=None,
- master_image_id=None,
- master_instance_type=None,
- node_image_id=None,
- node_instance_type=None,
- node_instance_types=[],
- availability_zone=None,
- keyname=None,
- key_location=None,
- volumes=[],
- plugins=[],
- permissions=[],
- refresh_interval=30,
- disable_queue=False,
- num_threads=20,
- disable_threads=False,
- cluster_group=None,
- force_spot_master=False,
- **kwargs):
+ ec2_conn=None,
+ spot_bid=None,
+ cluster_tag=None,
+ cluster_description=None,
+ cluster_size=None,
+ cluster_user=None,
+ cluster_shell=None,
+ master_image_id=None,
+ master_instance_type=None,
+ node_image_id=None,
+ node_instance_type=None,
+ node_instance_types=[],
+ availability_zone=None,
+ keyname=None,
+ key_location=None,
+ volumes=[],
+ plugins=[],
+ permissions=[],
+ refresh_interval=30,
+ disable_queue=False,
+ num_threads=20,
+ disable_threads=False,
+ cluster_group=None,
+ force_spot_master=False,
+ **kwargs):
now = time.strftime("%Y%m%d%H%M")
self.ec2 = ec2_conn
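
Note: the re-indented keyword arguments above follow the PEP 8 continuation-line rules (pycodestyle E127/E128): wrapped arguments either line up under the opening parenthesis or use a consistent hanging indent, instead of drifting to an arbitrary column. Roughly, with a toy signature that is not part of StarCluster:

    # Continuation lines aligned with the opening delimiter:
    def make_cluster(tag=None,
                     size=None,
                     keyname=None):
        return (tag, size, keyname)

    # Or a hanging indent, with nothing after the opening parenthesis:
    def make_cluster2(
            tag=None, size=None, keyname=None):
        return (tag, size, keyname)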
@@ -56,11 +56,8 @@
"""
__version__ = '0.5.0-dev'
-__all__ = (
- 'validate_ip', 'ip2long', 'long2ip', 'ip2hex', 'hex2ip',
- 'validate_cidr', 'cidr2block',
- 'IpRange', 'IpRangeList',
- )
+__all__ = ('validate_ip', 'ip2long', 'long2ip', 'ip2hex', 'hex2ip',
+ 'validate_cidr', 'cidr2block', 'IpRange', 'IpRangeList')
import re
@@ -329,8 +329,8 @@ def get_user_map(self, key_by_uid=False):
key = name
if key_by_uid:
key = uid
- user_map[key] = utils.struct_passwd([name, passwd, uid, gid,
- gecos, home, shell])
+ user_map[key] = utils.struct_passwd([name, passwd, uid, gid, gecos,
+ home, shell])
return user_map
def getgrgid(self, gid):
@@ -477,8 +477,8 @@ def add_to_known_hosts(self, username, nodes, add_self=True):
for node in nodes:
server_pkey = node.ssh.get_server_public_key()
node_names = {}.fromkeys([node.alias, node.private_dns_name,
- node.private_dns_name_short],
- node.private_ip_address)
+ node.private_dns_name_short],
+ node.private_ip_address)
node_names[node.public_dns_name] = node.ip_address
for name, ip in node_names.items():
name_ip = "%s,%s" % (name, ip)
@@ -198,10 +198,10 @@ def extract_word(line, point):
def autocomplete(parser,
- arg_completer=None, # means use default.
- opt_completer=None,
- subcmd_completer=None,
- subcommands=None):
+ arg_completer=None, # means use default.
+ opt_completer=None,
+ subcmd_completer=None,
+ subcommands=None):
"""Automatically detect if we are requested completing and if so generate
completion automatically from given parser.
@@ -266,8 +266,7 @@ def autocomplete(parser,
assert isinstance(subcommands, types.DictType)
value = guess_first_nonoption(parser, subcommands)
if value:
- if isinstance(value, types.ListType) or \
- isinstance(value, types.TupleType):
+ if isinstance(value, (types.ListType, types.TupleType)):
parser = value[0]
if len(value) > 1 and value[1]:
# override completer for command if it is present.
@@ -335,16 +334,13 @@ def autocomplete(parser,
if completer and (not prefix or not prefix.startswith('-')):
# Call appropriate completer depending on type.
- if isinstance(completer, types.StringType) or \
- isinstance(completer, types.ListType) or \
- isinstance(completer, types.TupleType):
+ if isinstance(completer, (types.StringType, types.ListType,
+ types.TupleType)):
completer = RegexCompleter(completer)
completions += completer(os.getcwd(), cline,
cpoint, prefix, suffix)
- elif isinstance(completer, types.FunctionType) or \
- isinstance(completer, types.LambdaType) or \
- isinstance(completer, types.ClassType) or \
- isinstance(completer, types.ObjectType):
+ elif isinstance(completer, (types.FunctionType, types.LambdaType,
+ types.ClassType, types.ObjectType)):
completions += completer(os.getcwd(), cline,
cpoint, prefix, suffix)
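
Note: since isinstance accepts a tuple of types, the chained or-checks above collapse into a single call. A minimal sketch using builtin types rather than the types-module aliases from the diff:

    def describe(value):
        # One isinstance call with a tuple replaces several or-ed checks.
        if isinstance(value, (str, list, tuple)):
            return 'string or sequence'
        elif isinstance(value, (int, float)):
            return 'number'
        return 'other'

    print(describe('abc'))   # string or sequence
    print(describe(3.5))     # number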
@@ -2,8 +2,7 @@
from starcluster.clustersetup import DefaultClusterSetup
from starcluster.logger import log
-ndb_mgmd_template = \
-'''
+ndb_mgmd_template = """
[NDBD DEFAULT]
NoOfReplicas=%(num_replicas)s
DataMemory=%(data_memory)s # How much memory to allocate for data storage
@@ -16,18 +15,16 @@
# IP address of the management node (this system)
HostName=%(mgm_ip)s
# Section for the storage nodes
-'''
+"""
-ndb_mgmd_storage = \
-'''
+ndb_mgmd_storage = """
[NDBD]
HostName=%(storage_ip)s
DataDir=%(data_dir)s
BackupDataDir=%(backup_data_dir)s
-'''
+"""
-MY_CNF = \
-'''
+MY_CNF = """
#
# The MySQL database server configuration file.
#
@@ -164,7 +161,7 @@
# The files must end with '.cnf', otherwise they'll be ignored.
#
!includedir /etc/mysql/conf.d/
-'''
+"""
class MysqlCluster(DefaultClusterSetup):
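
Note: assigning the templates above directly to a triple-quoted literal removes the trailing-backslash continuation, and PEP 8 prefers double-quote characters for triple-quoted strings to match the docstring convention. For instance, with a made-up template rather than the MySQL config from the diff:

    # Direct assignment: no backslash continuation needed before the literal.
    example_template = """
    [SECTION]
    Name=%(name)s
    DataDir=%(data_dir)s
    """

    print(example_template % {'name': 'demo', 'data_dir': '/tmp/demo'})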
@@ -30,7 +30,7 @@ def _supports_layout(self, node, envname, layout, window=''):
def _select_layout(self, node, envname, layout="main-vertical", window=''):
if layout not in self._layouts:
raise exception.PluginError("unknown layout (options: %s)" %
- ", ".join(self._layouts))
+ ", ".join(self._layouts))
cmd = 'tmux select-layout -t %s:%s %s'
return node.ssh.get_status(cmd % (envname, window, layout))
@@ -4,8 +4,9 @@
class SetupClass(ClusterSetup):
def __init__(self, my_arg, my_other_arg):
- log.debug("setupclass: my_arg = %s, my_other_arg = %s" % (my_arg,
- my_other_arg))
+ log.debug(
+ "setupclass: my_arg = %s, my_other_arg = %s" % (my_arg,
+ my_other_arg))
def run(self, nodes, master, user, shell, volumes):
log.debug('Hello from MYPLUGIN :D')
@@ -61,7 +61,7 @@ def test_shell_validation(self):
cases, '_validate_shell_setting')
if failed:
raise Exception('cluster allows invalid cluster shell (cases: %s)'
- % failed)
+ % failed)
def test_keypair_validation(self):
tmpfile = tempfile.NamedTemporaryFile()
@@ -267,13 +267,14 @@ def test_multiple_instance_types(self):
Test that config properly handles multiple instance types syntax
(within node_instance_type setting)
"""
- invalid_cases = [{'c1_node_type': 'c1.xlarge:ami-asdffdas'},
- {'c1_node_type': 'c1.xlarge:3'},
- {'c1_node_type': 'c1.xlarge:ami-asdffdas:3'},
- {'c1_node_type': 'c1.xlarge:asdf:asdf:asdf,m1.small'},
- {'c1_node_type': 'c1.asdf:4, m1.small'},
- {'c1_node_type': 'c1.xlarge: 0, m1.small'},
- {'c1_node_type': 'c1.xlarge:-1, m1.small'}]
+ invalid_cases = [
+ {'c1_node_type': 'c1.xlarge:ami-asdffdas'},
+ {'c1_node_type': 'c1.xlarge:3'},
+ {'c1_node_type': 'c1.xlarge:ami-asdffdas:3'},
+ {'c1_node_type': 'c1.xlarge:asdf:asdf:asdf,m1.small'},
+ {'c1_node_type': 'c1.asdf:4, m1.small'},
+ {'c1_node_type': 'c1.xlarge: 0, m1.small'},
+ {'c1_node_type': 'c1.xlarge:-1, m1.small'}]
for case in invalid_cases:
try:
self.get_custom_config(**case)
@@ -78,7 +78,7 @@ def myfunc():
My function took 0.000 mins
"""
prefix = msg
- if type(msg) == types.FunctionType:
+ if isinstance(msg, types.FunctionType):
prefix = msg.func_name
def wrap_f(func, *arg, **kargs):
@@ -93,7 +93,7 @@ def wrap_f(func, *arg, **kargs):
log.info(msg)
return res
- if type(msg) == types.FunctionType:
+ if isinstance(msg, types.FunctionType):
return decorator.decorator(wrap_f, msg)
else:
return decorator.decorator(wrap_f)
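
Note: PEP 8 also recommends isinstance(obj, SomeType) over comparing type(obj) directly, partly because isinstance respects inheritance. A small sketch with invented class names:

    class Base(object):
        pass

    class Derived(Base):
        pass

    d = Derived()
    print(type(d) == Base)       # False: exact type comparison misses subclasses
    print(isinstance(d, Base))   # True: isinstance accounts for inheritance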
