diff --git a/conf/carbon.amqp.conf.example b/conf/carbon.amqp.conf.example index 38e95c306..f634f0727 100644 --- a/conf/carbon.amqp.conf.example +++ b/conf/carbon.amqp.conf.example @@ -61,7 +61,7 @@ AMQP_VERBOSE = True # AMQP_METRIC_NAME_IN_BODY = False # NOTE: you cannot run both a cache and a relay on the same server -# with the default configuration, you have to specify a distinict +# with the default configuration, you have to specify distinct # interfaces and ports for the listeners. [relay] diff --git a/conf/carbon.conf.example b/conf/carbon.conf.example index 5ec1cc825..f722183c8 100644 --- a/conf/carbon.conf.example +++ b/conf/carbon.conf.example @@ -278,7 +278,7 @@ WHISPER_FALLOCATE_CREATE = True # CARBON_METRIC_PREFIX = carbon # CARBON_METRIC_INTERVAL = 60 -# Enable AMQP if you want to receve metrics using an amqp broker +# Enable AMQP if you want to receive metrics using an amqp broker # ENABLE_AMQP = False # Verbose means a line will be logged for every metric received @@ -444,7 +444,7 @@ DESTINATIONS = 127.0.0.1:2004 # This allows to have multiple connections per destinations, this will # pool all the replicas of a single host in the same queue and distribute -# points accross these replicas instead of replicating them. +# points across these replicas instead of replicating them. # The following example will balance the load between :0 and :1. ## DESTINATIONS = foo:2001:0, foo:2001:1 ## RELAY_METHOD = rules @@ -536,7 +536,7 @@ USE_RATIO_RESET=False MIN_RESET_STAT_FLOW=1000 # When the ratio of stats being sent in a reporting interval is far -# enough from 1.0, we will disconnect the socket and reconnecto to +# enough from 1.0, we will disconnect the socket and reconnect to # clear out queued stats. The default ratio of 0.9 indicates that 10% # of stats aren't being delivered within one CARBON_METRIC_INTERVAL # (default of 60 seconds), which can lead to a queue backup. 
diff --git a/lib/carbon/client.py b/lib/carbon/client.py index e98f76fdd..d9ba5a99c 100644 --- a/lib/carbon/client.py +++ b/lib/carbon/client.py @@ -503,7 +503,7 @@ class FakeClientFactory(object): """ def __init__(self): - # This queue isn't explicitely bounded but will implicitely be. It receives + # This queue isn't explicitly bounded but will implicitly be. It receives # only metrics when no destinations are available, and as soon as we detect # that we don't have any destination we pause the producer: this mean that # it will contain only a few seconds of metrics. @@ -531,7 +531,7 @@ class CarbonClientManager(Service): def __init__(self, router): if settings.DESTINATION_POOL_REPLICAS: # If we decide to open multiple TCP connection to a replica, we probably - # want to try to also load-balance accross hosts. In this case we need + # want to try to also load-balance across hosts. In this case we need # to make sure rfc3484 doesn't get in the way. if setUpRandomResolver: setUpRandomResolver(reactor) diff --git a/lib/carbon/conf.py b/lib/carbon/conf.py index f86ec307e..eb960675d 100644 --- a/lib/carbon/conf.py +++ b/lib/carbon/conf.py @@ -606,7 +606,7 @@ def read_config(program, options, **kwargs): raise CarbonConfigException("Either ROOT_DIR or GRAPHITE_ROOT " "needs to be provided.") - # Default config directory to root-relative, unless overriden by the + # Default config directory to root-relative, unless overridden by the # 'GRAPHITE_CONF_DIR' environment variable. settings.setdefault("CONF_DIR", os.environ.get("GRAPHITE_CONF_DIR", @@ -618,7 +618,7 @@ def read_config(program, options, **kwargs): # file. settings["CONF_DIR"] = dirname(normpath(options["config"])) - # Storage directory can be overriden by the 'GRAPHITE_STORAGE_DIR' + # Storage directory can be overridden by the 'GRAPHITE_STORAGE_DIR' # environment variable. It defaults to a path relative to GRAPHITE_ROOT # for backwards compatibility though. 
settings.setdefault("STORAGE_DIR", diff --git a/lib/carbon/instrumentation.py b/lib/carbon/instrumentation.py index 8f8b42965..2ade6d31f 100644 --- a/lib/carbon/instrumentation.py +++ b/lib/carbon/instrumentation.py @@ -152,7 +152,7 @@ def recordMetrics(): record('whitelistRejects', myStats.get('whitelistRejects', 0)) record('cpuUsage', getCpuUsage()) - # And here preserve count of messages received in the prior periiod + # And here preserve count of messages received in the prior period myPriorStats['metricsReceived'] = myStats.get('metricsReceived', 0) prior_stats.clear() prior_stats.update(myPriorStats) diff --git a/lib/carbon/tests/test_hashing.py b/lib/carbon/tests/test_hashing.py index 44612eaae..a190b064a 100644 --- a/lib/carbon/tests/test_hashing.py +++ b/lib/carbon/tests/test_hashing.py @@ -4,7 +4,7 @@ class HashIntegrityTest(unittest.TestCase): - def test_2_node_positional_itegrity(self): + def test_2_node_positional_integrity(self): """Make a cluster, verify we don't have positional collisions""" ring = ConsistentHashRing([]) for n in range(2): @@ -13,7 +13,7 @@ def test_2_node_positional_itegrity(self): len([n[0] for n in ring.ring]), len(set([n[0] for n in ring.ring]))) - def test_3_node_positional_itegrity(self): + def test_3_node_positional_integrity(self): """Make a cluster, verify we don't have positional collisions""" ring = ConsistentHashRing([]) for n in range(3): @@ -22,7 +22,7 @@ def test_3_node_positional_itegrity(self): len([n[0] for n in ring.ring]), len(set([n[0] for n in ring.ring]))) - def test_4_node_positional_itegrity(self): + def test_4_node_positional_integrity(self): """Make a cluster, verify we don't have positional collisions""" ring = ConsistentHashRing([]) for n in range(4): @@ -31,7 +31,7 @@ def test_4_node_positional_itegrity(self): len([n[0] for n in ring.ring]), len(set([n[0] for n in ring.ring]))) - def test_5_node_positional_itegrity(self): + def test_5_node_positional_integrity(self): """Make a cluster, verify we don't 
have positional collisions""" ring = ConsistentHashRing([]) for n in range(5): @@ -40,7 +40,7 @@ def test_5_node_positional_itegrity(self): len([n[0] for n in ring.ring]), len(set([n[0] for n in ring.ring]))) - def test_6_node_positional_itegrity(self): + def test_6_node_positional_integrity(self): """Make a cluster, verify we don't have positional collisions""" ring = ConsistentHashRing([]) for n in range(6): @@ -49,7 +49,7 @@ def test_6_node_positional_itegrity(self): len([n[0] for n in ring.ring]), len(set([n[0] for n in ring.ring]))) - def test_7_node_positional_itegrity(self): + def test_7_node_positional_integrity(self): """Make a cluster, verify we don't have positional collisions""" ring = ConsistentHashRing([]) for n in range(7): @@ -58,7 +58,7 @@ def test_7_node_positional_itegrity(self): len([n[0] for n in ring.ring]), len(set([n[0] for n in ring.ring]))) - def test_8_node_positional_itegrity(self): + def test_8_node_positional_integrity(self): """Make a cluster, verify we don't have positional collisions""" ring = ConsistentHashRing([]) for n in range(8): @@ -67,7 +67,7 @@ def test_8_node_positional_itegrity(self): len([n[0] for n in ring.ring]), len(set([n[0] for n in ring.ring]))) - def test_9_node_positional_itegrity(self): + def test_9_node_positional_integrity(self): """Make a cluster, verify we don't have positional collisions""" ring = ConsistentHashRing([]) for n in range(9): @@ -93,7 +93,7 @@ def test_11_get_nodes(self): class FNVHashIntegrityTest(unittest.TestCase): - def test_2_node_positional_itegrity(self): + def test_2_node_positional_integrity(self): """Make a cluster, verify we don't have positional collisions""" ring = ConsistentHashRing([], hash_type='fnv1a_ch') for n in range(2): @@ -102,7 +102,7 @@ def test_2_node_positional_itegrity(self): len([n[0] for n in ring.ring]), len(set([n[0] for n in ring.ring]))) - def test_3_node_positional_itegrity(self): + def test_3_node_positional_integrity(self): """Make a cluster, verify we don't 
have positional collisions""" ring = ConsistentHashRing([], hash_type='fnv1a_ch') for n in range(3): @@ -111,7 +111,7 @@ def test_3_node_positional_itegrity(self): len([n[0] for n in ring.ring]), len(set([n[0] for n in ring.ring]))) - def test_4_node_positional_itegrity(self): + def test_4_node_positional_integrity(self): """Make a cluster, verify we don't have positional collisions""" ring = ConsistentHashRing([], hash_type='fnv1a_ch') for n in range(4): @@ -120,7 +120,7 @@ def test_4_node_positional_itegrity(self): len([n[0] for n in ring.ring]), len(set([n[0] for n in ring.ring]))) - def test_5_node_positional_itegrity(self): + def test_5_node_positional_integrity(self): """Make a cluster, verify we don't have positional collisions""" ring = ConsistentHashRing([], hash_type='fnv1a_ch') for n in range(5): @@ -129,7 +129,7 @@ def test_5_node_positional_itegrity(self): len([n[0] for n in ring.ring]), len(set([n[0] for n in ring.ring]))) - def test_6_node_positional_itegrity(self): + def test_6_node_positional_integrity(self): """Make a cluster, verify we don't have positional collisions""" ring = ConsistentHashRing([], hash_type='fnv1a_ch') for n in range(6): @@ -138,7 +138,7 @@ def test_6_node_positional_itegrity(self): len([n[0] for n in ring.ring]), len(set([n[0] for n in ring.ring]))) - def test_7_node_positional_itegrity(self): + def test_7_node_positional_integrity(self): """Make a cluster, verify we don't have positional collisions""" ring = ConsistentHashRing([], hash_type='fnv1a_ch') for n in range(7): @@ -147,7 +147,7 @@ def test_7_node_positional_itegrity(self): len([n[0] for n in ring.ring]), len(set([n[0] for n in ring.ring]))) - def test_8_node_positional_itegrity(self): + def test_8_node_positional_integrity(self): """Make a cluster, verify we don't have positional collisions""" ring = ConsistentHashRing([], hash_type='fnv1a_ch') for n in range(8): @@ -156,7 +156,7 @@ def test_8_node_positional_itegrity(self): len([n[0] for n in ring.ring]), 
len(set([n[0] for n in ring.ring]))) - def test_9_node_positional_itegrity(self): + def test_9_node_positional_integrity(self): """Make a cluster, verify we don't have positional collisions""" ring = ConsistentHashRing([], hash_type='fnv1a_ch') for n in range(9): diff --git a/lib/carbon/util.py b/lib/carbon/util.py index a4b2e034d..279b6ce2c 100644 --- a/lib/carbon/util.py +++ b/lib/carbon/util.py @@ -167,7 +167,7 @@ def parseDestinations(destination_strings): # Yes this is duplicated in whisper. Yes, duplication is bad. # But the code is needed in both places and we do not want to create -# a dependency on whisper especiaily as carbon moves toward being a more +# a dependency on whisper especially as carbon moves toward being a more # generic storage service that can use various backends. UnitMultipliers = { 's': 1, @@ -424,7 +424,7 @@ def sanitize_name_as_tag_value(name): sanitized = name.lstrip('~') if len(sanitized) == 0: - raise Exception('Cannot use metric name %s as tag value, results in emptry string' % (name)) + raise Exception('Cannot use metric name %s as tag value, results in an empty string' % (name)) return sanitized diff --git a/lib/carbon/writer.py b/lib/carbon/writer.py index 7b63cba0a..7b2857d2d 100644 --- a/lib/carbon/writer.py +++ b/lib/carbon/writer.py @@ -36,7 +36,7 @@ AGGREGATION_SCHEMAS = loadAggregationSchemas() -# Inititalize token buckets so that we can enforce rate limits on creates and +# Initialize token buckets so that we can enforce rate limits on creates and # updates if the config wants them. CREATE_BUCKET = None UPDATE_BUCKET = None @@ -108,7 +108,7 @@ def writeCachedDataPoints(): # file then we'll just drop the metric on the ground and move on to the next # metric. # XXX This behavior should probably be configurable to no tdrop metrics - # when rate limitng unless our cache is too big or some other legit + # when rate limiting unless our cache is too big or some other legit # reason. 
instrumentation.increment('droppedCreates') continue @@ -159,7 +159,7 @@ def writeCachedDataPoints(): try: t1 = time.time() # If we have duplicated points, always pick the last. update_many() - # has no guaranted behavior for that, and in fact the current implementation + # has no guaranteed behavior for that, and in fact the current implementation # will keep the first point in the list. datapoints = dict(datapoints).items() state.database.write(metric, datapoints)