Spelling (#893)
* spelling: across

* spelling: distinct

* spelling: empty

* spelling: especially

* spelling: explicitly

* spelling: guaranteed

* spelling: implicitly

* spelling: initialize

* spelling: integrity

* spelling: limiting

* spelling: overridden

* spelling: period

* spelling: receive

* spelling: reconnect
jsoref committed May 10, 2020
1 parent e4571ff commit a5f2181
Showing 8 changed files with 30 additions and 30 deletions.
2 changes: 1 addition & 1 deletion conf/carbon.amqp.conf.example
@@ -61,7 +61,7 @@ AMQP_VERBOSE = True
# AMQP_METRIC_NAME_IN_BODY = False

# NOTE: you cannot run both a cache and a relay on the same server
-# with the default configuration, you have to specify a distinict
+# with the default configuration, you have to specify distinct
# interfaces and ports for the listeners.

[relay]
6 changes: 3 additions & 3 deletions conf/carbon.conf.example
@@ -278,7 +278,7 @@ WHISPER_FALLOCATE_CREATE = True
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60

-# Enable AMQP if you want to receve metrics using an amqp broker
+# Enable AMQP if you want to receive metrics using an amqp broker
# ENABLE_AMQP = False

# Verbose means a line will be logged for every metric received
@@ -444,7 +444,7 @@ DESTINATIONS = 127.0.0.1:2004

# This allows to have multiple connections per destinations, this will
# pool all the replicas of a single host in the same queue and distribute
-# points accross these replicas instead of replicating them.
+# points across these replicas instead of replicating them.
# The following example will balance the load between :0 and :1.
## DESTINATIONS = foo:2001:0, foo:2001:1
## RELAY_METHOD = rules
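
As a hedged illustration of what "distribute points across these replicas instead of replicating them" means, here is a minimal round-robin sketch; the destination tuples and the cycle-based scheduling are assumptions for the example, not carbon's actual relay internals:

```python
from itertools import cycle

# Two pooled replicas of the same host, matching the example above.
destinations = [("foo", 2001, "0"), ("foo", 2001, "1")]
replica_pool = cycle(destinations)  # round-robin over the pool

def send(point):
    # Each point goes to exactly one replica, alternating between them,
    # rather than being copied to both.
    host, port, instance = next(replica_pool)
    print("sending %r to %s:%d:%s" % (point, host, port, instance))

send(("metric.a", (1589100000, 1.0)))  # -> foo:2001:0
send(("metric.b", (1589100060, 2.0)))  # -> foo:2001:1
```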
@@ -536,7 +536,7 @@ USE_RATIO_RESET=False
MIN_RESET_STAT_FLOW=1000

# When the ratio of stats being sent in a reporting interval is far
-# enough from 1.0, we will disconnect the socket and reconnecto to
+# enough from 1.0, we will disconnect the socket and reconnect to
# clear out queued stats. The default ratio of 0.9 indicates that 10%
# of stats aren't being delivered within one CARBON_METRIC_INTERVAL
# (default of 60 seconds), which can lead to a queue backup. Under
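
The rule this comment describes can be sketched in a few lines. MIN_RESET_STAT_FLOW comes from the setting above; the ratio constant's name and the connection object with its methods are placeholders, not carbon's actual client code:

```python
MIN_RESET_STAT_FLOW = 1000  # minimum flow before a reset is considered
MIN_RESET_RATIO = 0.9       # the "default ratio of 0.9" mentioned above

def maybe_reset(sent, received, connection):
    # Only consider a reset once enough stats are flowing, then compare
    # the delivered/received ratio against the threshold.
    if received < MIN_RESET_STAT_FLOW:
        return
    if float(sent) / received < MIN_RESET_RATIO:
        connection.disconnect()  # clears out the queued backlog
        connection.reconnect()
```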
4 changes: 2 additions & 2 deletions lib/carbon/client.py
@@ -503,7 +503,7 @@ class FakeClientFactory(object):
"""

def __init__(self):
-# This queue isn't explicitely bounded but will implicitely be. It receives
+# This queue isn't explicitly bounded but will implicitly be. It receives
# only metrics when no destinations are available, and as soon as we detect
# that we don't have any destination we pause the producer: this mean that
# it will contain only a few seconds of metrics.
@@ -531,7 +531,7 @@ class CarbonClientManager(Service):
def __init__(self, router):
if settings.DESTINATION_POOL_REPLICAS:
# If we decide to open multiple TCP connection to a replica, we probably
-# want to try to also load-balance accross hosts. In this case we need
+# want to try to also load-balance across hosts. In this case we need
# to make sure rfc3484 doesn't get in the way.
if setUpRandomResolver:
setUpRandomResolver(reactor)
4 changes: 2 additions & 2 deletions lib/carbon/conf.py
@@ -606,7 +606,7 @@ def read_config(program, options, **kwargs):
raise CarbonConfigException("Either ROOT_DIR or GRAPHITE_ROOT "
"needs to be provided.")

-# Default config directory to root-relative, unless overriden by the
+# Default config directory to root-relative, unless overridden by the
# 'GRAPHITE_CONF_DIR' environment variable.
settings.setdefault("CONF_DIR",
os.environ.get("GRAPHITE_CONF_DIR",
@@ -618,7 +618,7 @@ def read_config(program, options, **kwargs):
# file.
settings["CONF_DIR"] = dirname(normpath(options["config"]))

-# Storage directory can be overriden by the 'GRAPHITE_STORAGE_DIR'
+# Storage directory can be overridden by the 'GRAPHITE_STORAGE_DIR'
# environment variable. It defaults to a path relative to GRAPHITE_ROOT
# for backwards compatibility though.
settings.setdefault("STORAGE_DIR",
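
Both hunks in this file follow the same precedence: an environment variable wins, otherwise a root-relative default applies. A condensed, self-contained sketch (the GRAPHITE_ROOT value and the "storage" default are assumed for the example):

```python
import os
from os.path import join

GRAPHITE_ROOT = "/opt/graphite"  # assumed root for this example
settings = {}

# setdefault only fills the key if nothing set it earlier, so an
# explicit setting beats the environment, which beats the default.
settings.setdefault("CONF_DIR",
                    os.environ.get("GRAPHITE_CONF_DIR",
                                   join(GRAPHITE_ROOT, "conf")))
settings.setdefault("STORAGE_DIR",
                    os.environ.get("GRAPHITE_STORAGE_DIR",
                                   join(GRAPHITE_ROOT, "storage")))
```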
2 changes: 1 addition & 1 deletion lib/carbon/instrumentation.py
@@ -152,7 +152,7 @@ def recordMetrics():
record('whitelistRejects', myStats.get('whitelistRejects', 0))
record('cpuUsage', getCpuUsage())

-# And here preserve count of messages received in the prior periiod
+# And here preserve count of messages received in the prior period
myPriorStats['metricsReceived'] = myStats.get('metricsReceived', 0)
prior_stats.clear()
prior_stats.update(myPriorStats)
32 changes: 16 additions & 16 deletions lib/carbon/tests/test_hashing.py
@@ -4,7 +4,7 @@

class HashIntegrityTest(unittest.TestCase):

-def test_2_node_positional_itegrity(self):
+def test_2_node_positional_integrity(self):
"""Make a cluster, verify we don't have positional collisions"""
ring = ConsistentHashRing([])
for n in range(2):
@@ -13,7 +13,7 @@ def test_2_node_positional_itegrity(self):
len([n[0] for n in ring.ring]),
len(set([n[0] for n in ring.ring])))

-def test_3_node_positional_itegrity(self):
+def test_3_node_positional_integrity(self):
"""Make a cluster, verify we don't have positional collisions"""
ring = ConsistentHashRing([])
for n in range(3):
@@ -22,7 +22,7 @@ def test_3_node_positional_itegrity(self):
len([n[0] for n in ring.ring]),
len(set([n[0] for n in ring.ring])))

-def test_4_node_positional_itegrity(self):
+def test_4_node_positional_integrity(self):
"""Make a cluster, verify we don't have positional collisions"""
ring = ConsistentHashRing([])
for n in range(4):
@@ -31,7 +31,7 @@ def test_4_node_positional_itegrity(self):
len([n[0] for n in ring.ring]),
len(set([n[0] for n in ring.ring])))

-def test_5_node_positional_itegrity(self):
+def test_5_node_positional_integrity(self):
"""Make a cluster, verify we don't have positional collisions"""
ring = ConsistentHashRing([])
for n in range(5):
@@ -40,7 +40,7 @@ def test_5_node_positional_itegrity(self):
len([n[0] for n in ring.ring]),
len(set([n[0] for n in ring.ring])))

-def test_6_node_positional_itegrity(self):
+def test_6_node_positional_integrity(self):
"""Make a cluster, verify we don't have positional collisions"""
ring = ConsistentHashRing([])
for n in range(6):
@@ -49,7 +49,7 @@ def test_6_node_positional_itegrity(self):
len([n[0] for n in ring.ring]),
len(set([n[0] for n in ring.ring])))

-def test_7_node_positional_itegrity(self):
+def test_7_node_positional_integrity(self):
"""Make a cluster, verify we don't have positional collisions"""
ring = ConsistentHashRing([])
for n in range(7):
@@ -58,7 +58,7 @@ def test_7_node_positional_itegrity(self):
len([n[0] for n in ring.ring]),
len(set([n[0] for n in ring.ring])))

-def test_8_node_positional_itegrity(self):
+def test_8_node_positional_integrity(self):
"""Make a cluster, verify we don't have positional collisions"""
ring = ConsistentHashRing([])
for n in range(8):
@@ -67,7 +67,7 @@ def test_8_node_positional_itegrity(self):
len([n[0] for n in ring.ring]),
len(set([n[0] for n in ring.ring])))

-def test_9_node_positional_itegrity(self):
+def test_9_node_positional_integrity(self):
"""Make a cluster, verify we don't have positional collisions"""
ring = ConsistentHashRing([])
for n in range(9):
@@ -93,7 +93,7 @@ def test_11_get_nodes(self):

class FNVHashIntegrityTest(unittest.TestCase):

-def test_2_node_positional_itegrity(self):
+def test_2_node_positional_integrity(self):
"""Make a cluster, verify we don't have positional collisions"""
ring = ConsistentHashRing([], hash_type='fnv1a_ch')
for n in range(2):
@@ -102,7 +102,7 @@ def test_2_node_positional_itegrity(self):
len([n[0] for n in ring.ring]),
len(set([n[0] for n in ring.ring])))

-def test_3_node_positional_itegrity(self):
+def test_3_node_positional_integrity(self):
"""Make a cluster, verify we don't have positional collisions"""
ring = ConsistentHashRing([], hash_type='fnv1a_ch')
for n in range(3):
@@ -111,7 +111,7 @@ def test_3_node_positional_itegrity(self):
len([n[0] for n in ring.ring]),
len(set([n[0] for n in ring.ring])))

-def test_4_node_positional_itegrity(self):
+def test_4_node_positional_integrity(self):
"""Make a cluster, verify we don't have positional collisions"""
ring = ConsistentHashRing([], hash_type='fnv1a_ch')
for n in range(4):
@@ -120,7 +120,7 @@ def test_4_node_positional_itegrity(self):
len([n[0] for n in ring.ring]),
len(set([n[0] for n in ring.ring])))

-def test_5_node_positional_itegrity(self):
+def test_5_node_positional_integrity(self):
"""Make a cluster, verify we don't have positional collisions"""
ring = ConsistentHashRing([], hash_type='fnv1a_ch')
for n in range(5):
@@ -129,7 +129,7 @@ def test_5_node_positional_itegrity(self):
len([n[0] for n in ring.ring]),
len(set([n[0] for n in ring.ring])))

-def test_6_node_positional_itegrity(self):
+def test_6_node_positional_integrity(self):
"""Make a cluster, verify we don't have positional collisions"""
ring = ConsistentHashRing([], hash_type='fnv1a_ch')
for n in range(6):
@@ -138,7 +138,7 @@ def test_6_node_positional_itegrity(self):
len([n[0] for n in ring.ring]),
len(set([n[0] for n in ring.ring])))

-def test_7_node_positional_itegrity(self):
+def test_7_node_positional_integrity(self):
"""Make a cluster, verify we don't have positional collisions"""
ring = ConsistentHashRing([], hash_type='fnv1a_ch')
for n in range(7):
@@ -147,7 +147,7 @@ def test_7_node_positional_itegrity(self):
len([n[0] for n in ring.ring]),
len(set([n[0] for n in ring.ring])))

-def test_8_node_positional_itegrity(self):
+def test_8_node_positional_integrity(self):
"""Make a cluster, verify we don't have positional collisions"""
ring = ConsistentHashRing([], hash_type='fnv1a_ch')
for n in range(8):
@@ -156,7 +156,7 @@ def test_8_node_positional_itegrity(self):
len([n[0] for n in ring.ring]),
len(set([n[0] for n in ring.ring])))

-def test_9_node_positional_itegrity(self):
+def test_9_node_positional_integrity(self):
"""Make a cluster, verify we don't have positional collisions"""
ring = ConsistentHashRing([], hash_type='fnv1a_ch')
for n in range(9):
4 changes: 2 additions & 2 deletions lib/carbon/util.py
@@ -167,7 +167,7 @@ def parseDestinations(destination_strings):

# Yes this is duplicated in whisper. Yes, duplication is bad.
# But the code is needed in both places and we do not want to create
-# a dependency on whisper especiaily as carbon moves toward being a more
+# a dependency on whisper especially as carbon moves toward being a more
# generic storage service that can use various backends.
UnitMultipliers = {
's': 1,
@@ -424,7 +424,7 @@ def sanitize_name_as_tag_value(name):
sanitized = name.lstrip('~')

if len(sanitized) == 0:
-raise Exception('Cannot use metric name %s as tag value, results in emptry string' % (name))
+raise Exception('Cannot use metric name %s as tag value, results in an empty string' % (name))

return sanitized
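
For context, the visible part of this function strips leading '~' characters and rejects names that sanitize away entirely; a hypothetical usage sketch (the full function performs more sanitization before this point):

```python
print(sanitize_name_as_tag_value("~~cpu.load"))  # -> "cpu.load"
sanitize_name_as_tag_value("~~~")  # raises: sanitizes to an empty string
```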

6 changes: 3 additions & 3 deletions lib/carbon/writer.py
@@ -36,7 +36,7 @@
AGGREGATION_SCHEMAS = loadAggregationSchemas()


-# Inititalize token buckets so that we can enforce rate limits on creates and
+# Initialize token buckets so that we can enforce rate limits on creates and
# updates if the config wants them.
CREATE_BUCKET = None
UPDATE_BUCKET = None
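
For readers unfamiliar with the pattern, a simplified token bucket illustrating the idea; this is a stand-in sketch, not carbon's actual implementation:

```python
import time

class SimpleTokenBucket(object):
    """Allows fill_rate operations per second, with bursts up to capacity."""

    def __init__(self, capacity, fill_rate):
        self.capacity = float(capacity)
        self.fill_rate = float(fill_rate)
        self.tokens = float(capacity)  # start with a full bucket
        self.timestamp = time.time()

    def drain(self, cost):
        # Refill according to elapsed time, then try to take `cost` tokens.
        now = time.time()
        self.tokens = min(self.capacity,
                          self.tokens + (now - self.timestamp) * self.fill_rate)
        self.timestamp = now
        if self.tokens >= cost:
            self.tokens -= cost
            return True
        return False

# e.g. cap whisper file creates at roughly 50 per minute:
CREATE_BUCKET = SimpleTokenBucket(capacity=50, fill_rate=50.0 / 60)
```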
@@ -108,7 +108,7 @@ def writeCachedDataPoints():
# file then we'll just drop the metric on the ground and move on to the next
# metric.
# XXX This behavior should probably be configurable to no tdrop metrics
-# when rate limitng unless our cache is too big or some other legit
+# when rate limiting unless our cache is too big or some other legit
# reason.
instrumentation.increment('droppedCreates')
continue
@@ -159,7 +159,7 @@ def writeCachedDataPoints():
try:
t1 = time.time()
# If we have duplicated points, always pick the last. update_many()
-# has no guaranted behavior for that, and in fact the current implementation
+# has no guaranteed behavior for that, and in fact the current implementation
# will keep the first point in the list.
datapoints = dict(datapoints).items()
state.database.write(metric, datapoints)
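
The dict() pass works because building a dict from (timestamp, value) pairs lets later pairs overwrite earlier ones, so the last duplicate wins:

```python
datapoints = [(1589100000, 1.0), (1589100060, 2.0), (1589100000, 9.9)]
deduped = dict(datapoints).items()
print(sorted(deduped))  # [(1589100000, 9.9), (1589100060, 2.0)]
```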
