Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
Remove all remaining traces of the unused service cache.
  • Loading branch information
spladug committed Jan 10, 2012
1 parent 5f0c23a commit 706b27b
Show file tree
Hide file tree
Showing 5 changed files with 1 addition and 250 deletions.
113 changes: 0 additions & 113 deletions r2/draw_load.py

This file was deleted.

4 changes: 0 additions & 4 deletions r2/example.ini
Expand Up @@ -85,8 +85,6 @@ timezone = UTC
display_timezone = MST
# secret key for accessing /shutdown
shutdown_secret = 12345
# list of servers that the service monitor will care about
monitored_servers = reddit, localhost
# https api endpoint (must be g.domain or a subdomain of g.domain)
https_endpoint =
# name of the cookie to drop with login information
Expand All @@ -110,8 +108,6 @@ stalecaches =
# render caches (the second is "remote" and the local is optional but in the same format)
local_rendercache =
rendercaches = 127.0.0.1:11211
# cache for storing service monitor information
servicecaches = 127.0.0.1:11211

# -- permacache options --
# permacache is memcaches -> cassandra -> memcachedb
Expand Down
14 changes: 1 addition & 13 deletions r2/r2/lib/app_globals.py
Expand Up @@ -104,11 +104,9 @@ class Globals(object):
'memcaches',
'permacache_memcaches',
'rendercaches',
'servicecaches',
'cassandra_seeds',
'admins',
'sponsors',
'monitored_servers',
'automatic_reddits',
'agents',
'allowed_css_linked_domains',
Expand Down Expand Up @@ -278,11 +276,6 @@ def setup(self, global_conf):
num_clients = num_mc_clients)))
self.cache_chains.update(rendercache=self.rendercache)

self.servicecache = MemcacheChain((localcache_cls(),
CMemcache(self.servicecaches,
num_clients = num_mc_clients)))
self.cache_chains.update(servicecache=self.servicecache)

self.thing_cache = CacheChain((localcache_cls(),))
self.cache_chains.update(thing_cache=self.thing_cache)

Expand Down Expand Up @@ -400,8 +393,6 @@ def to_iter(v, delim = ','):
return (x.strip() for x in v.split(delim) if x)

def load_db_params(self, gc):
from r2.lib.services import get_db_load

self.databases = tuple(self.to_iter(gc['databases']))
self.db_params = {}
if not self.databases:
Expand All @@ -425,10 +416,7 @@ def load_db_params(self, gc):
if params['max_overflow'] == "*":
params['max_overflow'] = self.db_pool_overflow_size

ip = params['db_host']
ip_loads = get_db_load(self.servicecache, ip)
if ip not in ip_loads or ip_loads[ip][0] < 1000:
dbm.setup_db(db_name, g_override=self, **params)
dbm.setup_db(db_name, g_override=self, **params)
self.db_params[db_name] = params

dbm.type_db = dbm.get_engine(gc['type_db'])
Expand Down
8 changes: 0 additions & 8 deletions r2/r2/lib/manager/db_manager.py
Expand Up @@ -78,13 +78,8 @@ def rels_iter(self):
yield name, (type1_name, type2_name, engines)

def mark_dead(self, engine, g_override=None):
from r2.lib import services
logger.error("db_manager: marking connection dead: %r" % engine)
self.dead[engine] = time.time()
if g_override is None:
services.AppServiceMonitor.mark_db_down(engine.url.host)
else:
services.mark_db_down(g_override.servicecache, engine.url.host)

def test_engine(self, engine, g_override=None):
try:
Expand All @@ -106,10 +101,7 @@ def get_engines(self, names):
return [self._engines[name] for name in names if name in self._engines]

def get_read_table(self, tables):
from r2.lib.services import AppServiceMonitor
# short-cut for only one element
if len(tables) == 1:
return tables[0]

return random.choice(list(tables))

112 changes: 0 additions & 112 deletions r2/supervise_watcher.py

This file was deleted.

0 comments on commit 706b27b

Please sign in to comment.