diff --git a/bin/fcomm-index-latest-builds b/bin/fcomm-index-latest-builds
deleted file mode 100755
index 6a0892b8..00000000
--- a/bin/fcomm-index-latest-builds
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env python
-
-# use this script when running the indexer from cron
-
-import sys
-import os
-import shutil
-
-from fedoracommunity.search.latest_version_mapper import run
-try:
-    from lockfile import LockFile
-except:
-    from lockfile import FileLock as LockFile
-
-from optparse import OptionParser
-
-if __name__ == "__main__":
-    parser = OptionParser()
-    parser.add_option("-p", "--path", dest="cache_path",
-                      help="path to where we create or update the version map",
-                      metavar="CACHEPATH")
-    parser.add_option("--koji-url", dest="koji_url",
-                      default='http://koji.fedoraproject.org/kojihub',
-                      help="the base url to get koji data from",
-                      metavar="KOJIURL")
-    parser.add_option("--action", dest="action",
-                      default='update',
-                      help="what action to perform. Either 'init' or 'update'",
-                      metavar="ACTION")
-    parser.add_option("--timestamp", dest="timestamp",
-                      default=None,
-                      help="how far back to run the query.")
-
-    (options, args) = parser.parse_args()
-    lockfile = LockFile(
-        os.path.join(options.cache_path, '.fcomm_version_mapper_lock'))
-
-    try:
-        lockfile.acquire(timeout=30)
-    except Exception as e:
-        print "Error acquiring lock file: %s" % str(e)
-        exit(-1)
-
-    # None or an int
-    timestamp = options.timestamp and int(options.timestamp)
-
-    try:
-        run(
-            cache_path=options.cache_path,
-            action=options.action,
-            timestamp=timestamp,
-            koji_url=options.koji_url)
-    finally:
-        lockfile.release()
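The deleted cron wrapper's only real job was to serialize indexer runs behind a file lock. For reference, the same guard pattern looks like the sketch below, assuming a lockfile release that exposes LockFile and LockTimeout; the lock path and placeholder body are illustrative, not the project's.

    import sys

    from lockfile import LockFile, LockTimeout

    # Illustrative lock path; the removed script kept its lock inside the cache dir.
    lock = LockFile('/tmp/fcomm-indexer-example.lock')

    try:
        # Give up if a previous run still holds the lock after 30 seconds.
        lock.acquire(timeout=30)
    except LockTimeout:
        sys.exit("another indexer run is already in progress")

    try:
        pass  # the actual indexing run would go here
    finally:
        lock.release()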
diff --git a/bootstrap.py b/bootstrap.py
index 82d51631..8f0aa3b1 100755
--- a/bootstrap.py
+++ b/bootstrap.py
@@ -33,7 +33,7 @@ def install_deps():
         'xapian-bindings-python', 'diffstat', 'fedpkg', 'svn', 'wget',
         'python-xappy', 'python-webob', 'moksha', 'TurboGears2',
         'python-dogpile-cache', 'python-dogpile-core',
-        'python-retask', 'python-memcached', 'memcached',
+        'python-memcached', 'memcached',
         'python-markdown',
     ]
     run('sudo dnf install -q -y ' + ' '.join(reqs))
diff --git a/development.ini b/development.ini
index f4f03b18..0cd4e168 100644
--- a/development.ini
+++ b/development.ini
@@ -34,8 +34,6 @@ fedoracommunity.connector.pkgdb.baseurl = https://admin.fedoraproject.org/pkgdb
 fedoracommunity.connector.tagger.baseurl = https://apps.fedoraproject.org/tagger
 fedoracommunity.connector.mdapi.baseurl = https://apps.fedoraproject.org/mdapi
 fedoracommunity.connector.icons.baseurl = https://alt.fedoraproject.org/pub/alt/screenshots
-fedoracommunity.connector.yum.conf = %(here)s/production/yum.conf
-fedoracommunity.rpm_cache = %(here)s/rpm_cache/
 
 fedmenu.url = https://apps.fedoraproject.org/fedmenu
 fedmenu.data_url = https://apps.fedoraproject.org/js/data.js
@@ -54,13 +52,6 @@ datagrepper_url=http://localhost:5000/raw
 
 # This is insecure, use only for testing
 fedora.clients.check_certs = False
 
-# For the cache worker daemon
-cache-worker.pidfile = %(here)s/fcomm-cache-worker.pid
-cache-worker.logfile = %(here)s/fcomm-cache-worker.log
-# We can have no more than 1 thread until the following is resolved
-# https://github.com/kushaldas/retask/issues/2
-cache-worker.threads = 1
-
 ## Moksha configuration
 moksha.extensionpoints = True
diff --git a/fedora-packages.spec b/fedora-packages.spec
index cda976cc..294fd0d4 100644
--- a/fedora-packages.spec
+++ b/fedora-packages.spec
@@ -78,9 +78,6 @@ Requires: fedmsg
 Requires: rpmdevtools
 Requires: python-daemon
 
-# Needs to be running so the wsgi process can share jobs with worker processes
-Requires: redis
-
 Obsoletes: myfedora
 Conflicts: fedoracommunity
 
@@ -130,20 +127,9 @@ cp fedoracommunity/widgets/static/javascript/jquery.jstree.js %{buildroot}%{_dat
 %{__install} production/apache/%{oldname}.wsgi %{buildroot}%{_datadir}/%{oldname}/production/apache/%{oldname}.wsgi
 %{__install} production/sample-production.ini %{buildroot}%{_datadir}/%{oldname}/production
 
-%{__mkdir_p} %{buildroot}%{_sysconfdir}/init.d
-%{__install} initsys/sysv/fcomm-cache-worker.init %{buildroot}%{_sysconfdir}/init.d/fcomm-cache-worker
-
-%{__mkdir_p} %{buildroot}%{_sbindir}
-%{__mv} %{buildroot}%{_bindir}/fcomm-cache-worker %{buildroot}%{_sbindir}/fcomm-cache-worker
-
-# Logrotate configuration (for the cache-worker daemon)
-%{__mkdir_p} %{buildroot}/%{_sysconfdir}/logrotate.d
-%{__install} logrotate %{buildroot}/%{_sysconfdir}/logrotate.d/%{oldname}
-
 %clean
 %{__rm} -rf %{buildroot}
 
-
 %files
 %defattr(-,root,root,-)
 %doc README.txt COPYING AUTHORS
@@ -155,13 +141,7 @@ cp fedoracommunity/widgets/static/javascript/jquery.jstree.js %{buildroot}%{_dat
 %{python_sitelib}/%{oldname}-%{version}-py%{pyver}.egg-info/
 #%{python_sitelib}/%{oldname}-%{version}-py%{pyver}-nspkg.pth
 %attr(-,apache,apache) %dir %{_localstatedir}/cache/%{oldname}
-%{_bindir}/fedoracommunity_makeyumcache
 %{_bindir}/fcomm-index-packages
-%{_bindir}/fcomm-index-latest-builds
-%{_sbindir}/fcomm-cache-worker
-%{_sysconfdir}/init.d/fcomm-cache-worker
-%config(noreplace) %{_sysconfdir}/logrotate.d/%{oldname}
-
 
 %changelog
 * Tue Oct 20 2015 Ralph Bean - 2.0.20-1
diff --git a/fedoracommunity/connectors/api/worker.py b/fedoracommunity/connectors/api/worker.py
deleted file mode 100644
index f146abdd..00000000
--- a/fedoracommunity/connectors/api/worker.py
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/env python
-""" This is a long-running worker process that generates values asychronously
-for dogpile.cache and the fcomm_connector api.
-
-It should be run under a sysvinit script as a daemon. It should be run as 8 or
-so threads.
-"""
-
-import tg
-import os
-import sys
-import json
-import time
-import types
-import retask.queue
-import memcache
-import threading
-
-from paste.deploy import appconfig
-
-import dogpile.cache.api
-import dogpile.cache.region
-
-threads = []
-
-import logging
-log = logging.getLogger("fcomm-cache-worker")
-
-
-class fake_request(object):
-    environ = {}
-
-
-class Thread(threading.Thread):
-    def init(self):
-        self.die = False
-
-        # Initialize an incoming redis queue right off the bat.
-        self.queue = retask.queue.Queue('fedora-packages')
-        self.queue.connect()
-
-        config = appconfig("config:" + find_config_file())
-        tg.config.update(config)
-
-        # Disable all caching so we don't cyclically cache ourselves
-        # into a corner
-        for key in list(tg.config.keys()):
-            if 'cache.connector' in key:
-                del tg.config[key]
-
-        from fedoracommunity.connectors.api.mw import FCommConnectorMiddleware
-        self.mw_obj = FCommConnectorMiddleware(lambda *args, **kw: None)
-
-        # Set up one memcached connection when we start.
-        self.mc = memcache.Client([config['cache.connectors.arguments.url']])
-
-    def iteration(self):
-        task = self.queue.wait(2)
-        if task is False:
-            log.info("No tasks found in the queue. Sleeping for 2 seconds.")
-            return
-        log.info("Picked up a task from the queue.")
-        data = json.loads(task.data)
-
-        try:
-            # Here are those three attribute that we hung
-            # on the original cached fn
-            name = data['fn']['name']
-            path = data['fn']['path']
-            typ = data['fn']['type']
-
-            conn_cls = self.mw_obj._connectors[name]['connector_class']
-
-            request = fake_request()
-            conn_obj = conn_cls(request.environ, request)
-
-            if typ == 'query':
-                fn = conn_obj._query_paths[path]['query_func']
-            else:
-                fn = conn_obj._method_paths[path]
-
-            fn = types.MethodType(fn, conn_obj, conn_cls)
-
-            log.info("Calling {name}(**{kw})".format(
-                name=repr(fn), kw=data['kw']))
-
-            value = fn(**data['kw'])
-
-            value = dogpile.cache.api.CachedValue(value, {
-                "ct": time.time(),
-                "v": dogpile.cache.region.value_version,
-            })
-            cache_key = str(data['cache_key'])
-            log.debug("Value Recorded at " + cache_key)
-            self.mc.set(cache_key, value)
-        finally:
-            # Release the kraken!
-            log.info("Mutex released.")
-            self.mc.delete(str(data['mutex_key']))
-
-    def run(self):
-        self.init()
-        while not self.die:
-            try:
-                self.iteration()
-            except KeyboardInterrupt:
-                break
-            except Exception:
-                import traceback
-                log.error(traceback.format_exc())
-                sys.stdout.flush()
-        log.info("Thread exiting.")
-
-    def kill(self):
-        self.die = True
-
-    def __del__(self):
-        try:
-            # I thought this was automatic.. but just to be safe.
-            self.mc.disconnect_all()
-            del self.mc
-        except Exception:
-            pass
-
-
-def find_config_file():
-    locations = (
-        '.',
-        '/etc/fedoracommunity/',
-        '/'.join(__file__.split('/') + ['..', '..', '..', '..']),
-    )
-    for config_path in locations:
-        for config_file in ('production.ini', 'development.ini'):
-            cfg = os.path.join(os.path.abspath(config_path), config_file)
-            if os.path.isfile(cfg):
-                return cfg
-    return None
-
-
-def daemon():
-    def die_in_a_fire(signum, stack):
-        for thread in threads:
-            thread.kill()
-
-    from daemon import DaemonContext
-    try:
-        from daemon.pidfile import TimeoutPIDLockFile as PIDLockFile
-    except:
-        from daemon.pidlockfile import PIDLockFile
-
-    config = appconfig("config:" + find_config_file())
-
-    #pidlock = PIDLockFile('/var/run/fedoracommunity/worker.pid')
-    #output = file('/var/log/fedoracommunity/worker.log', 'a')
-    pidlock = PIDLockFile(
-        config.get('cache-worker.pidfile', '/tmp/fedoracommunity-worker.pid')
-    )
-    output = file(
-        config.get('cache-worker.logfile', '/tmp/fedoracommunity-worker.log'),
-        'a')
-
-    daemon = DaemonContext(pidfile=pidlock, stdout=output, stderr=output)
-    daemon.terminate = die_in_a_fire
-
-    n = int(config.get('cache-worker.threads', '8'))
-    with daemon:
-        log.info("Creating %i threads" % n)
-        for i in range(n):
-            threads.append(Thread())
-
-        for thread in threads:
-            thread.start()
-
-        # I used to do thread.join() here, but that makes it so the
-        # signal_handler never gets fired. Crazy python...
-        while any([not thread.die for thread in threads]):
-            time.sleep(2)
-
-
-def foreground():
-    t = Thread()
-    t.run()
-
-
-def main():
-    logging.basicConfig(
-        level=logging.DEBUG,
-        stream=sys.stdout,
-    )
-    if '--daemon' in sys.argv:
-        daemon()
-    else:
-        foreground()
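With the retask/redis queue and this worker gone, no separate daemon fills the cache any more; on a miss, dogpile.cache presumably computes connector values inline in the web process. A minimal sketch of a synchronous memcached-backed region for orientation only; the region setup and the function below are illustrative, not this project's actual wiring:

    from dogpile.cache import make_region

    # Synchronous region: on a cache miss the calling thread computes the
    # value itself instead of handing a task to an external worker.
    region = make_region().configure(
        'dogpile.cache.memcached',
        expiration_time=3600,
        arguments={'url': '127.0.0.1:11211'},
    )

    @region.cache_on_arguments()
    def get_package_info(name):
        # Illustrative stand-in for a connector query.
        return {'name': name}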
diff --git a/fedoracommunity/connectors/kojiconnector.py b/fedoracommunity/connectors/kojiconnector.py
index a07b1fed..4f08b0c0 100644
--- a/fedoracommunity/connectors/kojiconnector.py
+++ b/fedoracommunity/connectors/kojiconnector.py
@@ -82,12 +82,6 @@ def register(cls):
             'fedoracommunity.connector.koji.pkgurl',
             'http://koji.fedoraproject.org/packages')
 
-        cls._rpm_cache = config.get('fedoracommunity.rpm_cache',
-                                    None)
-        if not cls._rpm_cache:
-            print "You must specify fedoracommunity.rpm_cache in you .ini file"
-            exit(-1)
-
         cls._mdapi_url = config.get('fedoracommunity.connector.mdapi.url',
                                     'https://apps.fedoraproject.org/mdapi')
 
diff --git a/fedoracommunity/search/iconcache.py b/fedoracommunity/search/iconcache.py
deleted file mode 100644
index 96657eaf..00000000
--- a/fedoracommunity/search/iconcache.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import os
-
-from rpmcache import RPMCache
-import Image
-
-class IconCache(object):
-    def __init__(self, yum_base, icon_rpm_names, icon_dir, cache_dir):
-        self.found_icons = {}  # {'icon-name': True}
-        self._rpm_caches = []
-        self._rpm_caches.extend(icon_rpm_names)
-        self.yum_base = yum_base
-        self.cache_dir = cache_dir
-        self.icon_dir = icon_dir
-
-    def check_pkg(self, pkg):
-        try:
-            i = self._rpm_caches.index(pkg['name'])
-            self._rpm_caches[i] = RPMCache(pkg, self.yum_base, self.cache_dir)
-            self._rpm_caches[i].open()
-        except ValueError:
-            pass
-
-    def _find_best_icon(self, icon, cache):
-        if icon.endswith('.png'):
-            icon = icon[:-4]
-
-        icon_path_list = cache.find_files(icon + '.png', '*.png')
-        best_match = None
-
-        for icon_path in icon_path_list:
-            try:
-                pixbuf = Image.open(icon_path)
-                pixbuf.load()
-            except Exception:
-                continue
-
-            width = pixbuf.size[0]
-
-            # try to find the best match to 128x128
-            if width == 128:
-                best_match = pixbuf
-                break
-            elif best_match == None:
-                best_match = pixbuf
-            elif width > 128:
-                best_match_width = best_match.size[0]
-                if best_match_width < 128:
-                    best_match = pixbuf
-                elif width < best_match_width:
-                    best_match = pixbuf
-            elif width < 128:
-                best_match_width = best_match.size[0]
-                if best_match_width > 128:
-                    continue
-                elif width > best_match_width:
-                    best_match = pixbuf
-
-        if not best_match:
-            return None
-
-        if best_match.size[0] > 128:
-            # resize to 128
-            best_match = best_match.resize((128, 128))
-        elif best_match.size[0] < 128:
-            # smaller icons should be pasted onto a generic icon in
-            # the future but for now just resize
-            best_match = best_match.resize((128, 128))
-
-        return best_match
-
-    def generate_icon(self, icon_name, extra_cache):
-        if self.found_icons.get(icon_name, None):
-            return icon_name
-        else:
-            search_packages = [extra_cache]
-            search_packages.extend(self._rpm_caches)
-
-            for icon_rpm_cache in search_packages:
-                icon = self._find_best_icon(icon_name, icon_rpm_cache)
-
-                if icon:
-                    self.found_icons[icon_name] = True
-                    icon.save(os.path.join(self.icon_dir, icon_name + '.png'),
-                              'PNG')
-
-            return icon_name
-
-    def close(self):
-        for cache in self._rpm_caches:
-            if isinstance(cache, RPMCache):
-                cache.close()
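The deleted IconCache walked candidate PNGs inside RPMs and kept the one closest to 128x128, normalizing everything to that size with PIL. For reference, the equivalent resize step with present-day Pillow looks like the sketch below; the paths are illustrative:

    from PIL import Image  # Pillow; the removed module used the legacy `import Image`

    icon = Image.open('/tmp/example-icon.png')
    if icon.size != (128, 128):
        # The old code resized both larger and smaller icons straight to 128x128.
        icon = icon.resize((128, 128))
    icon.save('/tmp/example-icon-128.png', 'PNG')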
diff --git a/fedoracommunity/search/index.py b/fedoracommunity/search/index.py
index cc21a653..0586c02c 100644
--- a/fedoracommunity/search/index.py
+++ b/fedoracommunity/search/index.py
@@ -500,6 +500,3 @@ def run(cache_path, tagger_url=None, pkgdb_url=None, mdapi_url=None, icons_url=N
     log.info("Indexing packages.")
     indexer.index_packages()
     log.info("Indexed a ton of packages.")
-
-if __name__ == '__main__':
-    run('index_cache', join(os.path.dirname(__file__), 'yum.conf'), 'http://apps.fedoraproject.org/tagger/dump')
diff --git a/production/sample-production.ini b/production/sample-production.ini
index 6d7dd5f2..3bfe34f7 100644
--- a/production/sample-production.ini
+++ b/production/sample-production.ini
@@ -31,6 +31,9 @@ fedoracommunity.connector.kojihub.baseurl = https://koji.fedoraproject.org/kojih
 fedoracommunity.connector.bugzilla.baseurl = https://bugzilla.redhat.com/xmlrpc.cgi
 fedoracommunity.connector.fas.baseurl = https://admin.fedoraproject.org/accounts/
 fedoracommunity.connector.bodhi.baseurl = https://bodhi.fedoraproject.org
+fedoracommunity.connector.tagger.baseurl = https://apps.fedoraproject.org/tagger
+fedoracommunity.connector.mdapi.baseurl = https://apps.fedoraproject.org/mdapi
+fedoracommunity.connector.icons.baseurl = https://alt.fedoraproject.org/pub/alt/screenshots
 
 # FAS is locked down so we need a minimal user inorder to get public user info
 # to unauthenticated users. You need to get a locked down account for this
@@ -43,13 +46,6 @@ fedoracommunity.connector.bodhi.baseurl = https://bodhi.fedoraproject.org
 
 # This is insecure, use only for testing
 fedora.clients.check_certs = False
-# For the cache worker daemon
-cache-worker.pidfile = /var/cache/fedoracommunity/fcomm-cache-worker.pid
-cache-worker.logfile = /var/log/fedoracommunity/fcomm-cache-worker.log
-# We can have no more than 1 thread until the following is resolved
-# https://github.com/kushaldas/retask/issues/2
-cache-worker.threads = 1
-
 ## Moksha configuration
 moksha.connectors = True
 moksha.extensionpoints = True
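Taken together with the kojiconnector change above, the local yum/rpm cache machinery goes away and the mdapi baseurl is added to the sample config, so package metadata appears to come from the mdapi service instead. A rough sketch of a client-side lookup against such a service; the endpoint path is an assumption about mdapi, not something this patch defines:

    import requests

    MDAPI_URL = 'https://apps.fedoraproject.org/mdapi'

    def get_pkg(branch, name):
        # Assumed endpoint shape; consult the mdapi documentation for the real routes.
        resp = requests.get('{0}/{1}/pkg/{2}'.format(MDAPI_URL, branch, name),
                            timeout=30)
        resp.raise_for_status()
        return resp.json()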