Skip to content

Commit

Permalink
Add https support to server with defaulted key and cert. Closes #428.
Browse files Browse the repository at this point in the history
  • Loading branch information
chambridge committed Jan 30, 2018
1 parent e078418 commit 97cbd79
Show file tree
Hide file tree
Showing 16 changed files with 445 additions and 84 deletions.
3 changes: 3 additions & 0 deletions .dockerignore
Original file line number Diff line number Diff line change
Expand Up @@ -6,3 +6,6 @@ client
quipucords/db.sqlite3
node_modules
package-lock.json

logs
*.log
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ coverage.xml
# Django stuff:
*.log
local_settings.py
logs

# Flask stuff:
instance/
Expand Down
5 changes: 3 additions & 2 deletions AUTHORS.rst
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
quipucords was written by Chris Hambridge <chambrid@redhat.com>
and Noah Lavine <nlavine@redhat.com>.
quipucords was written by Chris Hambridge <chambrid@redhat.com>,
Noah Lavine <nlavine@redhat.com>, Kevan Holdaway <kholdawa@redhat.com>,
and Ashley Aiken <aaiken@redhat.com>.
44 changes: 34 additions & 10 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -1,25 +1,49 @@
FROM fedora:26

RUN yum -y groupinstall "Development tools"
RUN yum -y install python-devel python-tools python3-devel python3-tools sshpass which
RUN yum -y install python-devel python-tools python3-devel python3-tools sshpass which supervisor

RUN pip install virtualenv
RUN virtualenv -p python3 ~/venv

# Create base directory
RUN mkdir -p /app

# Setup dependencies
COPY requirements.txt /tmp/reqs.txt
COPY requirements.txt /app/reqs.txt
# Remove last 2 lines
RUN sed -e :a -e '$d;N;2,3ba' -e 'P;D' /tmp/reqs.txt > /tmp/requirements.txt
RUN . ~/venv/bin/activate;pip install -r /tmp/requirements.txt
RUN sed -e :a -e '$d;N;2,3ba' -e 'P;D' /app/reqs.txt > /app/requirements.txt
RUN . ~/venv/bin/activate;pip install -r /app/requirements.txt
RUN . ~/venv/bin/activate;pip install coverage==3.6
RUN . ~/venv/bin/activate;pip install gunicorn==19.7.1

# Create /deploy
RUN mkdir -p /deploy
COPY deploy/gunicorn.conf.py /deploy
COPY deploy/run.sh /deploy

# Create /etc/ssl
RUN mkdir -p /etc/ssl/
COPY deploy/ssl/* /etc/ssl/
VOLUME /etc/ssl

# Create /var/logs
RUN mkdir -p /var/logs
VOLUME /var/logs

# Copy server code
COPY . /tmp/
WORKDIR /tmp/
COPY . /app/
WORKDIR /app/

# Initialize database & Collect static files
RUN . ~/venv/bin/activate;make server-init server-static

WORKDIR /app/quipucords

# Initialize database
RUN . ~/venv/bin/activate;python -V;make server-init
ENV DJANGO_LOG_LEVEL=INFO
ENV DJANGO_LOG_FORMATTER=verbose
ENV DJANGO_LOG_HANDLERS=console,file
ENV DJANGO_LOG_FILE=/var/logs/app.log

EXPOSE 8000
CMD . ~/venv/bin/activate;python -V;python /tmp/quipucords/manage.py runserver 0.0.0.0:8000
EXPOSE 443
CMD /deploy/run.sh
21 changes: 16 additions & 5 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ PYDIRS = quipucords

BINDIR = bin

OMIT_PATTERNS = */test*.py,*/manage.py,*/apps.py,*/wsgi.py,*/es_receiver.py,*/settings.py,*/migrations/*,*/docs/*,*/client/*
OMIT_PATTERNS = */test*.py,*/manage.py,*/apps.py,*/wsgi.py,*/es_receiver.py,*/settings.py,*/migrations/*,*/docs/*,*/client/*,*/deploy/*

help:
@echo "Please use \`make <target>' where <target> is one of:"
Expand Down Expand Up @@ -53,15 +53,26 @@ swagger-valid:
node_modules/swagger-cli/bin/swagger-cli.js validate docs/swagger.yml

lint-flake8:
flake8 . --ignore D203 --exclude quipucords/api/migrations,docs,build,.vscode,client,venv
flake8 . --ignore D203 --exclude quipucords/api/migrations,docs,build,.vscode,client,venv,deploy

lint-pylint:
find . -name "*.py" -not -name "*0*.py" -not -path "./build/*" -not -path "./docs/*" -not -path "./.vscode/*" -not -path "./client/*" -not -path "./venv/*" | xargs $(PYTHON) -m pylint --load-plugins=pylint_django --disable=duplicate-code
find . -name "*.py" -not -name "*0*.py" -not -path "./build/*" -not -path "./docs/*" -not -path "./.vscode/*" -not -path "./client/*" -not -path "./venv/*" -not -path "./deploy/*" | xargs $(PYTHON) -m pylint --load-plugins=pylint_django --disable=duplicate-code

lint: lint-flake8 lint-pylint

server-init:
$(PYTHON) quipucords/manage.py makemigrations api; $(PYTHON) quipucords/manage.py migrate; echo "from django.contrib.auth.models import User; User.objects.filter(email='admin@example.com').delete(); User.objects.create_superuser('admin', 'admin@example.com', 'pass')" | $(PYTHON) quipucords/manage.py shell
.PHONY: server-makemigrations server-migrate server-migragte server-set-superuser server-init server-static

# Generate Django migrations for the api app.
server-makemigrations:
	$(PYTHON) quipucords/manage.py makemigrations api

# Apply pending database migrations.
server-migrate:
	$(PYTHON) quipucords/manage.py migrate

# Backward-compatible alias for the previously misspelled target name.
server-migragte: server-migrate

# (Re)create the default superuser.
# NOTE(review): credentials are hard-coded ('admin'/'pass') -- acceptable for
# dev images, but confirm they are rotated for production deployments.
server-set-superuser:
	echo "from django.contrib.auth.models import User; User.objects.filter(email='admin@example.com').delete(); User.objects.create_superuser('admin', 'admin@example.com', 'pass')" | $(PYTHON) quipucords/manage.py shell

# Full server initialization: generate migrations, migrate, create superuser.
server-init: server-makemigrations server-migrate server-set-superuser

# Collect static assets for serving.
server-static:
	$(PYTHON) quipucords/manage.py collectstatic --settings quipucords.settings --no-input

serve:
$(PYTHON) quipucords/manage.py runserver
Expand Down
20 changes: 3 additions & 17 deletions README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -90,9 +90,9 @@ You must have `Docker installed <https://docs.docker.com/engine/installation/>`_

3. Run the docker image::

docker run -d -p8000:8000 -i quipucords:latest
docker run -d -p443:443 -i quipucords:latest

Now the server should be running and you can launch the `Credential Browseable API <http://127.0.0.1:8000/api/v1/credentials/>`_.
Now the server should be running and you can open the `login page <https://127.0.0.1/>`_.
You can work with the APIs directly or you can use the CLI. You can configure the CLI with the following command::

qpc server config --host 127.0.0.1
Expand Down Expand Up @@ -177,20 +177,6 @@ If you intend to run on Mac OS there are several more steps required.
See explanation `here <https://github.com/ansible/ansible/issues/31869#issuecomment-337769174>`_.


Piping data to Elasticsearch
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Before starting the server, set the following environment variables::

USE_ELASTICSEARCH=True
ES_HOSTS=http://ES_HOST1,http://ES_HOST2

Additionally, there is a `docker-compose.yml` file located in the `elasticsearch` directory. To start a local docker image do the following:
* Ensure you have docker and docker-compose installed
* Open a terminal window and switch to the `elasticsearch` folder
* Run `docker-compose up` to start Elasticsearch and Kibana
* Run `docker-compose down` to stop Elasticsearch and Kibana


Testing
^^^^^^^

Expand Down Expand Up @@ -226,6 +212,6 @@ Reference the `CONTRIBUTING <CONTRIBUTING.rst>`_ guide for information to the pr

Copyright & License
-------------------
Copyright 2017, Red Hat, Inc.
Copyright 2017-2018, Red Hat, Inc.

quipucords is released under the `GNU Public License version 3 <LICENSE>`_.
237 changes: 237 additions & 0 deletions deploy/gunicorn.conf.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,237 @@
"""Gunicorn configuration."""
#
# Server socket
#
# bind - The socket to bind.
#
# A string of the form: 'HOST', 'HOST:PORT', 'unix:PATH'.
# An IP is a valid HOST.
#
# backlog - The number of pending connections. This refers
# to the number of clients that can be waiting to be
# served. Exceeding this number results in the client
# getting an error when attempting to connect. It should
# only affect servers under significant load.
#
# Must be a positive integer. Generally set in the 64-2048
# range.
#

bind = '0.0.0.0:443'
backlog = 2048


#
# Worker processes
#
# workers - Number of worker processes handling requests. Typical
#           guidance is 2-4 x $(NUM_CORES); set to 1 here.
#
# worker_class - Worker type; the default 'sync' class suits normal
#                request/response workloads.
#
# worker_connections - Max simultaneous clients per process; only
#                      relevant to the eventlet/gevent worker classes.
#
# timeout - Seconds of silence from a worker before the master kills
#           and replaces it.
#
# keepalive - Seconds to wait for the next request on a Keep-Alive
#             HTTP connection (generally 1-5).
#

workers = 1
worker_class = 'sync'
worker_connections = 1000
timeout = 30
keepalive = 2

#
# spew - Install a trace function that prints every line of Python the
#        server executes. Extremely verbose; debugging only.
#

spew = False

#
# Server mechanics
#
# daemon - Detach the master process from the controlling terminal.
#          False keeps it in the foreground (appropriate for the
#          Docker CMD that launches it).
#
# pidfile - Path of a pid file to write, or None to skip it.
#
# user / group - Identity (id or name) the worker processes switch to,
#                or None to keep the current user/group.
#
# umask - File-permission mask for files Gunicorn writes; also affects
#         unix socket permissions. Accepts anything valid for
#         os.umask(mode) or int(value, 0).
#
# tmp_upload_dir - Directory for temporary request data, or None to
#                  let Python choose one.
#

daemon = False
pidfile = None
umask = 0
user = None
group = None
tmp_upload_dir = None

#
# Logging
#
# errorlog / accesslog - '-' sends both logs to stdout/stderr so the
#                        container runtime can capture them.
#
# loglevel - One of "debug", "info", "warning", "error", "critical".
#

errorlog = '-'
loglevel = 'info'
accesslog = '-'
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s"' \
                    ' %(s)s %(b)s "%(f)s" "%(a)s"'


# Environment passed to the application so Django can locate its
# settings module.
raw_env = [
    'DJANGO_SETTINGS_MODULE=quipucords.settings'
]

# SSL configuration. The Dockerfile seeds /etc/ssl with a default
# key/cert (deploy/ssl/*) and declares it a volume, so deployments can
# mount their own pair at these same paths.
keyfile = '/etc/ssl/server.key'
certfile = '/etc/ssl/server.crt'



#
# Process naming
#
# proc_name - Base name shown by ps/top via setproctitle; None keeps
#             the default ('gunicorn'). Worth setting when running
#             multiple Gunicorn instances on one host.
#

proc_name = None

#
# Server hooks
#
# post_fork - Called just after a worker has been forked.
#
# A callable that takes a server and worker instance
# as arguments.
#
# pre_fork - Called just prior to forking the worker subprocess.
#
# A callable that accepts the same arguments as post_fork
#
# pre_exec - Called just prior to forking off a secondary
# master process during things like config reloading.
#
# A callable that takes a server instance as the sole argument.
#


def post_fork(server, worker):
    """Record the pid of each freshly forked worker in the server log."""
    pid = worker.pid
    server.log.info('Worker spawned (pid: %s)', pid)


def pre_fork(server, worker):
    """No-op hook invoked just before a worker process is forked."""
    # Intentionally empty; defined so the hook point is explicit.
    pass


def pre_exec(server):
    """Log that the master is about to re-exec itself (e.g. on reload)."""
    server.log.info('Forked child, re-executing.')


def when_ready(server):
    """Announce that startup finished and workers are being spawned."""
    server.log.info('Server is ready. Spawning workers')


def worker_int(worker):
    """Log receipt of INT/QUIT plus a stack dump of every live thread."""
    worker.log.info('worker received INT or QUIT signal')

    # Collect a traceback for each thread to aid post-mortem debugging.
    import threading
    import sys
    import traceback
    names = {t.ident: t.name for t in threading.enumerate()}
    dump = []
    for tid, frame in sys._current_frames().items():
        dump.append('\n# Thread: %s(%d)' % (names.get(tid, ''), tid))
        for fname, lineno, func, text in traceback.extract_stack(frame):
            dump.append('File: "%s", line %d, in %s' % (fname, lineno, func))
            if text:
                dump.append(' %s' % (text.strip()))
    worker.log.debug('\n'.join(dump))


def worker_abort(worker):
    """Log that a worker received SIGABRT (e.g. killed on timeout)."""
    worker.log.info('worker received SIGABRT signal')
Loading

0 comments on commit 97cbd79

Please sign in to comment.