Skip to content

Commit

Permalink
Merge 8a33d0a into fa12fb6
Browse files Browse the repository at this point in the history
  • Loading branch information
honzajavorek committed Oct 18, 2018
2 parents fa12fb6 + 8a33d0a commit 84420b1
Show file tree
Hide file tree
Showing 54 changed files with 4,383 additions and 4,674 deletions.
345 changes: 0 additions & 345 deletions .github/CONTRIBUTING.md

This file was deleted.

290 changes: 290 additions & 0 deletions .github/CONTRIBUTING.rst

Large diffs are not rendered by default.

29 changes: 12 additions & 17 deletions .travis.yml
Expand Up @@ -3,11 +3,8 @@
sudo: false
dist: "trusty"
language: "node_js"
addons:
apt:
packages:
- "python3"
- "python3-pip"
node_js:
- "10"
cache:
directories:
- "node_modules"
Expand All @@ -16,30 +13,28 @@ before_install:
# so 'conventional-changelog-lint' could compare commits and lint them: marionebl/conventional-changelog-lint#7
- "git remote set-branches origin master && git fetch && git checkout master && git checkout -"
- "npm -g install npm@6"
- "pip3 install --user -r docs/requirements.txt"
install: "npm install --no-optional"
- "pyenv global 3.6"
install:
- "npm install --no-optional"
- "pip install --user -r docs/requirements.txt"
jobs:
include:
# stage 1, all following runs in parallel:
- stage: "quality checks & tests"
env: "JOB=quality_checks"
node_js: "10"
script: "npm run lint && npm run docs:lint"
- node_js: "10"
env: "JOB=docs_build_dry_run" # why dry run? because production build happens directly on ReadTheDocs infrastructure
- env: "JOB=docs_build_dry_run" # why dry run? because production build happens directly on ReadTheDocs infrastructure
script: "npm run docs:build"
- node_js: "6"
env: "JOB=node6"
- env: "JOB=node6"
node_js: "6"
script: "npm run test:coverage && npm run coveralls"
- node_js: "8"
env: "JOB=node8"
- env: "JOB=node8"
node_js: "8"
script: "npm run test:coverage && npm run coveralls"
- node_js: "10"
env: "JOB=node10"
- env: "JOB=node10"
script: "npm run test:coverage && npm run coveralls"

# stage 2
- stage: "semantic release"
node_js: "10"
script: "npm run semantic-release || true"
if: fork = false AND branch = master AND type = push
6 changes: 6 additions & 0 deletions doc8.ini
@@ -0,0 +1,6 @@
[doc8]
ignore-path=docs/_build

# Do not check line length. This project relies on writing in an editor with
# word wrap turned on. No explicit newlines, please.
ignore=D001
45 changes: 0 additions & 45 deletions docs/_extensions/generate-cli-docs.coffee

This file was deleted.

228 changes: 16 additions & 212 deletions docs/conf.py
@@ -1,14 +1,11 @@
import os
import sys
import re
import json
import subprocess
import urllib.request
from docutils import nodes

from sphinx.util import console
from sphinx.errors import SphinxError
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify


###########################################################################
Expand All @@ -20,18 +17,24 @@

# -- Environment ----------------------------------------------------------

# Explicitly put the extensions directory to Python path
sys.path.append(os.path.abspath('extensions'))

# Detect whether the build happens on ReadTheDocs
IS_READTHEDOCS = os.environ.get('READTHEDOCS') == 'True'

# Specify paths
docs_dir = os.path.dirname(__file__)
project_dir = os.path.join(docs_dir, '..')
node_modules_bin_dir = os.path.join(project_dir, 'node_modules', '.bin')

# Install all npm dependencies if on ReadTheDocs. This requires the latest
# ReadTheDocs build image, which supports Node.js out of the box. This is
# specified in the readthedocs.yml in the root of the project.
if IS_READTHEDOCS:
installation_output = subprocess.getoutput('bash ' + os.path.join(docs_dir, 'install-node.sh'))
node_bin = installation_output.splitlines()[-1].strip()
else:
node_bin = 'node'
subprocess.check_call('npm install', cwd=project_dir, shell=True)

# Load package.json data
with open(os.path.join(project_dir, 'package.json')) as f:
package_json = json.load(f)

Expand All @@ -42,13 +45,13 @@
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'cli_options',
'pygments_markdown_lexer',
]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = '.md'
source_parsers = {'.md': CommonMarkParser}
source_suffix = '.rst'

# The master document.
master_doc = 'index'
Expand Down Expand Up @@ -85,12 +88,7 @@ def get_release():
# Semantic Release wasn't able to determine a new version number,
# either because of some error or because there are no changes which
# would bump the version number. Stick to the latest released version.
if IS_READTHEDOCS:
npm_bin = node_bin.replace('/bin/node', '/bin/npm')
command = '{} {} view dredd version'.format(node_bin, npm_bin)
else:
command = 'npm view dredd version'
return subprocess.getoutput(command).strip()
return subprocess.getoutput('npm view dredd version').strip()

# The full version, including alpha/beta/rc tags.
release = get_release()
Expand All @@ -116,10 +114,10 @@ def get_release():

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if IS_READTHEDOCS:
# equals to default RTD theme
# equals to the default RTD theme
html_theme = 'default'
else:
# emulates the default RTD theme for local development
Expand Down Expand Up @@ -168,200 +166,6 @@ def get_release():
]


# -- Custom Extensions ----------------------------------------------------

def setup(app):
    """Sphinx extension entry point for this configuration file."""
    # Register the Node.js-based preprocessing (Hercule transclusion plus the
    # scripts under 'docs/_extensions') so it runs before Sphinx parses a document
    init_js_extensions(app)

    # Hook up custom handling of references (local links) in Markdown sources
    app.connect('doctree-read', collect_ref_data)
    app.connect('doctree-resolved', process_refs)

    # Extra Markdown features, see
    # https://recommonmark.readthedocs.io/en/latest/auto_structify.html
    recommonmark_config = {
        'enable_eval_rst': True,
        'enable_auto_toc_tree': True,
        'auto_toc_tree_section': 'Contents',
    }
    app.add_config_value('recommonmark_config', recommonmark_config, True)
    app.add_transform(AutoStructify)


# -- Markdown References --------------------------------------------------

def collect_ref_data(app, doctree):
    """
    Scans a parsed document for anchors and for references (local links),
    then stores both lists in the Sphinx metadata for later validation
    """
    source_path = doctree.attributes['source'].replace(docs_dir, '').lstrip('/')
    docname = source_path.replace('.md', '')

    anchors = []
    references = []

    # Raw HTML nodes may carry explicit anchors as name="..." or id="..."
    for raw_node in doctree.traverse(nodes.raw):
        markup = raw_node.rawsource
        if 'name=' in markup:
            found = re.search(r'name="([^\"]+)', markup)
            if found:
                anchors.append(found.group(1))
        elif 'id=' in markup:
            found = re.search(r'id="([^\"]+)', markup)
            if found:
                anchors.append(found.group(1))

    # Section titles get auto-generated ids, which also work as anchors
    for section_node in doctree.traverse(nodes.section):
        anchors.extend(frozenset(section_node.attributes.get('ids', [])))

    # Anything that is not an absolute http(s) URI counts as a local reference
    for ref_node in doctree.traverse(nodes.reference):
        uri = ref_node.get('refuri')
        if uri and not uri.startswith(('http://', 'https://')):
            references.append(to_reference(uri, basedoc=docname))

    app.env.metadata[docname]['anchors'] = anchors
    app.env.metadata[docname]['references'] = references

def process_refs(app, doctree, docname):
    """
    Rewrites all references (local links) within a document, breaking the
    build if any of them points to a non-existent document or anchor
    """
    for reference in app.env.metadata[docname]['references']:
        target_docname, target_anchor = parse_reference(reference)

        if target_docname not in app.env.metadata:
            template = "Document '{}' is referenced from '{}', but it could not be found"
            raise SphinxError(template.format(target_docname, docname))

        known_anchors = app.env.metadata[target_docname]['anchors']
        if target_anchor and target_anchor not in known_anchors:
            template = "Section '{}#{}' is referenced from '{}', but it could not be found"
            raise SphinxError(template.format(target_docname, target_anchor, docname))

        # Rewrite every node carrying this reference to its final URI
        for ref_node in doctree.traverse(nodes.reference):
            uri = ref_node.get('refuri')
            if to_reference(uri, basedoc=docname) == reference:
                ref_node['refuri'] = to_uri(app, target_docname, target_anchor)

def to_uri(app, docname, anchor=None):
    """
    Builds the final URI for a document (and an optional anchor), prefixing
    it with '/<language>/<version>' when building on ReadTheDocs
    """
    if IS_READTHEDOCS:
        language = app.config.language or 'en'
        version_name = os.environ.get('READTHEDOCS_VERSION')
        uri = '/{}/{}'.format(language, version_name)
    else:
        uri = ''

    uri += '/{}.html'.format(docname)
    if anchor:
        uri += '#{}'.format(anchor)
    return uri

def to_reference(uri, basedoc=None):
    """
    Helper function, turns a URI and an optional base document name into
    a normalized 'reference' string: 'docname' or 'docname#anchor'
    """
    path, hash_sign, fragment = uri.partition('#')
    document = path or basedoc
    # No '#' in the URI means there is no anchor at all
    anchor = fragment if hash_sign else None

    if not document:
        message = "For self references like '{}' you need to provide the 'basedoc' argument".format(uri)
        raise ValueError(message)

    reference = os.path.splitext(document.lstrip('/'))[0]
    if anchor:
        reference = reference + '#' + anchor
    return reference

def parse_reference(reference):
    """
    Helper function, splits a 'reference' into a document name and an
    anchor (the anchor is None when the reference has no '#' part)
    """
    docname, hash_sign, anchor = reference.partition('#')
    return docname, (anchor if hash_sign else None)


# -- JavaScript Extensions ------------------------------------------------

js_extensions_dir = os.path.join(docs_dir, '_extensions')
js_extensions = []
js_interpreters = {
'.js': [node_bin],
'.coffee': [node_bin, os.path.join(node_modules_bin_dir, 'coffee')]
}

def init_js_extensions(app):
    """
    Looks up and registers the Node.js extensions

    Walks through the 'docs/_extensions' directory, pairs each script of a
    known type with its interpreter command (node vs. coffee) and hooks the
    'run_js_extensions' function into the Sphinx 'source-read' event.
    """
    app.info(console.bold('initializing Node.js extensions... '), nonl=True)

    for script_name in sorted(os.listdir(js_extensions_dir)):
        extension = os.path.splitext(script_name)[1]
        if extension in js_interpreters:
            script_path = os.path.join(js_extensions_dir, script_name)
            js_extensions.append((script_name, js_interpreters[extension] + [script_path]))

    app.connect('source-read', run_js_extensions)
    app.info('{} found'.format(len(js_extensions)))
    app.verbose('JavaScript extensions: ' + ', '.join(dict(js_extensions).keys()))

def run_js_extensions(app, docname, source_list):
    """
    Lets all registered Node.js extensions process the given document source

    Executed for each document after the source gets read. Sequentially feeds
    stdin of each registered Node.js extension with the source and continues
    with whatever the extension sends to stdout. The extensions are provided
    with the document name as the first CLI argument.

    Hercule (https://www.npmjs.com/package/hercule) is built-in as if it was
    the first Node.js extension in the pipeline.
    """
    source = source_list[0]

    # Fixed typo in the verbose log messages: 'runnning' -> 'running'
    app.verbose(console.bold("running JavaScript extension 'hercule'... ") + console.blue(docname))
    command = [node_bin, os.path.join(node_modules_bin_dir, 'hercule'), '--relative=' + docs_dir, '--stdin']
    source = run_extension('hercule', command, source)

    for basename, command in js_extensions:
        app.verbose(console.bold("running JavaScript extension '{}'... ".format(basename)) + console.blue(docname))
        source = run_extension(basename, command + [docname], source)

    # Sphinx expects the (possibly modified) source back in the mutable list
    source_list[0] = source

def run_extension(extension_name, command, source):
    """
    Runs the given command as a subprocess, feeding it the document source
    on stdin and returning whatever it writes to stdout

    Raises SphinxError when the subprocess finishes with a non-zero exit
    status.
    """
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
    # communicate() feeds stdin and drains stdout concurrently. The previous
    # write-everything-then-read-everything approach could deadlock once the
    # OS pipe buffer filled up on either side (documented pitfall of using
    # Popen.stdin.write/stdout.read directly).
    stdout, _ = proc.communicate(source.encode('utf-8'))
    if proc.returncode:
        message = "JavaScript extension '{}' finished with non-zero exit status: {}"
        raise SphinxError(message.format(extension_name, proc.returncode))
    return stdout.decode('utf-8')


# -- Hacks ----------------------------------------------------------------

import sphinx.application
Expand Down
1 change: 0 additions & 1 deletion docs/contributing.md

This file was deleted.

0 comments on commit 84420b1

Please sign in to comment.