forked from Normation/rudder-doc
/
generate-nav.py
executable file
·63 lines (53 loc) · 1.78 KB
/
generate-nav.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
#!/usr/bin/env python
import os
import re
# Directory holding the AsciiDoc pages, relative to the repo root.
PAGESDIR = "modules/ROOT/pages/"
# An AsciiDoc section title, e.g. "== My section": group(1) is the run of
# '=' (section depth), group(2) is the title text.
TITLE = re.compile(r"^(=+) (.+)$")
# An explicit AsciiDoc anchor on its own line, e.g. "[[my_id]]" or
# "[[my_id,reference text]]": group(1) is the anchor id.
# Fixed: the previous pattern "^[[(.+),?.*]]$" left the brackets unescaped,
# so "[[" opened a character class — it never matched an anchor line and
# had no capture group at all (group(1) would have raised).
ID = re.compile(r"^\[\[([^,\]]+),?.*\]\]$")
def remove_duplicate_underscore(string):
    """Collapse every run of consecutive underscores into a single one."""
    out = []
    for ch in string:
        # Keep the char unless it is an underscore immediately following
        # another underscore already emitted.
        if ch != '_' or not out or out[-1] != '_':
            out.append(ch)
    return ''.join(out)
# reproduce asciidoc's behavior
def slugify(s):
    """Turn a section title into an AsciiDoc-style anchor id.

    Mirrors asciidoc's auto-generated ids: prefix with "_", lowercase,
    replace every non-word character with "_", collapse underscore runs,
    and drop trailing underscores.
    """
    s = "_" + s
    s = s.lower()
    s = s.strip()
    # Raw string fixed: '\W' was an un-escaped sequence in a plain string
    # (a SyntaxWarning on modern Python).
    s = re.sub(r'\W', '_', s)
    # Collapse runs of underscores in one pass instead of calling the
    # hand-rolled helper — same result, self-contained.
    s = re.sub(r'_+', '_', s)
    s = s.rstrip('_')
    return s
# Work relative to the pages directory so that the computed xref targets
# are page-relative paths (e.g. "reference/intro.adoc").
os.chdir(PAGESDIR)
# Get all standalone .adoc pages, sorted alphanumerically
# We exclude files in root (the index of the doc), _partials which are not actual pages
# root.split('/', 1)[-1] drops the leading "." component that os.walk('.')
# prepends to every directory, leaving "subdir" (or "subdir/nested").
# NOTE(review): this assumes POSIX '/' separators in os.walk output —
# confirm before running on Windows.
files = sorted([root.split('/', 1)[-1]+"/"+file for root, dirs, files in os.walk('.') for file in files if file.endswith(".adoc") and not "_partials" in root and not root == "."])
# Header marking the output as generated content.
result = ["// Automatically generated list of content - do not edit"]

for page in files:
    with open(page) as fh:
        lines = fh.read().splitlines()
    previous_line = ""
    is_first_title = True
    for current in lines:
        title_match = TITLE.search(current)
        if title_match:
            # Depth: "==" is level 3 in the nav tree, one "*" per level.
            depth = title_match.group(1).count("=") + 1
            heading = title_match.group(2)
            # An explicit [[anchor]] on the line just above the title wins;
            # otherwise derive the id the way asciidoc would.
            anchor_match = ID.search(previous_line)
            anchor = anchor_match.group(1) if anchor_match else slugify(heading)
            if depth < 6:
                if is_first_title:
                    # The page's first title links to the page itself,
                    # without an in-page anchor.
                    result.append("*" * depth + " xref:" + page + "[" + heading + "]")
                    is_first_title = False
                else:
                    result.append("*" * depth + " xref:" + page + "#" + anchor + "[" + heading + "]")
        previous_line = current

print("\n".join(result))