Skip to content

Commit

Permalink
ruff reformatting modification
Browse files Browse the repository at this point in the history
  • Loading branch information
Luca-Blight committed Oct 26, 2023
1 parent aa02e81 commit fe237ff
Show file tree
Hide file tree
Showing 144 changed files with 15,545 additions and 15,542 deletions.
288 changes: 144 additions & 144 deletions docs/plugins/conversion_table.py

Large diffs are not rendered by default.

38 changes: 19 additions & 19 deletions docs/plugins/griffe_doclinks.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,67 +9,67 @@
from pymdownx.slugs import slugify

# Root of the docs/ tree (this plugin lives in docs/plugins/).
DOCS_PATH = Path(__file__).parent.parent
# Shared slug generator; lower-case to match mkdocs' heading anchor ids.
slugifier = slugify(case='lower')


def find_heading(content: str, slug: str, file_path: Path) -> Tuple[str, int]:
    """Locate the markdown heading in `content` whose slug equals `slug`.

    Returns the heading text and the offset just past the heading match.
    Raises ValueError (mentioning `file_path`) if no heading slugifies to `slug`.
    """
    for match in re.finditer('^#+ (.+)', content, flags=re.M):
        heading_text = match.group(1)
        if slugifier(heading_text, '-') == slug:
            return heading_text, match.end()
    raise ValueError(f'heading with slug {slug!r} not found in {file_path}')


def insert_at_top(path: str, api_link: str) -> str:
    """Ensure an API-documentation admonition sits at the top of a usage page.

    `path` is the usage-docs path without extension; `api_link` is the dotted
    API object path. Returns the abstract admonition text for the docstring.
    NOTE(review): rewrites the markdown file in place when the link is absent.
    """
    rel_file = path.rstrip('/') + '.md'
    file_path = DOCS_PATH / rel_file
    content = file_path.read_text()

    # everything before the second heading counts as the page's first section
    second_heading = re.search('^#+ ', content, flags=re.M)
    assert second_heading, 'unable to find second heading in file'
    first_section = content[: second_heading.start()]

    if f'[{api_link}]' not in first_section:
        print(f'inserting API link "{api_link}" at the top of {file_path.relative_to(DOCS_PATH)}')
        new_content = '??? api "API Documentation"\n' f'    [`{api_link}`][{api_link}]<br>\n\n' f'{content}'
        file_path.write_text(new_content)

    heading = file_path.stem.replace('_', ' ').title()
    return f'!!! abstract "Usage Documentation"\n    [{heading}](../{rel_file})\n'


def replace_links(m: re.Match, *, api_link: str) -> str:
    """`re.sub` callback linking a usage-docs URL to its API object.

    Group 1 of `m` holds the path portion of the usage-docs URL, optionally
    followed by a `#slug` heading id. Returns the admonition text to place in
    the docstring; as a side effect, inserts an API link into the usage page
    (at the top, or under the matching heading) when one is missing.
    """
    path_group = m.group(1)
    if '#' not in path_group:
        # no heading id, put the content at the top of the page
        return insert_at_top(path_group, api_link)

    usage_path, slug = path_group.split('#', 1)
    rel_file = usage_path.rstrip('/') + '.md'
    file_path = DOCS_PATH / rel_file
    content = file_path.read_text()
    heading, heading_end = find_heading(content, slug, file_path)

    # section spanning from the target heading to the next heading (or EOF)
    next_heading = re.search('^#+ ', content[heading_end:], flags=re.M)
    if next_heading:
        next_section = content[heading_end : heading_end + next_heading.start()]
    else:
        next_section = content[heading_end:]

    if f'[{api_link}]' not in next_section:
        print(f'inserting API link "{api_link}" into {file_path.relative_to(DOCS_PATH)}')
        file_path.write_text(
            f'{content[:heading_end]}\n\n'
            '??? api "API Documentation"\n'
            f'    [`{api_link}`][{api_link}]<br>'
            f'{content[heading_end:]}'
        )

    return f'!!! abstract "Usage Documentation"\n    [{heading}](../{rel_file}#{slug})\n'


def update_docstring(obj: GriffeObject) -> str:
return re.sub(
r"usage[\- ]docs: ?https://docs\.pydantic\.dev/.+?/(\S+)",
r'usage[\- ]docs: ?https://docs\.pydantic\.dev/.+?/(\S+)',
partial(replace_links, api_link=obj.path),
obj.docstring.value,
flags=re.I,
Expand Down
142 changes: 71 additions & 71 deletions docs/plugins/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@

from .conversion_table import conversion_table

# Log through mkdocs' plugin logger so messages appear in `mkdocs build` output.
logger = logging.getLogger('mkdocs.plugin')
THIS_DIR = Path(__file__).parent  # docs/plugins
DOCS_DIR = THIS_DIR.parent
PROJECT_ROOT = DOCS_DIR.parent
Expand Down Expand Up @@ -60,34 +60,34 @@ def on_page_markdown(markdown: str, page: Page, config: Config, files: Files) ->


def add_changelog() -> None:
    """Render HISTORY.md into docs/changelog.md with GitHub links expanded."""
    changelog = (PROJECT_ROOT / 'HISTORY.md').read_text(encoding='utf-8')
    # turn @username mentions into profile links; '@@' escapes a literal '@'
    changelog = re.sub(r'(\s)@([\w\-]+)', r'\1[@\2](https://github.com/\2)', changelog, flags=re.I)
    changelog = re.sub(r'\[GitHub release]\(', r'[:simple-github: GitHub release](', changelog)
    changelog = re.sub('@@', '@', changelog)

    # avoid writing file unless the content has changed to avoid infinite build loop
    target = DOCS_DIR / 'changelog.md'
    if not target.is_file() or target.read_text(encoding='utf-8') != changelog:
        target.write_text(changelog, encoding='utf-8')


def add_mkdocs_run_deps() -> None:
    """Write theme/mkdocs_run_deps.html pinning versions for in-browser examples."""
    # set the pydantic and pydantic-core versions to configure for running examples in the browser
    pyproject_toml = (PROJECT_ROOT / 'pyproject.toml').read_text()
    pydantic_core_version = re.search(r'pydantic-core==(.+?)["\']', pyproject_toml).group(1)

    version_py = (PROJECT_ROOT / 'pydantic' / 'version.py').read_text()
    pydantic_version = re.search(r'^VERSION ?= (["\'])(.+)\1', version_py, flags=re.M).group(2)

    mkdocs_run_deps = json.dumps([f'pydantic=={pydantic_version}', f'pydantic-core=={pydantic_core_version}'])
    logger.info('Setting mkdocs_run_deps=%s', mkdocs_run_deps)

    html = f"""\
<script>
window.mkdocs_run_deps = {mkdocs_run_deps}
</script>
"""
    (DOCS_DIR / 'theme/mkdocs_run_deps.html').write_text(html)


Expand Down Expand Up @@ -125,15 +125,15 @@ def add_tabs(match: re.Match[str]) -> str:
continue
last_code = tab_code

content = indent(f"{prefix}\n{tab_code}```{numbers}", " " * 4)
content = indent(f'{prefix}\n{tab_code}```{numbers}', ' ' * 4)
output.append(f'=== "Python 3.{minor_version} and above"\n\n{content}')

if len(output) == 1:
return match.group(0)
else:
return "\n\n".join(output)
return '\n\n'.join(output)

return re.sub(r"^(``` *py.*?)\n(.+?)^```(\s+(?:^\d+\. .+?\n)+)", add_tabs, markdown, flags=re.M | re.S)
return re.sub(r'^(``` *py.*?)\n(.+?)^```(\s+(?:^\d+\. .+?\n)+)', add_tabs, markdown, flags=re.M | re.S)


def _upgrade_code(code: str, min_version: int) -> str:
Expand Down Expand Up @@ -161,10 +161,10 @@ def replace_last_print(m2: re.Match[str]) -> str:
ind, json_text = m2.groups()
json_text = indent(json.dumps(json.loads(json_text), indent=2), ind)
# no trailing fence as that's not part of code
return f"\n{ind}```\n\n{ind}JSON output:\n\n{ind}```json\n{json_text}\n"
return f'\n{ind}```\n\n{ind}JSON output:\n\n{ind}```json\n{json_text}\n'

code = re.sub(r'\n( *)"""(.*?)\1"""\n$', replace_last_print, code, flags=re.S)
return f"{start}{attrs}{code}{start}\n"
return f'{start}{attrs}{code}{start}\n'

return re.sub(r'(^ *```)([^\n]*?output="json"[^\n]*?\n)(.+?)\1', replace_json, markdown, flags=re.M | re.S)

Expand All @@ -179,17 +179,17 @@ def remove_code_fence_attributes(markdown: str) -> str:

def remove_attrs(match: re.Match[str]) -> str:
suffix = re.sub(
r' (?:test|lint|upgrade|group|requires|output|rewrite_assert)=".+?"', "", match.group(2), flags=re.M
r' (?:test|lint|upgrade|group|requires|output|rewrite_assert)=".+?"', '', match.group(2), flags=re.M
)
return f"{match.group(1)}{suffix}"
return f'{match.group(1)}{suffix}'

return re.sub(r"^( *``` *py)(.*)", remove_attrs, markdown, flags=re.M)
return re.sub(r'^( *``` *py)(.*)', remove_attrs, markdown, flags=re.M)


def get_orgs_data() -> list[dict[str, str]]:
    """Load the organisation tile entries from plugins/orgs.toml."""
    with (THIS_DIR / 'orgs.toml').open('rb') as f:
        data = tomli.load(f)
    return data['orgs']


tile_template = """
Expand All @@ -201,111 +201,111 @@ def get_orgs_data() -> list[dict[str, str]]:


def render_index(markdown: str, page: Page) -> str | None:
    """Substitute {{ version }} and {{ organisations }} on the docs index page.

    Returns None for every page other than index.md so other hooks can run.
    """
    if page.file.src_uri != 'index.md':
        return None

    # resolve a version string from, in order: explicit env var, a tag ref, a commit SHA
    if version := os.getenv('PYDANTIC_VERSION'):
        url = f'https://github.com/pydantic/pydantic/releases/tag/{version}'
        version_str = f'Documentation for version: [{version}]({url})'
    elif (version_ref := os.getenv('GITHUB_REF')) and version_ref.startswith('refs/tags/'):
        version = re.sub('^refs/tags/', '', version_ref.lower())
        url = f'https://github.com/pydantic/pydantic/releases/tag/{version}'
        version_str = f'Documentation for version: [{version}]({url})'
    elif sha := os.getenv('GITHUB_SHA'):
        url = f'https://github.com/pydantic/pydantic/commit/{sha}'
        sha = sha[:7]  # short SHA for display; the URL keeps the full one
        version_str = f'Documentation for development version: [{sha}]({url})'
    else:
        version_str = 'Documentation for development version'
    logger.info('Setting version prefix: %r', version_str)
    markdown = re.sub(r'{{ *version *}}', version_str, markdown)

    tiles = [tile_template.format(**org) for org in get_orgs_data()]
    orgs_grid = f'<div id="grid-container"><div id="company-grid" class="grid">{"".join(tiles)}</div></div>'
    return re.sub(r'{{ *organisations *}}', orgs_grid, markdown)


def render_why(markdown: str, page: Page) -> str | None:
    """Substitute {{ libraries }} and {{ organisations }} on the "why" page.

    Returns None for every page other than why.md so other hooks can run.
    """
    if page.file.src_uri != 'why.md':
        return None

    with (THIS_DIR / 'using.toml').open('rb') as f:
        using = tomli.load(f)['libs']

    libraries = '\n'.join('* [`{repo}`](https://github.com/{repo}) {stars:,} stars'.format(**lib) for lib in using)
    markdown = re.sub(r'{{ *libraries *}}', libraries, markdown)

    # orgs without an explicit description fall back to this stock line
    default_description = '_(Based on the criteria described above)_'
    sections = [
        f'### {org["name"]} {{#org-{org["key"]}}}\n\n{org.get("description") or default_description}'
        for org in get_orgs_data()
    ]
    return re.sub(r'{{ *organisations *}}', '\n\n'.join(sections), markdown)


def _generate_table_row(col_values: list[str]) -> str:
return f'| {" | ".join(col_values)} |\n'


def _generate_table_heading(col_names: list[str]) -> str:
return _generate_table_row(col_names) + _generate_table_row(["-"] * len(col_names))
return _generate_table_row(col_names) + _generate_table_row(['-'] * len(col_names))


def build_schema_mappings(markdown: str, page: Page) -> str | None:
    """Substitute {{ schema_mappings_table }} on usage/schema.md.

    The table rows come from plugins/schema_mappings.toml; returns None for
    every other page so other hooks can run.
    """
    if page.file.src_uri != 'usage/schema.md':
        return None

    col_names = [
        'Python type',
        'JSON Schema Type',
        'Additional JSON Schema',
        'Defined in',
        'Notes',
    ]
    table_text = _generate_table_heading(col_names)

    with (THIS_DIR / 'schema_mappings.toml').open('rb') as f:
        table = tomli.load(f)

    for entry in table.values():
        py_type = entry['py_type']
        json_type = entry['json_type']
        additional = entry['additional']
        defined_in = entry['defined_in']
        notes = entry['notes']
        # non-string "additional" values are structured data; serialize for display
        if additional and not isinstance(additional, str):
            additional = json.dumps(additional)
        cols = [f'`{py_type}`', f'`{json_type}`', f'`{additional}`' if additional else '', defined_in, notes]
        table_text += _generate_table_row(cols)

    return re.sub(r'{{ *schema_mappings_table *}}', table_text, markdown)


def build_conversion_table(markdown: str, page: Page) -> str | None:
    """Substitute the {{ conversion_table_* }} placeholders on the conversion-table page.

    One filtered view of the conversion table is rendered per placeholder id;
    returns None for every other page so other hooks can run.
    """
    if page.file.src_uri != 'concepts/conversion_table.md':
        return None

    filtered_table_predicates = {
        'all': lambda r: True,
        'json': lambda r: r.json_input,
        'json_strict': lambda r: r.json_input and r.strict,
        'python': lambda r: r.python_input,
        'python_strict': lambda r: r.python_input and r.strict,
    }

    for table_id, predicate in filtered_table_predicates.items():
        table_markdown = conversion_table.filtered(predicate).as_markdown()
        # indent so the table nests inside the surrounding markdown construct
        table_markdown = textwrap.indent(table_markdown, '    ')
        markdown = re.sub(rf'{{{{ *conversion_table_{table_id} *}}}}', table_markdown, markdown)

    return markdown


def devtools_example(markdown: str, page: Page) -> str | None:
    """Substitute {{ devtools_example }} on integrations/devtools.md.

    Wraps the pre-rendered HTML snippet in a highlight block; returns None for
    every other page so other hooks can run.
    """
    if page.file.src_uri != 'integrations/devtools.md':
        return None

    snippet = (THIS_DIR / 'devtools_output.html').read_text().strip('\n')
    full_html = f'<div class="highlight">\n<pre><code>{snippet}</code></pre>\n</div>'
    return re.sub(r'{{ *devtools_example *}}', full_html, markdown)
20 changes: 10 additions & 10 deletions docs/plugins/using_update.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,30 +10,30 @@


def update_lib(lib, *, retry=0):
    """Fetch the GitHub star count for `lib['repo']` and store it in `lib['stars']`.

    Retries up to 3 times on HTTP 403 (rate limiting), sleeping 5s between
    attempts; any other error status raises via `raise_for_status`.
    """
    repo = lib['repo']
    resp = session.get(f'https://api.github.com/repos/{repo}')
    if resp.status_code == 403 and retry < 3:
        print(f'retrying {repo} {retry}')
        sleep(5)
        return update_lib(lib, retry=retry + 1)

    resp.raise_for_status()
    # "watchers_count" mirrors the star count in the GitHub REST API
    stars = resp.json()['watchers_count']
    print(f'{repo}: {stars}')
    lib['stars'] = stars


# Refresh the star count of every library in using.toml, then rewrite the
# file with entries sorted by stars, descending.
with (THIS_DIR / 'using.toml').open('rb') as f:
    table = tomli.load(f)

libs = table['libs']
for lib in libs:
    update_lib(lib)

libs.sort(key=lambda entry: entry['stars'], reverse=True)

with (THIS_DIR / 'using.toml').open('w') as f:
    for lib in libs:
        f.write('[[libs]]\nrepo = "{repo}"\nstars = {stars}\n'.format(**lib))
Loading

0 comments on commit fe237ff

Please sign in to comment.