Rewrite README to include blog/TILs/releases
simonw committed Jul 10, 2020
1 parent f727f09 commit d2b5e8b
Showing 4 changed files with 200 additions and 0 deletions.
41 changes: 41 additions & 0 deletions .github/workflows/build.yml
@@ -0,0 +1,41 @@
name: Build README

on:
  push:
  workflow_dispatch:
  schedule:
  - cron: '32 * * * *'
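  # Scheduled runs use UTC; an off-the-hour minute like 32 avoids the
  # top-of-the-hour crush of scheduled workflows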

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
    - name: Check out repo
      uses: actions/checkout@v2
    - name: Set up Python
      uses: actions/setup-python@v2
      with:
        python-version: 3.8
    - uses: actions/cache@v2
      name: Configure pip caching
      with:
        path: ~/.cache/pip
        key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
        restore-keys: |
          ${{ runner.os }}-pip-
    - name: Install Python dependencies
      run: |
        python -m pip install -r requirements.txt
    - name: Update README
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      run: |-
        python build_readme.py
        cat README.md
    - name: Commit and push if README changed
      run: |-
        git diff
        git config --global user.email "readme-bot@example.com"
        git config --global user.name "README-bot"
        git diff --quiet || (git add README.md && git commit -m "Updated README")
        git push
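
The last step commits only when the README actually changed: `git diff --quiet` exits non-zero when there are unstaged changes, so the `||` branch runs just in that case. A minimal illustrative Python sketch of the same guard (a hypothetical helper, not part of this commit):

```python
import subprocess

def commit_if_changed(path="README.md", message="Updated README"):
    # git diff --quiet exits with a non-zero status when `path` has
    # unstaged changes, and prints nothing either way
    changed = subprocess.run(["git", "diff", "--quiet", "--", path]).returncode != 0
    if changed:
        subprocess.run(["git", "add", path], check=True)
        subprocess.run(["git", "commit", "-m", message], check=True)
        subprocess.run(["git", "push"], check=True)
```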
17 changes: 17 additions & 0 deletions README.md
@@ -1 +1,18 @@
Currently mostly working on [Datasette](https://github.com/simonw/datasette) and associated projects.

<table><tr><td valign="top">

### Recent releases
<!-- recent_releases starts -->
<!-- recent_releases ends -->
</td><td valign="top">

### TIL
<!-- tils starts -->
<!-- tils ends -->
</td><td valign="top">

### On my blog
<!-- blog starts -->
<!-- blog ends -->
</td></tr></table>
139 changes: 139 additions & 0 deletions build_readme.py
@@ -0,0 +1,139 @@
from python_graphql_client import GraphqlClient
import feedparser
import httpx
import pathlib
import re
import os

root = pathlib.Path(__file__).parent.resolve()
client = GraphqlClient(endpoint="https://api.github.com/graphql")


TOKEN = os.environ.get("GITHUB_TOKEN", "")


def replace_chunk(content, marker, chunk):
    r = re.compile(
        r"<!\-\- {} starts \-\->.*<!\-\- {} ends \-\->".format(marker, marker),
        re.DOTALL,
    )
    chunk = "<!-- {} starts -->\n{}\n<!-- {} ends -->".format(marker, chunk, marker)
    return r.sub(chunk, content)
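

# Example (illustrative): only the text between the marker pair changes;
# everything around it is left intact:
#   replace_chunk("a <!-- x starts -->old<!-- x ends --> b", "x", "new")
#   -> "a <!-- x starts -->\nnew\n<!-- x ends --> b"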


def make_query(after_cursor=None):
    return """
query {
  viewer {
    repositories(first: 100, privacy: PUBLIC, after:AFTER) {
      pageInfo {
        hasNextPage
        endCursor
      }
      nodes {
        name
        releases(last:1) {
          totalCount
          nodes {
            name
            publishedAt
            url
          }
        }
      }
    }
  }
}
""".replace(
        "AFTER", '"{}"'.format(after_cursor) if after_cursor else "null"
    )
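

# make_query() interpolates the pagination cursor: `after:null` on the
# first page, then `after:"<endCursor>"` on each subsequent page, fetching
# public repositories 100 at a time.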


def fetch_releases(oauth_token):
    repos = []
    releases = []
    repo_names = set()
    has_next_page = True
    after_cursor = None

    while has_next_page:
        data = client.execute(
            query=make_query(after_cursor),
            headers={"Authorization": "Bearer {}".format(oauth_token)},
        )
        for repo in data["data"]["viewer"]["repositories"]["nodes"]:
            if repo["releases"]["totalCount"] and repo["name"] not in repo_names:
                repos.append(repo)
                repo_names.add(repo["name"])
                releases.append(
                    {
                        "repo": repo["name"],
                        "release": repo["releases"]["nodes"][0]["name"]
                        .replace(repo["name"], "")
                        .strip(),
                        "published_at": repo["releases"]["nodes"][0][
                            "publishedAt"
                        ].split("T")[0],
                        "url": repo["releases"]["nodes"][0]["url"],
                    }
                )
        has_next_page = data["data"]["viewer"]["repositories"]["pageInfo"][
            "hasNextPage"
        ]
        after_cursor = data["data"]["viewer"]["repositories"]["pageInfo"]["endCursor"]
    return releases
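

# Each returned item looks like (hypothetical values):
#   {"repo": "datasette", "release": "0.45",
#    "published_at": "2020-07-01", "url": "https://github.com/..."}
# The .replace(...).strip() above trims the repo name out of release
# titles like "datasette 0.45", leaving just "0.45".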


def fetch_tils():
    sql = "select title, url, created_utc from til order by created_utc desc limit 10"
    return httpx.get(
        "https://til.simonwillison.net/til.json",
        params={"sql": sql, "_shape": "array",},
    ).json()
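

# til.simonwillison.net is a Datasette instance: ?sql=...&_shape=array
# executes the query and returns a bare JSON array of row objects, so
# .json() is already the list this script needs.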


def fetch_blog_entries():
    entries = feedparser.parse("https://simonwillison.net/atom/entries/")["entries"]
    return [
        {
            "title": entry["title"],
            "url": entry["link"].split("#")[0],
            "published": entry["published"].split("T")[0],
        }
        for entry in entries
    ]
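

# feedparser normalizes the Atom feed into dict-like entries; "published"
# is an ISO 8601 timestamp, so split("T")[0] keeps just the date, and
# split("#")[0] drops any fragment identifier from the entry link.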


if __name__ == "__main__":
    readme = root / "README.md"
    releases = fetch_releases(TOKEN)
    releases.sort(key=lambda r: r["published_at"], reverse=True)
    md = "\n".join(
        [
            "* [{repo} {release}]({url}) - {published_at}".format(**release)
            for release in releases[:10]
        ]
    )
    readme_contents = readme.open().read()
    rewritten = replace_chunk(readme_contents, "recent_releases", md)

    tils = fetch_tils()
    tils_md = "\n".join(
        [
            "* [{title}]({url}) - {created_at}".format(
                title=til["title"],
                url=til["url"],
                created_at=til["created_utc"].split("T")[0],
            )
            for til in tils
        ]
    )
    rewritten = replace_chunk(rewritten, "tils", tils_md)

    entries = fetch_blog_entries()[:10]
    entries_md = "\n".join(
        ["* [{title}]({url}) - {published}".format(**entry) for entry in entries]
    )
    rewritten = replace_chunk(rewritten, "blog", entries_md)

    readme.open("w").write(rewritten)
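
To exercise the script outside of Actions, a hypothetical local run only needs a personal access token in the environment (in CI the workflow injects `secrets.GITHUB_TOKEN` for this):

```python
# Hypothetical local invocation; the token value is a placeholder.
import os
import subprocess

os.environ["GITHUB_TOKEN"] = "<personal-access-token>"
subprocess.run(["python", "build_readme.py"], check=True)
```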
3 changes: 3 additions & 0 deletions requirements.txt
@@ -0,0 +1,3 @@
python-graphql-client==0.3.0
httpx
feedparser
