Skip to content

Commit

Permalink
clean up subcommand naming
Browse files Browse the repository at this point in the history
  • Loading branch information
chapmanjacobd committed Apr 14, 2024
1 parent 01eb403 commit 33b4e1e
Show file tree
Hide file tree
Showing 29 changed files with 153 additions and 123 deletions.
27 changes: 10 additions & 17 deletions tests/test_lb.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,8 @@

import pytest

from xklb.lb import library as lb
from xklb.play_actions import watch as wt

from xklb.lb import library as lb, progs
from xklb.utils.log_utils import log

def test_lb_help(capsys):
sys.argv = ["lb"]
Expand All @@ -19,18 +18,12 @@ def test_lb_help(capsys):
assert "subcommands" in captured


def test_wt_help(capsys):
wt_help_text = "usage:,where,sort,--duration".split(",")

sys.argv = ["wt", "-h"]
with pytest.raises(SystemExit):
wt()
captured = capsys.readouterr().out.replace("\n", "")
for help_text in wt_help_text:
assert help_text in captured
def test_duplicate_args(capsys):
    """Every registered subcommand should print its own `usage: library <name>` line for -h."""
    for category_progs in progs.values():  # category labels are not needed here
        for subcommand in category_progs:  # keys are the subcommand names; descriptions unused
            with pytest.raises(SystemExit):  # argparse exits after printing help
                lb([subcommand, "-h"])
            captured = capsys.readouterr().out.replace("\n", "")
            # CLI names use dashes even though the registry keys use underscores
            assert f'usage: library {subcommand.replace("_", "-")}' in captured

with pytest.raises(SystemExit):
lb(["wt", "-h"])
captured = capsys.readouterr().out.replace("\n", "")
for help_text in wt_help_text:
assert help_text in captured
log.info(len(captured))
4 changes: 2 additions & 2 deletions tests/test_rss.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,10 @@
def test_get_rss_exists():
feed = rss_extract.get_feed("https://simonwillison.net/atom/everything/")

assert feed.feed.title == "Simon Willison's Weblog"
assert feed.feed.title == "Simon Willison's Weblog" # type: ignore


def test_get_rss_redirect():
feed = rss_extract.get_feed("https://micro.mjdescy.me/")

assert feed.feed.title == "Michael Descy"
assert feed.feed.title == "Michael Descy" # type: ignore
6 changes: 3 additions & 3 deletions xklb/fs_extract.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ def parse_args(action, usage):
arggroups.debug(parser)

arggroups.database(parser)
if action == SC.fsadd:
if action == SC.fs_add:
parser.add_argument("paths", nargs="+")
args = parser.parse_intermixed_args()

Expand Down Expand Up @@ -403,15 +403,15 @@ def fs_add(args=None) -> None:
if args:
sys.argv = ["lb", *args]

args, _parser = parse_args(SC.fsadd, usage.fsadd)
args, _parser = parse_args(SC.fs_add, usage.fs_add)
extractor(args, args.paths)


def fs_update(args=None) -> None:
if args:
sys.argv = ["lb", *args]

args, parser = parse_args(SC.fsupdate, usage.fsupdate)
args, parser = parse_args(SC.fs_update, usage.fs_update)

fs_playlists = list(
args.db.query(
Expand Down
8 changes: 4 additions & 4 deletions xklb/gdl_extract.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,15 +20,15 @@ def parse_args(action, usage) -> argparse.Namespace:
arggroups.debug(parser)

arggroups.database(parser)
if action == SC.galleryadd:
if action == SC.gallery_add:
parser.add_argument(
"playlists", nargs="*", default=argparse_utils.STDIN_DASH, action=argparse_utils.ArgparseArgsOrStdin
)

args = parser.parse_intermixed_args()
args.action = action

if action == SC.galleryadd:
if action == SC.gallery_add:
Path(args.database).touch()
args.db = db_utils.connect(args)

Expand All @@ -49,7 +49,7 @@ def gallery_add(args=None) -> None:
if args:
sys.argv = ["galleryadd", *args]

args = parse_args(SC.galleryadd, usage=usage.galleryadd)
args = parse_args(SC.gallery_add, usage=usage.gallery_add)

if args.insert_only:
args.db["media"].insert_all(
Expand Down Expand Up @@ -89,7 +89,7 @@ def gallery_update(args=None) -> None:
if args:
sys.argv = ["galleryupdate", *args]

args = parse_args(SC.galleryupdate, usage=usage.galleryupdate)
args = parse_args(SC.gallery_update, usage=usage.gallery_update)

gdl_playlists = db_playlists.get_all(
args,
Expand Down
2 changes: 1 addition & 1 deletion xklb/hn_extract.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ async def run(args, db_queue):


def hacker_news_add() -> None:
args = parse_args(prog="library hnadd", usage=usage.hnadd)
args = parse_args(prog="library hnadd", usage=usage.hn_add)
try:
import aiohttp
except ModuleNotFoundError:
Expand Down
34 changes: 17 additions & 17 deletions xklb/lb.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,20 +8,20 @@

progs = {
"Create database subcommands": {
"fsadd": "Add local media",
"tubeadd": "Add online video media (yt-dlp)",
"webadd": "Add open-directory media",
"galleryadd": "Add online gallery media (gallery-dl)",
"tabsadd": "Create a tabs database; Add URLs",
"fs_add": "Add local media",
"tube_add": "Add online video media (yt-dlp)",
"web_add": "Add open-directory media",
"gallery_add": "Add online gallery media (gallery-dl)",
"tabs_add": "Create a tabs database; Add URLs",
"links_add": "Create a link-scraping database",
"siteadd": "Auto-scrape website data to SQLITE",
"redditadd": "Create a reddit database; Add subreddits",
"site_add": "Auto-scrape website data to SQLITE",
"reddit_add": "Create a reddit database; Add subreddits",
"pushshift": "Convert pushshift data to reddit.db format (stdin)",
"hnadd": "Create / Update a Hacker News database",
"hn_add": "Create / Update a Hacker News database",
"substack": "Backup substack articles",
"tildes": "Backup tildes comments and topics",
"places_import": "Import places of interest (POIs)",
"add_row": "Add arbitrary data to SQLITE",
"row_add": "Add arbitrary data to SQLITE",
},
"Text subcommands": {
"cluster_sort": "Sort text and images by similarity",
Expand Down Expand Up @@ -89,12 +89,12 @@
"tabs_shuffle": "Randomize tabs.db a bit",
},
"Update database subcommands": {
"fsupdate": "Update local media",
"tubeupdate": "Update online video media",
"webupdate": "Update open-directory media",
"galleryupdate": "Update online gallery media",
"fs_update": "Update local media",
"tube_update": "Update online video media",
"web_update": "Update open-directory media",
"gallery_update": "Update online gallery media",
"links_update": "Update a link-scraping database",
"redditupdate": "Update reddit media",
"reddit_update": "Update reddit media",
},
"Misc subcommands": {
"export_text": "Export HTML files from SQLite databases",
Expand Down Expand Up @@ -185,7 +185,7 @@ def add_parser(subparsers, func, aliases=None):
add_parser(subparsers, "xklb.play_actions.watch", ["wt", "tubewatch", "tw", "entries"])
add_parser(subparsers, "xklb.reddit_extract.reddit_add", ["ra"])
add_parser(subparsers, "xklb.reddit_extract.reddit_update", ["ru"])
add_parser(subparsers, "xklb.scripts.add_row.add_row")
add_parser(subparsers, "xklb.scripts.row_add.row_add", ['add_row'])
add_parser(subparsers, "xklb.scripts.big_dirs.big_dirs", ["large-folders"])
add_parser(subparsers, "xklb.scripts.block.block")
add_parser(subparsers, "xklb.scripts.christen.christen")
Expand Down Expand Up @@ -215,6 +215,7 @@ def add_parser(subparsers, func, aliases=None):
add_parser(subparsers, "xklb.scripts.mining.substack.substack")
add_parser(subparsers, "xklb.scripts.mining.tildes.tildes")
add_parser(subparsers, "xklb.scripts.move_list.move_list", ["mv-list"])
add_parser(subparsers, "xklb.scripts.mount_stats.mount_stats", ["mu", "mount-usage"])
add_parser(subparsers, "xklb.scripts.open_links.open_links", ["links-open"])
add_parser(subparsers, "xklb.scripts.optimize_db.optimize_db", ["optimize"])
add_parser(subparsers, "xklb.scripts.places_import.places_import")
Expand All @@ -233,15 +234,14 @@ def add_parser(subparsers, func, aliases=None):
add_parser(subparsers, "xklb.scripts.search_db.search_db", ["s", "sdb", "search-dbs"])
add_parser(subparsers, "xklb.scripts.streaming_tab_loader.streaming_tab_loader", ["surf"])
add_parser(subparsers, "xklb.scripts.web_add.web_add", ["web-dir-add"])
add_parser(subparsers, "xklb.scripts.web_update.web_update", ["web-dir-update"])
add_parser(subparsers, "xklb.scripts.web_add.web_update", ["web-dir-update"])
add_parser(subparsers, "xklb.search.search", ["sc", "search-captions"])
add_parser(subparsers, "xklb.site_extract.site_add", ["sa", "sql-site", "site-sql"])
add_parser(subparsers, "xklb.tabs_actions.tabs", ["tb"])
add_parser(subparsers, "xklb.tabs_extract.tabs_add")
add_parser(subparsers, "xklb.tabs_extract.tabs_shuffle")
add_parser(subparsers, "xklb.tube_extract.tube_add", ["ta", "dladd", "da"])
add_parser(subparsers, "xklb.tube_extract.tube_update", ["dlupdate", "tu"])
add_parser(subparsers, "xklb.utils.devices.mount_stats", ["mu", "mount-usage"])

parser.add_argument("--version", "-V", action="store_true")

Expand Down
2 changes: 1 addition & 1 deletion xklb/media/books.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@

def munge_book_tags(path) -> dict:
try:
import textract
import textract # type: ignore
except ModuleNotFoundError:
print(
"textract is required for text database creation: pip install textract; sudo dnf install libxml2-devel libxslt-devel antiword unrtf poppler-utils tesseract sox-plugins-nonfree sox libjpeg-devel swig",
Expand Down
2 changes: 1 addition & 1 deletion xklb/media/dedupe.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ def parse_args() -> argparse.Namespace:
args = parser.parse_intermixed_args()
args.db = db_utils.connect(args)

args.action = consts.SC.dedupe
args.action = consts.SC.dedupe_media

args.sort = "\n , ".join(filter(bool, args.sort))
args.sort = args.sort.replace(",,", ",")
Expand Down
2 changes: 1 addition & 1 deletion xklb/readme.py
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,7 @@
tube-update will go through the list of added playlists and fetch metadata for
any videos not previously seen.
library tubeupdate tube.db
library tube-update tube.db
### 2. Watch / Listen from websites
Expand Down
4 changes: 2 additions & 2 deletions xklb/reddit_extract.py
Original file line number Diff line number Diff line change
Expand Up @@ -295,7 +295,7 @@ def reddit_add(args=None) -> None:
if args:
sys.argv = ["lb", *args]

args = parse_args("redditadd", usage=usage.redditadd)
args = parse_args("reddit_add", usage=usage.reddit_add)

for path in args.paths:
path = path.lower()
Expand Down Expand Up @@ -352,7 +352,7 @@ def reddit_update(args=None) -> None:
if args:
sys.argv = ["lb", *args]

args = parse_args("redditupdate", usage=usage.redditupdate)
args = parse_args("reddit-update", usage=usage.reddit_update)
reddit_playlists = db_playlists.get_all(
args,
"extractor_key, path, extractor_playlist_id, extractor_config",
Expand Down
2 changes: 1 addition & 1 deletion xklb/scripts/big_dirs.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ def parse_args() -> argparse.Namespace:
if args.size:
args.size = sql_utils.parse_human_to_sql(nums.human_to_bytes, "size", args.size)

args.action = consts.SC.bigdirs
args.action = consts.SC.big_dirs
log.info(objects.dict_filter_bool(args.__dict__))
return args

Expand Down
4 changes: 2 additions & 2 deletions xklb/scripts/dedupe_czkawka.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
import humanize
from screeninfo import get_monitors

from xklb import post_actions
from xklb import post_actions, usage
from xklb.media import media_player
from xklb.utils import arggroups, consts, devices, file_utils, iterables, mpv_utils, processes
from xklb.utils.log_utils import log
Expand All @@ -15,7 +15,7 @@


def parse_args():
parser = argparse.ArgumentParser(description="Choose which duplicate to keep by opening both side-by-side in mpv")
parser = argparse.ArgumentParser(usage.dedupe_czkawka)
arggroups.playback(parser)
arggroups.capability_clobber(parser)
arggroups.capability_delete(parser)
Expand Down
2 changes: 1 addition & 1 deletion xklb/scripts/disk_usage.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ def parse_args() -> argparse.Namespace:
if args.size:
args.size = sql_utils.parse_human_to_sql(nums.human_to_bytes, "size", args.size)

args.action = consts.SC.diskusage
args.action = consts.SC.disk_usage
log.info(objects.dict_filter_bool(args.__dict__))
return args

Expand Down
2 changes: 1 addition & 1 deletion xklb/scripts/history.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ def parse_args() -> argparse.Namespace:
default="watched",
const="watched",
nargs="?",
help=f"One of: {', '.join(consts.time_facets)} (default: %(default)s)",
help=f"One of: {', '.join(consts.time_facets)}",
)
parser.add_argument("--hide-deleted", action="store_true")
parser.add_argument("--played", "--opened", action="store_true")
Expand Down
3 changes: 2 additions & 1 deletion xklb/scripts/mining/markdown_links.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import argparse, urllib.parse

from xklb import usage
from xklb.utils import arg_utils, arggroups, web

COMMON_SITE_TITLE_SUFFIXES = [
Expand All @@ -23,7 +24,7 @@ def fake_title(url):


def markdown_links():
parser = argparse.ArgumentParser()
parser = argparse.ArgumentParser(usage=usage.markdown_links)
arggroups.requests(parser)
arggroups.selenium(parser)
arggroups.debug(parser)
Expand Down
12 changes: 7 additions & 5 deletions xklb/scripts/mining/nouns.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,13 @@
import argparse
import sys
from html.parser import HTMLParser
from io import StringIO

from xklb import usage
from xklb.data import wordbank
from xklb.utils import printing
from xklb.utils import arggroups, printing

"""
extract compound nouns and phrases from unstructured mixed HTML plain text

xsv select text hn_comment_202210242109.csv | library nouns | sort | uniq -c | sort --numeric-sort
"""


class MLStripper(HTMLParser):
Expand Down Expand Up @@ -65,5 +63,9 @@ def line_processor(txt) -> None:


def nouns() -> None:
    """Read mixed HTML/plain text from stdin and print extracted nouns, one per line.

    Consumes sys.stdin line by line; output is produced by `line_processor`.
    """
    # Pass `usage=` by keyword: the first positional parameter of
    # ArgumentParser is `prog`, so the original positional call set the
    # program name instead of the usage text (sibling parsers in this
    # codebase all use the `usage=` keyword).
    parser = argparse.ArgumentParser(usage=usage.nouns)
    arggroups.debug(parser)
    parser.parse_args()  # parsed namespace unused; parsing still honors -h and debug flags

    for line in sys.stdin:
        line_processor(line)
26 changes: 26 additions & 0 deletions xklb/scripts/mount_stats.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
from xklb import usage
from xklb.utils import arggroups
from xklb.utils.devices import get_mount_stats


import argparse


# TODO: filter out mount points with different paths but are subpaths of the same mount point

def mount_stats() -> None:
    """Print relative disk usage and free space per mount point as text bar charts."""
    parser = argparse.ArgumentParser(usage=usage.mount_stats)
    arggroups.debug(parser)

    parser.add_argument("mounts", nargs="+")
    args = parser.parse_args()

    stats = get_mount_stats(args.mounts)

    # NOTE(review): assumes each entry has 'mount', 'used', 'free' keys with
    # 'used'/'free' as fractions in [0, 1] -- confirm against get_mount_stats.
    for heading, key in (("Relative disk dependence:", "used"), ("\nRelative free space:", "free")):
        print(heading)
        for entry in stats:
            bar = "#" * int(entry[key] * 80)  # scale fraction to an 80-char bar
            print(f"{entry['mount']}: {bar} {entry[key]:.1%}")
2 changes: 1 addition & 1 deletion xklb/scripts/playback_control.py
Original file line number Diff line number Diff line change
Expand Up @@ -290,7 +290,7 @@ def playback_stop() -> None:


def playback_pause() -> None:
args = parse_args("next")
args = parse_args("pause")
playing = _now_playing(args)

if playing["catt"]:
Expand Down
4 changes: 2 additions & 2 deletions xklb/scripts/add_row.py → xklb/scripts/row_add.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,8 @@ def get_val():
return kwargs


def add_row():
parser = argparse.ArgumentParser(description="Add arbitrary rows to a SQLITE db", usage=usage.add_row)
def row_add():
parser = argparse.ArgumentParser(description="Add arbitrary rows to a SQLITE db", usage=usage.row_add)
parser.add_argument("--table-name", "--table", "-t", default="media")
arggroups.debug(parser)

Expand Down
Loading

0 comments on commit 33b4e1e

Please sign in to comment.