From 7b1e390312ed490281a6880c3b4c7c0f9680f962 Mon Sep 17 00:00:00 2001 From: Tim Sherratt Date: Wed, 24 Apr 2024 14:57:42 +1000 Subject: [PATCH] Update repository, add metadata, add notebook to get images from collections --- .gitignore | 6 +- README.md | 82 +- download-image-collection.ipynb | 306 +++++++ not_available.jpg | Bin 0 -> 49429 bytes pyproject.toml | 2 +- requirements-dev.in | 8 + requirements-dev.txt | 554 ++++++++++-- requirements.in | 5 +- requirements.txt | 398 ++++++--- rights-on-images.csv | 180 ---- rights-on-out-of-copyright-photos.csv | 160 ---- rights-statements-on-images.ipynb | 448 +++++++--- ro-crate-metadata.json | 273 ++++++ runtime.txt | 2 +- scripts/add_nb_metadata.py | 40 + scripts/create_previews.py | 34 + scripts/extract_metadata.py | 38 + scripts/generate_readme.py | 54 ++ scripts/list_imports.py | 41 + test_and_lint.sh => scripts/test_and_lint.sh | 0 scripts/update_crate.py | 837 ++++++++++++++++++ .../update_version.sh | 3 +- 22 files changed, 2726 insertions(+), 745 deletions(-) create mode 100644 download-image-collection.ipynb create mode 100644 not_available.jpg delete mode 100644 rights-on-images.csv delete mode 100644 rights-on-out-of-copyright-photos.csv create mode 100644 ro-crate-metadata.json create mode 100644 scripts/add_nb_metadata.py create mode 100644 scripts/create_previews.py create mode 100644 scripts/extract_metadata.py create mode 100644 scripts/generate_readme.py create mode 100644 scripts/list_imports.py rename test_and_lint.sh => scripts/test_and_lint.sh (100%) create mode 100755 scripts/update_crate.py rename update_version.sh => scripts/update_version.sh (91%) diff --git a/.gitignore b/.gitignore index 664f2cd..89019be 100644 --- a/.gitignore +++ b/.gitignore @@ -17,4 +17,8 @@ cats.csv a116159h.jpg d1_10099h.jpg .env -*.old \ No newline at end of file +*.old +git_all_versions.sh +harvested-metadata-all.ndjson +harvested-metadata.ndjson +images-viewcopy.ndjson diff --git a/README.md b/README.md index cb15f5d..ecff42a 100644 --- a/README.md +++ b/README.md @@ -1,85 +1,21 @@ -# Trove images +# trove-images -Current version: [v1.0.1](https://github.com/GLAM-Workbench/trove-images/releases/tag/v1.0.1) +A GLAM Workbench repository -Jupyter notebooks to work with data from Trove's picture zone. For more information see the [Trove images](https://glam-workbench.net/trove-images/) section of the GLAM Workbench. +For more information and documentation see the [Trove images](https://glam-workbench.net/trove-images) section of the [GLAM Workbench](https://glam-workbench.net). -## Notebook topics +## Notebooks +- [Download a collection of digitised images](https://github.com/GLAM-Workbench/trove-images/blob/master/download-image-collection.ipynb) +- [The use of standard licences and rights statements in Trove image records](https://github.com/GLAM-Workbench/trove-images/blob/master/rights-statements-on-images.ipynb) -* [The use of standard licences and rights statements in Trove image records](rights-statements-on-images.ipynb) – build a picture of which rights statements are currently being used, and by who. 
-## Datasets +## Associated datasets +- [trove-images-rights-data](https://github.com/GLAM-Workbench/trove-images-rights-data/) -**Harvested 9 March 2022** - -* [Rights applied to images by each Trove contributor](https://github.com/GLAM-Workbench/trove-images/blob/master/rights-on-images.csv) -* [Rights applied to out-of-copyright photographs by each Trove contributor](https://github.com/GLAM-Workbench/trove-images/blob/master/rights-on-out-of-copyright-photos.csv) -## Run these notebooks - -There are a number of different ways to use these notebooks. Binder is quickest and easiest, but it doesn't save your data. I've listed the options below from easiest to most complicated (requiring more technical knowledge). - -### Using Binder - -[![Launch on Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/GLAM-Workbench/trove-images/master/?urlpath=lab/tree/index.md) - -Click on the button above to launch the notebooks in this repository using the [Binder](https://mybinder.org/) service (it might take a little while to load). This is a free service, but note that sessions will close if you stop using the notebooks, and no data will be saved. Make sure you download any changed notebooks or harvested data that you want to save. - -See [Using Binder](https://glam-workbench.net/using-binder/) for more details. - -### Using Reclaim Cloud - -[![Launch on Reclaim Cloud](https://glam-workbench.github.io/images/launch-on-reclaim-cloud.svg)](https://app.my.reclaim.cloud/?manifest=https://raw.githubusercontent.com/GLAM-Workbench/trove-images/master/reclaim-manifest.jps) - -[Reclaim Cloud](https://reclaim.cloud/) is a paid hosting service, aimed particularly at supported digital scholarship in hte humanities. Unlike Binder, the environments you create on Reclaim Cloud will save your data – even if you switch them off! To run this repository on Reclaim Cloud for the first time: - -* Create a [Reclaim Cloud](https://reclaim.cloud/) account and log in. -* Click on the button above to start the installation process. -* A dialogue box will ask you to set a password, this is used to limit access to your Jupyter installation. -* Sit back and wait for the installation to complete! -* Once the installation is finished click on the 'Open in Browser' button of your newly created environment (note that you might need to wait a few minutes before everything is ready). - -See [Using Reclaim Cloud](https://glam-workbench.net/using-reclaim-cloud/) for more details. - -### Using Docker - -You can use Docker to run a pre-built computing environment on your own computer. It will set up everything you need to run the notebooks in this repository. This is free, but requires more technical knowledge – you'll have to install Docker on your computer, and be able to use the command line. - -* Install [Docker Desktop](https://docs.docker.com/get-docker/). -* Create a new directory for this repository and open it from the command line. -* From the command line, run the following command: - ``` - docker run -p 8888:8888 --name trove-images -v "$PWD":/home/jovyan/work quay.io/glamworkbench/trove-images repo2docker-entrypoint jupyter lab --ip 0.0.0.0 --NotebookApp.token='' --LabApp.default_url='/lab/tree/index.ipynb' - ``` -* It will take a while to download and configure the Docker image. Once it's ready you'll see a message saying that Jupyter Notebook is running. -* Point your web browser to `http://127.0.0.1:8888` - -See [Using Docker](https://glam-workbench.net/using-docker/) for more details. 
- -### Setting up on your own computer - -If you know your way around the command line and are comfortable installing software, you might want to set up your own computer to run these notebooks. - -Assuming you have recent versions of Python and Git installed, the steps might be something like: - -* Create a virtual environment, eg: `python -m venv trove-images` -* Open the new directory" `cd trove-images` -* Activate the environment `source bin/activate` -* Clone the repository: `git clone https://github.com/GLAM-Workbench/trove-images.git notebooks` -* Open the new `notebooks` directory: `cd notebooks` -* Install the necessary Python packages: `pip install -r requirements.txt` -* Run Jupyter: `jupyter lab` - -See the [GLAM Workbench for [more details](https://glam-workbench.net/getting-started/#using-python-on-your-own-computer. - -## Cite as - -See the [GLAM Workbench](https://glam-workbench.net/trove-images/) for up-to-date citation details. - ---- - -This repository is part of the [GLAM Workbench](https://glam-workbench.github.io/). +Created by [Tim Sherratt](https://timsherratt.au) for the [GLAM Workbench](https://glam-workbench.net) \ No newline at end of file diff --git a/download-image-collection.ipynb b/download-image-collection.ipynb new file mode 100644 index 0000000..c8606b6 --- /dev/null +++ b/download-image-collection.ipynb @@ -0,0 +1,306 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "824b559a-0f98-49f2-b690-b09838a0a978", + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, + "source": [ + "# Download a collection of digitised images\n", + "\n", + "Digitised photographs and other images are often organised into collections. While the Trove web interface does include a download option for collections, it has a number of limitations:\n", + "\n", + "- the images are all combined into a single zip file\n", + "- you can generally download a maximum of 20 images at a time\n", + "- the resolution of the downloaded images is often quite low\n", + "\n", + "This notebook provides an alternative method that downloads all of the available images in a collection (and any sub-collections) at the highest available resolution. The method is as follows:\n", + "\n", + "- the `nla.obj` identifiers for all the items in the collection are harvested from the browse interface\n", + "- a url to download a high-resolution version of the image is constructed using each `nla.obj` id\n", + "- each image is downloaded and saved\n", + "\n", + "The downloaded images will be saved in the `images/[COLLECTION ID]` folder. Once the harvest is complete, the dataset will be zipped up with an [RO-Crate](https://www.researchobject.org/ro-crate/) metadata file and a link displayed for easy download. The RO-Crate metadata file captures the context and results of the harvest.\n", + "\n", + "The image file names use the `nla.obj` identifiers. For example, the image of `nla.obj-147116797` is saved as `nla.obj-147116797.jpg`. The identifiers also link the image back to the website: `nla.obj-147116797.jpg` comes from `https://nla.gov.au/nla.obj-147116797`.\n", + "\n", + "## Finding collections of images\n", + "\n", + "There's no direct way of searching for *collections*, they tend to be mixed up in search results with individual images. Not all digitised images are in collections, but if they are you can use the breadcrumbs navigation to move up the hierarchy. 
Each level in the collection hierarchy will have it's own `nla.obj` identifier that you can use to download images from that level and below.\n", + "\n", + "For example, [this excellent poster](https://nla.gov.au/nla.obj-133781081) is part of a very large collection of digitised posters and exists at the bottom of the breadcrumb hierarchy: **Home > Guide to Pre-1950 Advertising Posters in the National Library of Australia digitised by the 2019 Tax Time Appeal > Poster drawers > Posters**. Clicking on 'Guide to Pre-1950 Advertising Posters', 'Poster drawers', or 'Posters' will take you to different levels in the collection hierarchy. You can then just copy the `nla.obj` identifier from the url and paste it below to download all child images.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1468eb33-0bad-42f7-acfd-9aec17dcaa94", + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, + "outputs": [], + "source": [ + "import mimetypes\n", + "import os\n", + "import time\n", + "from datetime import datetime, timedelta\n", + "from pathlib import Path\n", + "\n", + "import ipynbname\n", + "import nbformat\n", + "import requests_cache\n", + "from bs4 import BeautifulSoup\n", + "from dotenv import load_dotenv\n", + "from IPython.display import HTML, display\n", + "from requests.adapters import HTTPAdapter\n", + "from requests.packages.urllib3.util.retry import Retry\n", + "from rocrate.rocrate import ContextEntity, ROCrate\n", + "from tqdm.auto import tqdm\n", + "\n", + "s = requests_cache.CachedSession(expire_after=timedelta(days=30))\n", + "retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])\n", + "s.mount(\"https://\", HTTPAdapter(max_retries=retries))\n", + "s.mount(\"http://\", HTTPAdapter(max_retries=retries))\n", + "\n", + "load_dotenv()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d34a18bc-935b-4f5b-a647-5166f475968e", + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, + "outputs": [], + "source": [ + "def harvest_collection_items(collection_id, include_subcollections=False):\n", + " \"\"\"\n", + " Harvest all the items in a Trove collection (including any sub-collections)\n", + " by scraping the item identifiers from the 'Browse collection' pop-up.\n", + " See the Trove Data Guide:\n", + " \"\"\"\n", + " # The initial startIdx value\n", + " start = 0\n", + " # Number of results per page, used to increment the startIdx value\n", + " n = 20\n", + " items = []\n", + " # If there aren't 20 results on the page then we've reached the end, so continue harvesting until that happens.\n", + " while n == 20:\n", + " url = f\"https://nla.gov.au/{collection_id}/browse?startIdx={start}&rows=20&op=c\"\n", + " # Get the browse page\n", + " response = s.get(url)\n", + "\n", + " # Beautifulsoup turns the HTML into an easily navigable structure\n", + " soup = BeautifulSoup(response.text, \"html.parser\")\n", + "\n", + " # Find all the divs containing issue details and loop through them\n", + " details = soup.find_all(class_=\"l-item-info\")\n", + " for detail in details:\n", + " # Set a default type\n", + " item_type = \"item\"\n", + "\n", + " # Look for the a tag with class \"obj-reference content\"\n", + " item_id = detail.find(\n", + " lambda tag: tag.name == \"a\"\n", + " and tag.get(\"class\") == [\"obj-reference\", \"content\"]\n", + " )[\"href\"].strip(\"/\")\n", + "\n", + " # Look for a link to 'children', indicating it's a subcollection 
(or a book or issue with pages)\n", + " has_children = detail.find(\n", + " lambda tag: tag.name == \"a\" and tag.get(\"class\") == [\"obj-reference\"]\n", + " )\n", + "\n", + " # If it has children, harvest items from the subcollection\n", + " if has_children and include_subcollections is True:\n", + " item_type = \"collection\"\n", + " items += harvest_collection_items(item_id, include_subcollections=True)\n", + "\n", + " # Save the item\n", + " # The parent_id will enable us to identify items that are in subcollections\n", + " items.append(\n", + " {\"item_id\": item_id, \"item_type\": item_type, \"parent_id\": collection_id}\n", + " )\n", + "\n", + " time.sleep(0.2)\n", + " # Increment the startIdx\n", + " start += n\n", + " # Set n to the number of results on the current page\n", + " n = len(details)\n", + " return items\n", + "\n", + "\n", + "def create_rocrate(collection_id, dir_path, start_date, end_date):\n", + " \"\"\"\n", + " Create an RO-Crate metadata file describing the downloaded dataset.\n", + " \"\"\"\n", + " crate = ROCrate()\n", + " crate.add_tree(dir_path)\n", + " nb_path = ipynbname.path()\n", + " nb = nbformat.read(nb_path, nbformat.NO_CONVERT)\n", + " metadata = nb.metadata.rocrate\n", + " nb_url = metadata.get(\"url\", \"\")\n", + " nb_properties = {\n", + " \"@type\": [\"File\", \"SoftwareSourceCode\"],\n", + " \"name\": metadata.get(\"name\", \"\"),\n", + " \"description\": metadata.get(\"description\", \"\"),\n", + " \"encodingFormat\": \"application/x-ipynb+json\",\n", + " \"codeRepository\": metadata.get(\"codeRepository\", \"\"),\n", + " \"url\": nb_url,\n", + " }\n", + " crate.add(ContextEntity(crate, nb_url, properties=nb_properties))\n", + " action_id = f\"{nb_path.stem}_run\"\n", + " action_properties = {\n", + " \"@type\": \"CreateAction\",\n", + " \"instrument\": {\"@id\": nb_url},\n", + " \"actionStatus\": {\"@id\": \"http://schema.org/CompletedActionStatus\"},\n", + " \"name\": f\"Run of notebook: {nb_path.name}\",\n", + " \"result\": {\"@id\": f\"{dir_path.name}/\"},\n", + " \"query\": collection_id,\n", + " \"startDate\": start_date,\n", + " \"endDate\": end_date,\n", + " }\n", + " crate.add(ContextEntity(crate, action_id, properties=action_properties))\n", + " for img in dir_path.glob(\"*.jpg\"):\n", + " encoding = mimetypes.guess_type(img)[0]\n", + " stats = img.stat()\n", + " size = stats.st_size\n", + " date = datetime.fromtimestamp(stats.st_mtime).strftime(\"%Y-%m-%d\")\n", + " crate.update_jsonld(\n", + " {\n", + " \"@id\": f\"images/{img.name}\",\n", + " \"dateModified\": date,\n", + " \"contentSize\": size,\n", + " \"encodingFormat\": encoding,\n", + " }\n", + " )\n", + " crate.write(dir_path.parent)\n", + " crate.write_zip(dir_path.parent)\n", + "\n", + "\n", + "def download_images(collection_id, create_crate=True):\n", + " start_date = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n", + "\n", + " # Set up a directory to save the images to\n", + " dir_path = Path(\"images\", collection_id, \"images\")\n", + " dir_path.mkdir(exist_ok=True, parents=True)\n", + "\n", + " # Load a 'not available' image to compare with what we download\n", + " # If the bytes match then we won't save it\n", + " not_available = Path(\"not_available.jpg\").read_bytes()\n", + "\n", + " # Get the image identifiers\n", + " items = harvest_collection_items(collection_id, include_subcollections=True)\n", + " for item in tqdm(items):\n", + " # Exclude sub-collections or else we'll get duplicate images\n", + " if item[\"item_type\"] == \"item\":\n", + " item_id = 
item[\"item_id\"]\n", + " file_path = Path(dir_path, f\"{item_id}.jpg\")\n", + " if not file_path.exists():\n", + " url = f\"https://nla.gov.au/{item_id}/image\"\n", + " response = s.get(url, stream=True)\n", + "\n", + " # Exclude 404 responses and 'not available' images\n", + " if response.ok and response.content != not_available:\n", + " file_path.write_bytes(response.content)\n", + " time.sleep(1)\n", + " end_date = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n", + " if create_crate is True:\n", + " create_rocrate(collection_id, dir_path, start_date, end_date)\n", + " display(\n", + " HTML(\n", + " f\"Download dataset: images/{collection_id}.zip\"\n", + " )\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "59d316e2-c331-4860-8698-c8c3eccaeb4e", + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [ + "nbval-skip" + ] + }, + "outputs": [], + "source": [ + "download_images(\"nla.obj-2590820305\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "da0a64c8-afcf-4f49-96bb-1693e7c038b6", + "metadata": { + "editable": true, + "jupyter": { + "source_hidden": true + }, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, + "outputs": [], + "source": [ + "# IGNORE THIS CELL -- TESTING ONLY\n", + "\n", + "if os.getenv(\"GW_STATUS\") == \"dev\":\n", + " # ipynbname won't work in testing env, so don't create the crate\n", + " download_images(\"nla.obj-2590820305\", create_crate=False)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + }, + "rocrate": { + "author": [ + { + "mainEntityOfPage": "https://timsherratt.au", + "name": "Sherratt, Tim", + "orcid": "https://orcid.org/0000-0001-7956-4498" + } + ], + "description": "Digitised photographs and other images are often organised into collections. While the Trove web interface does include a download option for collections, it has a number of limitations. This notebook provides an alternative method that downloads all of the available images in a collection (and any sub-collections) at the highest available resolution.", + "mainEntityOfPage": "https://glam-workbench.net/trove-images/download-image-collection/", + "name": "Download a collection of digitised images", + "url": "https://github.com/GLAM-Workbench/trove-journals/blob/master/download-image-collection.ipynb" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/not_available.jpg b/not_available.jpg new file mode 100644 index 0000000000000000000000000000000000000000..95bb2ab12a30e56c891a562a12c98a6d3d8b62ac GIT binary patch literal 49429 zcmeGEWmr^g)CLR#g2Et(QZh&=N{7H8DIncBbV@mNcL)e54HDAbjSQWlprCY0BZ4%j z2neFzwMTs(@8kV`yno;0cz%GhanIh@Uf0U=Tx*A`smKz+;4myKEFyV1DGe+v+#2Wy z9|!!!Lh@z`_y_mFJ>`2?ShcZ)Kknm!V*yKDc`IdQELQLt9}D}jH5M-TgbjY+;0Fr} zCkN;6UvO)3&i(Ia=s(RXatyJs#IfY1?jgOfx92aUkZE-sIR}SaClZwH)4Qi8b&ueN zTyKWE4u410{e0@Gs;#QlunG4%!5a!!gX!s?-XwhLdd-FSW!%ft1pV2D)|TSdaus*? 
zBTrg7hi|2tOnzm1?O(GKVA*01J4nRv=>xHF{`XfZliJO%ElRPM#j&t)N&e?o#}#_| zkG}so6W^i)3;TXphgVDVzZbwKA^Ae{Ki|V#fd?5Fcw5p$WM1Y!*Tl#FPW<1SQ?ZJR zr{r>FIWYX^PPid{7yfrC;8V~=a(eoA!<4Fx|6L}IM8~=ReH8FXJPbxcVznMwJo}$N zV_{!k{P*cd#OWpR@bP&MddBSkd1w;xz`(e@|2!OhTrLi72-V4ljXVDc4WC|I{Eg>- z9**RV8a7V%MgPxk)c?5?5To#a9`66d_>T_!zZPQ?J`S`|K?V33!k!L25#(>5aV@s{ z9&L;jzfI;aXmprA+?mxW*XCf^$<_F?rEkQ8x<3tN4|O?=S%N>&H{&p=WLy<=`3Sxs zk*40{v`kDRWazt7FCB6F5yPvn*grb6)PaSads>wE1ZaX3rGU%FH^aG-m#ztruozV8 z*Pp+5H7+xghUMJGUlj{v11eH=TLT|x2uXQYD({1ZfKz*A(&(S!r5c6jiLdEa8@I2& zZ;Je@Spt{o>D}^P*Fg$9hhyFwH8T1_mv*dT#9_Y0`n)xJYTu*3r``mbmVyP3g=9tm zdk0LoKas`x?p%Rmszh}R?AufJ3@`^WPJe0V192Z3Frf=JG!X5RJ zShe6HUQVn1Nh|(wKmM*<+zsgLy6BuVQ02MH0)EH))Q)Y=KaM}YiWtMczqinCfJVhn zuZV>Hy}>lBrm~rXFdSSU@GXwMP>K_xq$oqk-R59@Bs<`QM+Bj0FU}qX0&` zeWbc~S-guhD2YwSVYXqwTZ@o_-Ro%H^Xu2InUvB5f0q)73)}-7ddyxtus>_sG%$9P zhiBjRNq6R1vt9nU1NwY$8tw>PTufJ$QLACa@%l(Uo!@?t451NwLf+pKc_I7$ z+CFBBOR|p?XTzGWR5eJlxqDwlWf17hJV)y2-!uPnN%4)*LS?U?q0dqmHfg9P z;~&Gf2nOztvCZ-X=!HB*i9uskF2(j@=dS_&MWh9)#4iGpBDiSK0eEhuy(7gv67eod zoSm6^Hp8Zx+@cH-Zw*)3VwKGPaI&xv7K+~|BPf?i%*k$xtJK z#sr~n?M;<91}W#^sDh0l~QWFKAYt$vl}#Q=iGZ_Gf`%Vrhv)* zSp^y>nNqrrye!asJY3OVKfdbKSy{)bx!QB+qfy}cGY$5(RWOA=K04^%^Z9VyirSKY z&rJP_bn~rgZ41waC1Rqydqpu0NypExfNjC=Pzj$;( zr$p_=Kj*V>fb&PIV|&5*WiZs_I#agabBn$!`83RW&n*@^{HggJ?EYE)IPmyN^PaMA zFN@Q!LK<<$`NONRV%7Am@R-)7lg)Br&t1EJmjCbhG`NS`@AEhEFI=KU%%q+tf?vFP zo1TsB?Z3-m0?To!jyeyP(?YOr@txbex5A`z0rmQMU0r=ajnB7D8QWZ^Kb}O61K7#9 zj|m!V+Niea$-#!}+B?CAKiisVZN^5nsyjQ}w{dZ{C@5Bh@CI zGSP79fuu72SlYWjAG>35w#{G1SPyH70bf}2^RDY!R9t1Y_@#Op+gh_8MERX#lP&X9 z9?O2?V-}sN40rW5ug_hN41c@qxKU`OXD28T9Q=aHuF*n8_Zy)Sl&*J?_-+%Sbgo)n zYR}Eu&Q6c^PETY{>(^FSS*CgQ-NUckBBNpz9ao*Gewc_tYy45st`Oj63AFm9z|sv# zhF_^G(@k7`MrbL`nRowX7`Vz*;Qf#6KU)&Avpad$WzPkQT4xt6X_ss7Z0*9*Tb>s2+ci8f0%?o~o68O0l=?2G01erG zU&p&61tW@fn67ydaqf)wV#+kXc%A&#c&Rzw;{f6|(k&X*>rl8aKm%u?&?yR!#llIw zk#kRih}@=Y81)zkjF?qvHz3{jXxFIKV<(Nz)?;tM9$nYR+J-FE0BY2J{2?+OVf{2I zDXGFrjqA7bNro~QTqua~;ew!rFnjgnuJ7Sh`zkBJO)1z;iWxe2zq`?K(V-`3hMe(- zgMJb7{$$GiuWJM3R7Jz^L9`#G8w@y{Hx@upN&%+_G}yQrO5*7;Dk-<`fB&*7Gh2DR z0yrF-)$|d4WYZwlT$^{@hc2+3jU4>3iSG@5nZYG*YJdlk3hVHfMO*~ek~0U_`s2Aq zHN|DgHghEMnb=Sl5;=`p482#AWbkw<_4yp;1oCBVuiAHRg|hM34d>j8q~IXCwmI|d z3k%6_7w818<3y?f3B8jLRi~UOmwWCg>JIXpwou!``t@yn=8_oHor;l|0>fo^!GzDC zB3C<-x~s@=#rifR9&sB25|X0H)4Y0c(De%!{n<)ICFagDf>i_P^mWNFWR>B2Lyj4i z2k%n2XVk_G$yxND_gYi)*^E~i3Gav}#~9;JN_zT(U&48z;3%*?kT@>|SVAx! 
z;kILIqJD*Lt*HMAiZ)+y^*tpoZ~#CWRCo1Xc6>m}O;8=d2xrmOFBf=A_ghd5U|Dfy zkg*(gqd36<$&DNuz1|oa-!C8g;@mvNj6L34&bNB<``qX``L;E2c}&)}>Fozg>xg$C zm8lHPT-d|jvg9u%gap(DKlPybXnK-y zRHU2^l(y1__fvUH6r8XBUQdJO2Ke5*_zGB2z*n{!J(>|b+pLBUc4+tEj@m-CIY=dhWQrBzBq7@ z=Pb&Aa^bd741Xs|U{nV2FMcG#-O3+Gkxw60Md5C-IbI5kg;z&fD9I4SudKVila&Vi znL^Sg7+(+t$_n^uAruejQ*ma@tw4?hB00|t{*G5^Eld76&2pXU$z;JIVKZdk+wOFJ z$0}Y4fy{12*GTXZKN_nrEoG$%E-WnYY5^B_;viSM~4f2D1OIgLn+UW$PBj+GRn7 zL^9l!)=QwAEtmV10C<@}Rs>U7OcaQ!18ID<$lOC@8Ina<*x-IyZ~uY1SPQ3CSz>VE z`bsK{u<}@~oV&g}+GBmBb28T5|HG?r-I=%p5pq^!({dPf6fNn~1JO||Ol*4$l$sdZ z`x>NGMA(4;vk1|oDa z6P}Wvcx+GC+D=tnBp15>5u|bA^wA(y8#`VdH)xb})5s%0Ap3UvbUR$;Pl>X?#rCpGzcF&M%ea_YR;I-hVZBI~c zF7e8;gVhGwb`m*mp#NmP_Ystk3Ejx?j*TF2f0dme0rS)LH6o@J(Zx0h5**`s`FZ6v zIfcF$TSX0wQ_H z(GlUTY&_Mbc9?5Y-=h6iP3Iv$fht>wWSs0BBlrS^5&VKWU=u=}Mkit5K=d>^d z^RX<@ZLz)JWGFmm!BDD+85!3aiCvzIAAtPD`e>x0qC$>}hr@^gt$(gGfxLcit1p2m zVdBhcAnn`E?9{r(kcE4+4QKQT&_+I&kqusX zYZW%NU(st#$z|LIl86F?vGBk}DxLzfo(TEUOa;tk{6Gb=otT0ndE-gn6HW}%yi6#Y` ze4EYb#@IBmq6;Aq$UC#{dJ7X#{n~Ja*2U&2b@$uR~Iyr^S$H}^w zL@tw`Md=Qs-4a7K1vkLTv9QGeC~~sQF?=3+4p|(TWI!4FbI?_Ig z70wc&p+%l&1iIECYTWmMBR|LTJI^X;2Rbs-X(j{}pszS;K&o(=LFSsMpTI8Dig(wF zb%g%)X2(TQn!4OZQUpQ9q=Nzum3lV~#D$GxK)whPR~hO& zCq+eAxmLLqnu1)^vY%{c>Zv%#436vttMT^z{M@>mv~$Jwa>dH&MTJ=e2REu@Bgj}r zfjd;>Y`*!oIE~ALg5k*r6ojHIg?2?z@ggio*2Br*Pd7nQ57IK_BUxJpCgrq`7OWLF zZuNwKB4xOASD(PT<;C)9`Dd8zr+2COwFCRDyG3rGMsxi%xSK3*jfvW#WFTrUE9S~W zQ9Bm*j{QtQuEZFV2HJjBJ71$nwO|;jyHnt^K79RghfVQXE9(>c8FXo$)Z@aQQ?G!4 zpCG8!kB^~psKTMC6dkvZV3odjgLN1>Gt` z!Tfp@D{yD#Q`IJW3LG}}pa9})9X%~Amqhm4czb*6GZudfnHlKGNU|!iu~5CN0wUa3 zeKb>avK|Nl&(dC&GEitWedRG{zxS4l{E5w#(h*X^dhA$Z9}vY7I_gxS*e#%vfaD1j zd5pQgK>i|?IVU_`ISnz=El8x4nqKO4a$wyP0=MV&|8g&rP>lMHom%vuNparuN`pq5 z=sZ6%3L&oBy;lx25UyHcvyF~o9^1N6sy*pDAO)Gp)OyPpPOwtQAO__+|CBKFON7`1 zn;_8((k8dDcxU(s`4dD__#HcxTbD9}9Y9b<_tOkcJy;vYgM!kc^y_W3#z;*beRh`% zt7jGE}Io2Bh*1NqVM&&o%sPqu0n&1cNTZ z8?>66#jEk0Rjp=fA6*$rfh*V7 zDLO-v{b*9p@~48^YkHY;Hi3jF;5M+CkqWtiXtbm3k5U+_zD&)QkbJoJm)i$blp^f} z3RMbLD9k6Y?HqfhaY-I?%V1zYgjYU@LMkGJ6OkEqiH`Q_b4_M5pD~pl^|YdOhil}! 
zmHsB0kzkk2%Ij=CHM;778S5*8$i_-JKh%whpp=SA>h=B$(iS0>AumR}>a+|W6 zqHdeV*Bd{wT)>!VIPfjE7dQmb@+*25Hzu#K6$-=MUP~3hQ9= z(fJZ65qyVAIJrlQXj@UB+M|q!JJGx8@EgE)fl`|)3qD5Jy!s=*0|~>A&bqqM7M}=3 zZF-VO;F8%DK0^BOgEMFGl$=OL=-xf*XcVh8s2vA(oHYRUm{eVTr8A1gtN z+o)|MG^a|+z}Loz7T;0fL{Do_yR$MPKGK!i3q6u*{ld)X=J`YWYeIdN8h7sN>kb7D zPca}?Tp+j^4oU`Q&q( zCYj$JAeu&b1bZAM#*U4h&Ryx4M2r_?tmzafQMpTT_4MTPh0s%clY8uO^J4cSOi1X1 z(4#7`ddTg?;F8Vw|M>j+vqN~Pa~XgkZ5VSIy@kp|zq}OMG&;4hNvDcllO9|>hzTo3 z6l%Wxh~U{kSG*b8VwcU7TmDc}%y^`*@b)V~-TLTb@4Y!0SUh1Gx(ZB# zd&2FM1rwjT+<_07lE;+M`*KV9v7#<65iK(AXqG3$<-_>txqPNG>UG{<*WewRxL5XW zTWvqlb}C=ya(z)bky55rZZoav?<~4qTB1=~c%HUhNFymQ3RLkIAxLU|(Tqqyb|z!G_AV8K?Yid651tb13>!1RD@RfHGh%zw+C6q= z^f!u+UsqP@b+c)sOTW~oDAREJR6XL`E{#B_^0ZMo7ot!)=4}zTSA6=D-f8Dl5j1l4 zhCCR|7zug|bB^ZRa4S3|gLH(RgdNyI#S1>jgTQg%(W$rZ2OHQA3=N4ne+Y?D_AKw( zcYzQ)%ECy4x)7aQvgl`^w(JSf)6;99GL%Q>vGfe-M}2ad){Gq05o^_V5?f2vl6IUVhM^E|UiH zmvI3N`jKyh(aqLNB_qqnaNU4Ax7Aga{JzbH zss^iQB+CXyMFU7d)-p?-dl!mu2y7yG>y4bqSu~0vZq}i7AidCJGMubT-8qrXpl(`0 zk4Xb{KL4;CA>^+v9NJ(rq3{VNL}j5F!Vc?6Zt&;MEa}8SS?Ick8Hq$nyqq%QZrLlh zrs#))8;=6W0P>CbGo`VD5-C%79}I$;e||G;u&DLxpzyb91H4|17?lFlTvKiL`5uYd zkE-mkG7AUhyG_#NT0(EtU&KnNMMvG5RJmIio)3~pWo_KTyH*E6+SSIH(;oH9?UU2> zJ_STpFCF7CJGLYVw34oaEG-1=-!vi0)Cr(24$+m?$o<1v$Ej_F(|)C*WKJV}>_;v6 zbgth}=d@@VO9*OZR6U333$zC|hVh7eZFG5pcGp8U@9lg`>#C~~$#V>*Fz#oqJ$&4F zPqN5tL`7n`Q4Vs(|J|({u#0q}bA8*dmR#xswxOh4qG?HrtGe>#C9*pxqV#4TzRkiF z3O-z$6Ed%!2_aKS^QYmN->ce<$*=qJI!|dEv=+0vXe+HxW>X_9%7~EU-)FUDZvf>B z415S;+1Xc46al1_${=3jDJHXrCo^RE`kx*u_Xl0LZa+@{wBh}@^Apb9cv6i}ZKEoK zMwKA+`4>?R8TtOQ?(4xg63QtTttEr#Cgh?At%GfY1P3gKRce4wnw(SDDN~C@w`geI zkLFx)D=fw6zeN-9mthaF*Fgv^2n)0livf5^!&+!$B&nP4af5Uu1zgy@5COw% z*zN1$;wXtE9w@r;<56%K|LCDKY0}7>bhBniy;-FdIZg}!S z5*C}GQ<1Z^mhL!7lKM68Vp2{mUu8b8|BW8ND|qbAmEH;!LUgOiA}i``W6ItP3`_*K z=(3JpEoyOeQyte$%e5{SqI7=$+_K7V3o)zb+_7NCu9bjEKDvB?w(kv;UWRgQNz=I| zXZ>Fkug}HTd2CH8e?7Xfc+^9)gI|Vyi-@h6PkORJ2b2T=U`lF0bNipqXIw9%$dtRh zQYDyXlv*LV`6&1K^XKR+HK`Zj1SYm0N*1i26YDEAy%&KhL5nAlF0`RqK%iz}$$3#| zJyz5_>P8bW-0thC6TxHjq<0cM5M{&deiT8k27=~EeeBX7L4(sCo?Z$t+CJ)+5lo%+RxO1C8g!YsH?qoYw6unj|Gs7lFs^?GMWow|Xd_1nY?H`q> z$*^$pHN+TIaIpY`{&;bLd$tSmK{+EHKI)j6iRtzCqF-r6UMtc3tc>hA9mpo5IxA)S z&Qekq^iD+cA?w<>T0v16+{n?Irlo{PMJuzrGj6x!5si@B?U}$hIN^_y)rVW>l}-+!SSLIUZ@uN`2ML9D}d!N%tx~sNiM#QrHkFV%!7HJ ztK_C3!^~Wv(19}{GZ_f=5_7SkcXQ0o0SYhCYV51qU~7!ia7h~5>+2z@XI%F#N1lI$ z_82?}=HD8YZg<&_beIt!l5TKRv35=nt8&RvDr@x}6 zGSVG8ngO7*sk6kAIYZfWI278UF4XzT+n0hv-wiOvpH|gyFgNJT;=lvAr0}bC1B~SRr6%DlB z;N7FSvFDbG`S#;|+tr;veE^UEF+7tpHlY@v7I&*$*-+Mbru!Y>Er8N?FdI5AR=C&9 z7<0bZ3Df2i#|~gMi#|<=h%#kZ>d*bCW9Te>tbOZFQenW;WU|A(#l?eTGR#ow`UV&KPEub;qY@opcP(+r z+yydnO4OiWGx*R_mFl@fDEe1K*wh_|x%K|ZiUyjgGL=lP+yOB_*;u$)7HMCxc}Y&Q zWDE>;Hu>h=S^x;mxIF-Fj@aNvFTgeh*+X7q(7X)7+Y;+@aTsAu;iBX7Up{Z~+}kZ@ z*uLh$)ccxmOhXZ7lsa&=P=;e~Je&*=m$6S@CmBRI8nL`qyRKv6?{3;`-mP^_$c=gV zfj5S(>XHTypOTveV$+;hmz+MQ#BiekoHQm=u=W+++;Vm}>wb_ovc?8SDm8&Ng8cg9 zHm98^p8>up0lR{)o&ExLSg{y=*9B#(A1SnTbOg+0&ZBT;{t0X!6$QDC2Yibf=L!I> zJsFU)CX^RmDkvzZS)}B=>PF(agV1-GZLp^yzH;K7S)eY2Aya=a0$0$;FX0Pts(ic* z$Md}In@Cs14UG@;sHlSl-@S=@n=%H%4}bmu4wcJ1KMiCYLe~a7uByWQn=$PL7Ky;` z%d|TPfhLszN+((pR;g4$*P`sx(s?~4e$|m2vu=QRQdgn?{Q~GGumTy|&-a&ws8;KS zL|vzEe=>1@Otj>TalDG4i^>tQfh7j)%*Nd|K2YO@UZBptIj7QdC4otGa^oYxGm^KD z()h~ewcX=dMu>FyWOY_TaAFJPtrH#qyci&0#I2eDUweFOU*P{~OyYpUQAlK7u3_)d z2OnK@pESR&SJAjU-n!SZB~rp%6aNe_CH9T;YfhF{2l@q-#~z?R1Vtf z%rf&xy4Y5LE9AEwWq{H~h`#_LSiAVJGp{c9-^m3W&f(6B#mNt~JF|_p9fdO+X+|Nz zZWKLdM~fzaxy3)4m!C{)X$@vX04nC!5nYO94KK0vwaUt#DI=$cdTNk!!UvjyNSRaw?7}Qgj`rw|TLWK?dO`G$cvmhK!(T24 z@RDWaRip102j3dCc{w|+ueOQnD3lnt`^X6bToH;$peiJPx_j~(xXC%!p}XX4+HUP1 
zmv8`RR35J?{q04#wbNbu83YHt+rs6{`?`e+Ia;xdTEY)s1lgFO?fB}N<>@#Q#I@-ezh zjgS(Qt<<*+VDqDt)`037q^?NKlMK6fzoW!eUIFr|bASqQQ+(Mo&3`0Jn6_X+hyKMd`=Cu=&__siTd~9R_j_-hRsX*?ftn7^B~uKU@!~NMznMIY*Hf+SI}wUo-` zqOW;|EcZxRDA{#M3Yl$NRrn0J(?@oM6cd?qZ-quAM>|;Of8p~-gKnmxqq=a3v}q1y zQkpwpQJfkQp>NRkwjG|!No^;MYFWLQMZ3?p z58iK(#-aDqh{`p&^K^1vZt-cVD-K5O-piwk63WQ;`~F1+1)~Q1{QyOzeV6BrF_8Pyi~IXh7{?OKE}0E!J^r%OPK{)VrvNC z6(+d@ugF%T33Ba`>9jO^q2vNA zXES0N0i_&EmF({}zNKgYMgJktZ!+h+;NIcP-{B|q`#BqHeODKt`$mEv_h<25pUK+~ zbBEW86ouyl8f{qp+RQn|9v*{^EOA&`+XhY-!l0mt^PY**#hCHDnWxDdc5a>=c^GZYyDL>;nw2`e zr8I>63!A0<1VZJ5_SIwqzX@Tc`DF~F%>gmmU+PC$0QW2z1Sk$`yc~ni;bw|GNLb%f z=0Xx3^Q6yNM>(I_2Kc_2+9}_Ua=+07xbgv(QU?9&AYErgmY7y0Pb*HAh68#m_U^-j z4B%wO*caEmf47mw8G^Lzlq|-Wmghr{Vd}bt8-DtI6t;vHKWfsW z0fmn1Q*c{jxpvI}PUHU^Q1%zKc0|*}8O(9VsssUguw2V{{`K*bTsxtU44{Un%(5ZP zS!nmAwqfnT;L%M0k6ur{xe6IUFJ48x>&(xQq_hf4JE34es~WM~4w68BEl^>Yt2LL! z8M3iceyOKbhUE5-3!5Zm{W3nMVt+nCT-e-Cin};(?q-%;Zul#Gp^|k(?|Ipwq8)E^ z9qLM_{z&?3BcKmhL4hC<2@EC#pF!Iq;z>cE+T-KqQEmIQJbNRw?%;ag-blKByN^53 z6;`3>tbxO-nQDe>XqdWWCj4Igz=}fNq142II-XtC_BVK``~~7`!&QQtksC&?-PAdb z|Tw*+A|J||W#R8tP@QyoF?5N<89iq?*)a6ZlPxIBeqLnu) z@bX^^N^LZlsna+veN4Gvf0jpAsWnEysLtx*r5MV_h+Nt%D8*~I*Sdz9*8J+lm)g+v z&cRacS_x{nvXiGo>!c3T$Pl*f@GTP)S%5eB-44&fn1in{60DusJmEkil`EePXHuG6 z4$-$O+GVCtXp)6lA%hR+x!Z3(2W@Kb(eW=mO?PCY%S|Sc#(35<{^O5Q6!I(R0*-Sd zsXw98)^{2g2e}WN+ubl#l|0}^m2QY)`UMDJC|QMQk`@7EVIXTbcP@@P2i|zigWzkF zv}3pP71YobHN*0Pc%tREA_2oW_R0uv6wgXU;3bqTxRj5Wj>S(Le_mM;BI5ChC3~>| zg1RxWqm1JvPmC*ufjhCsJ-G-4X*xUvvM`_F%a<>6HW}KlS-OD}X=rsTe>eS>#iXMx zCdWxYrErJ4{#&F9Qs0kpf~f9LQAtjFl0 zLv_n$S%dv-ud%Qo=8jLH7SsbB@pF*cNZ}&L@>xRl9gA2VhKo2JMcug8bG|Qv0OYIX z*4cxeU*;Mevj?@NCfQ{go5oA5KzXkfxYiVi;XIiEekp6d+_A(5UnY~M%Z{waab@PS*elhMIzspD|5*#nrjWS z4!kYZvDfcodWd-p0b={I6Uz;hN*V^@*ty+IJGC2T{0L3;3yHQF!>1`~#lp;0x{VH* zUw2Vs&p>qcs^N=lsI{IbOG^G>?Kk0F$B>lcT?0Qv?pr%dkI6fg$7v{DVAH9(R(kdu zJUG?{Y(-;rSQw=4+yu;W!#Cy6UL^`QC`Awej8t+|c@K4E;o8n}Dd{Ko4<^TzxEf@| z)?Nb#lk5POo>_ZvdbmS?;OUhmo3jb4IV9!b-%fRMD%ZR~PX2oO@^cJga14;=VV&wn zy^!Mw$45BK>9V~yoHLb)9;Qf3iz-X0h-TYuE%OO&O7I;926~+yry{Ab8 z4op?VNe{_LS&>d142n(+^_(igY3umdG-#3xhZFtWm_yr<nUk2toN0aqDtN?wvkRpX@Pf`sJb{kzA-AzmR_-?&k%%DEm%;(`VwedHUcDN26%(mAOqWycxM(HN#D9S?1rwW(#nIZgjhl6J}vh zYjLxVUbD_BJ}h*r9bvV98Ab&69aj*cjO3_c-YF?fQCkUt1;E~2Ge55^nG}1z9__0f z6?-#InPZAwfuj>CzO~zm?-14NSf5a4qpo{3WA{US@GR(ZV{4j7zO&r3xeF?f`$(QP z4Gm1>Gz5{;Ei(}Zikt-ms8>axO-8?W&4&=tP~`3pH~#9fr9M$usX!LpBrsF-M%$nc z^-l9bM{52B$IG%y?^Z>_TKNDGM+#^T6vbDrNK>+rMeM&dxTK zNU39zs}T?}^G}_i1})I7HL!y~&lk!>AlixqUcbF}eSl^HmEL;yd9TPN8leJiRY~W> zZ-sq>gWBTdw;pm5yM#TYA?MhViU2qfb3ak%mO?}?(p}1ezG>pdOQ^Xn;yGKWCPcdj zio1XtC2XngKuU9p6G0S1BV0sEX~?Dj(l)KAWaPT*7@7TS1+8^$#vNPd04x8TA0VAr z?~;_}Tl>H~QCQ12(2|%Qrh87SH6VAY@+j}~eOD&zWG<5q&=>%7s4uINjiuFR+VJ6G z#=r&iEI={k?-xTeDP0wTF(l($yP%ogqa99EyNt(b9Y7$%*bd_qfsOp@dkOccw)r@ z{X(3h00EtO^Z+=^2r;6a0@fRoL=-=s_d$?w0fY=Fv~$9t@s(E$!v*rBo}Ly2RdawZ zzs^<3$qQG&(XwC=yHw=r2hA9!M#lFIs1XU__is3|K7Dv=3*rX0)|hZLUVGfgOhCnW$x<0L|!6ehurtjP{_bK}f_@A)=CYXQylfmUhA@>g-prrZ< zM4r#WjpWgHcE*p(MxNz8t9q7akE|akWg95M0O&yoz)&jHQ7U*s{=gR-7@;0n%LBJ> zY5h80`JaW1#w+y3-u6Xo=HN(F4ufnw^ZYABs8+6ZcYJQ}EYv4+-vV@wY~P!gONup) z&@WFAc?UMJ!|yL{u6xWkLiIjO_T!TP`r2km6K;H2ofQ z5j2KO8M;Q_K#NDbrc6^PGrvlPwe+5L>-K(X4;CY{0>RDOXNU2+&mzO9qNlSg;14$}|8?l{YRb zD0Ua(n1&jp5th;IROi2tB)ZOOHoi=CqLUvf}$9KsJG^xFzfB?lWc>Ee|}1 zw(yIql`85dOD`VP)5VVHGh+Y}`cRPLX|7w2AR%4g!=^D2^*eULyIz%awVpon$utRdPci83H6W^r>p5&$Ok)??=#z!!ihz_`zwDey0tEerxsP{6^6>`ZV^-A~YM z_N$+*+Txah+dgRG0|o0ENX$m+1lvFbY7iPa6Z`tUpvdori%sy*r9aPGW7@Qu0`rK$ zHpVc9Q(pd+01S9Uf@X*0SAOc3kUTdI7DUTw3A4Lhdr@pMn+~U@?gkUIJ%O26*twZ7 
zsMk)Mkw6J_a1Le&(jMsw2itT~(1hhuMbYYHUYW2aytZ>z2Tk-;PMqfj1$Fws)yhyz z!H4?;u=wkr8YO3;$BM;854>w~UO~T%kp1Rwq}=!P78o&d@4NMkS@efc^sn#Hfej%5 zNmGq>_?1uO-r}~ASK78X-cOFv6-H1zT8+*YRYS|Mzy{=oy%pGKI>E>n!fCQ)HEQR= zjI&UVHDbHumFIZD@QbBSX>p9?xLU4Z})Cf&F%y`K#=rnlq+*Vh-TR#wWC!Krd2+%nw`TG-PTK_Y@^B)@jDc;&p zwwV8kTmBds;dbt1gY^^@jdt#;{dS18^F|@6w0MM+1tC&iDGQ)>qb0%{=P=JS1NFBJ zlov2S(ze9`snyu^jUvzHsKRC;_6{%%qYQ)&8tN#n^?s3qf6@bqUzn3}HtqWk|5H$B zlYxn#=vx@I4ipy$xuf_COjltSKZLHOgY*#eySjf?w)s+pc!aE@+A=;a1s(rkBneZ= z7L$JaOZPJ6YC)TJe)RCu!F!L+Cvg}|{vDNCx&ZPWu{OLJ;5+e;k>bK>sFK@y!UIwg z>Q10}*|H}r;v(n`E$VUXGjj6Up60%1Ew1&8)pV>vv7SR?C9~M}&Rz%sDF~3eZ6(aN z5?Qru0Lribj_THL*b8?QS}>9E4I@b5FT$9A3~$!s5mH<=YF{fjC)B6*Ik)$cyGQ6Z zfPYgl;mJJIHrQj9n2bHNoS4(c3j$aRL4Z^OM|JZWJoEY0jP;Xm-JyiVN~w7(Fe5HO zwH%>Q(9_9Q)pR4S`*)}12{bFRC|oED$@&xrOalS9bZY5@l7s9dOVnR~CE)w_?*N!SIa&{;%RI{wP6|q{c1oIEKiT9f;wf4W z-?cX`s~L{C#79;XG~5+y@RpHHs|;cBhUCmPMp3=oOd@n^txTf;kWTg7CoeMd<22z=0~$MLwf72JQS81W{WmuJa>|{0uTt!=n7q zv&_8Lq8YJK%CA@Z4e&nz7MZ24c^Ogg@TCb=G^Bs#Z$kp%6 zNDr;;S51dpvwXbMJb< z6iVA;F{Zos3NA67Xd7zeMl=mwT;Ap^j$F<@Q* zQ6*S(W1kVdfdT_Q^^OnddtUX-HMxg z1v^(#{C7vPlX6$Y#rku~fUe!R&OQaFd2nx{TxW`aDB_%Zut`R2OR7=O5wOPkg0TUK zm;IwEf@v4ZKM&Oi--L>7{9|cg4P1`F9+)FsLJ$CCHRbuNngAaH7`XF`3e8);ec8Af`K0H)$%JSCe!+Hb#34$rCXH=1WrGn&GrV7)XzW_>>zDhGAF4>^QqS!Q1=I0=!oI8Kld^zD61rwr z&N=mvid&&++*=ZQzlFY(1w`hX{5z;GH3wQH=*Q=hU$bc?J4?wjD@!0peAS#SMA75~ zAh1dvMrX0_1#N+GPpiR<($BiC?i;9R@G6ZeV2giecZ0k@dALH3X=(@dhli*{-d_e| zBpngNT!tS%eX=c_1LOU{zv@NLKNgQTsGtkwgzY1G)Ws%4&ts9}ZptzkwHylHlh_&a z5QXOYCq)VF(rih>@O{5+-uk3VM^6WyJ}^)om}5dqB8FgJ@(lu~-HL ziL~JITcLnc;h8}yUjtZX=;Yr~)c4lB5z*{b3ZjIVssa0uYd6FMgI&8`nzwbXc^%ZUSv4#B$; zj}4s+=KaVp6S;itv9^H10L9L;O@l8MAC4Z0LKD+a!tyq~fDW?^{3ebZA}g6#+IH(P z#zw-Q+TJtOT3}>EyYi$GDT>iRFK8@?fHw(qKp*!U2cX*7ybo0NJu0d^3w3MFW_-{< zHLIQ}1z|4v4;%ok9=0*|4zXMk&#k@zJIuYlPTbi zl*m*dCxL}?9SG(+y&p8E3`qus1G+Ubcq}7g*rJmA-rpU7gV~)eavj6vimO?eX?kYe zz`z<)2AAJY8z^4W1^V@5qv1)77n1>%WL4>neOYJXe9KRvFH?Y_F{1V&0SGBP*} z8UP|UqBLdf%K8%oX2 zrt`J>-6Hi0;1gG?uYSjbS+ERb4%l`fks5%P2yx#y$f_IaiKO(@#!|%;?NX#p-+$T2 zmPh#hxpdql4rnlGT8R+`FThwa;{v{Z*UXv>^JWSXpj}sSx-nx8Kq_*EIUv>_Ek1k* zxb`btA7)h>9Oh4E3H&hPaMA?6M_IZ5uK56tROj@ z49TnVVQ$a?xUAF78bC^dDUEC)ch*`B(5VL+b~?7Vw_nkIV!?djzt#zjbvQ0|KtpJY zoni}*>jeim8s{`R1J1nYZbIc)ptvE>ENACxhE`uadW zlkUMBNN8G5cU!qy*7N?pmt+Zy0pzkp{qDL3n~6sVz0(2CX9KE{_~+Ucx}O0^4G?6* zzwKvDU_aNZme5dwPv4&=QF#-#_d#ps@bH%$0|78c-~EaZehpAQh>JCJ*))fHO& z5oitNG^lDpzV7Pk%4elpq06mIY7ar!#5S(#rx)Zf@6))=0NO=+lQjOQ&2O#8UL@T2 z6jmpC>CS2(JEa{j-UumV1Mph*LQv;OyPOHiNa z|1B5@UT21K&-tFB@mYlE}K{$-WekNxaACJva}L0Z5wv7bkKI@JcZ;;y`vUda6cLcp^= zPI5)7p)c12ozS|UBcJyt9^}Y8gn11+W8T~l1tSv$uiF`c{YF-?f}+R;+Qb~?(&_fz z*NREysJw*J`G0TJu&k39l@xW*Yc>D8t)*yHJ=#gFy{28J{j)A}FXZFrla=oh=~H%^ zz6XoX>a;gn0>)(C6{n_P_FX=imG0yA_uC z%X{;Y>`xbyCojUynRM878!ZMmt3PxJmS5TxzLv4<^|Nm&Hp|GM^2B9Qp;+O``UcGC@Z&3Z|mAC z+-Hlu-?KR;P%_&zPuf3Dzr9&Jgy-QnOIaFvy^)dVm)~mlw|E|d4Tn(U5nF_Jaw# zV5La*j#sfuG_3X$(*rm@!cLE0BW0?_vVDm-`5o)!A0vk?DT?OK4WYA)R!OX0jaM7H zkq=?h&*PxLT>#~>LhZ^YU~q^NKg!2nqntR}@g%^Jvj2|fv5X0_0_6moV#M9`wj0jqGuiBn>u$Z&;xg>%6LvGXT(?NNM}~vb zHc_jkKov)#zQ(PYJu|c97n-BlUwnO`sZb10!k83Fnf|3<=+nBszI<3X`)M#sQ|b7H zm}tOi>`Ob>Y*F`<5$azEyJf|svS*D0rni1>%)hRPlst{uo@sHMnruHgdM|4%=zkJ5 zTD|Vv7q@pfx`E>kpI|oVa7%PiS`=zJl9NNAT4ee!;gfM{9Gj44`Rx^Q?{y$#9PeEq zm@G5d*?oO}{>vlLAdy-VGA=!l!*^M$2Vb~oev<7+{eD*xDad0Pkt_fJ^W#t}K*h82 zvHk2U-h-bGK8W~WH(QpEL)*HcS{-osS~qN@Ux%~!SDKMeZN$me&|(cv_asgFVauw` zZ1-K2DDYN*6)Oun_mhtKZ|mY`o(KE2H>6>dUWcY+%v_Sy%met%s~BZ1NXHapqgpv~ zEgn$;P!@^@BV(i8i8YH9HTk@qUrnL=Qn*__5`DTc?9!~o&?blR>N1V zi?@ETEWLatkpN}-(|B#CVjpT7!=diTKvi6{na%cW_V)sA0{%yryF)g-{=BIO4YEU} 
zQyrxXm&Lo{yEGSny)KiczD6Fu{aA-bB4t}g{Oq?Q|EIem&=W_USbU5)FP8ZG%JP}_ zLVfQ<8u7L$n(fQ`^(t-Y`+r=2oDa^(ch0xy9F+9iaqC;p*K5(EmR8XogrSa=mP_Y`n#s)c(axf&C+|(+~FoRyh8D z?R{rh((U_qw+&lnWo2n;Wo2nf!%;c2Wty6MOJ(js=0H>+votd$%TX>;D>DT&2Pz_t z%+d_^R$RDHae)E?&lm6C{rlhl<9OaaZ=S<cQ^M_A zfBb3XAOE7k=KP#Ab&<1B0Oz|0&>E5*+6ly%)^oNRp9CuAUj~0LveFcyo(sO;sX+0x z?d>xX=g-XJrFo0QVrZ&t+7%ymCjU<0a&7Z=&1-tSc^53tNRzZ?K&WyP(rrvI;!0p8 zQ6pm-9;D76edV)=hxA}FmgsD%th`g<1yh@OPoEcHigpV9HU_z@eUQ@)zXw$ z=iOT<-(tEw!jD}YE$YCZySN-K{WITnY&%1#rJ#5c7sB@Ykh08I3HaS>>RDhN`PGvD z+~v{0!rTSY?aN@-NxRxmw2>4KcCRv^ao8YSNT&eK`fQnQ*)WEIuVTmXxxG>$*O1~( zb@SJO_-4cv2J%I|$M#1KYf#Y$RwiFl*1s+jK2NLkc7Ml~4>X;1$} z2lKbxzrS^lpgiJV?FnC;er^7FRR1!mw5X=$)-I`{#w9j5oc4W==3Q3L#J;^gW4qXb z8)&&IzWG1~1%nQmqjsjTCR{-3k7k}UcrAKdG8}ndjDIR-sc0j;YTOBnSh!{~%w56B z4j?(tQ&!6zH*0>8`LllmGU4KKCx7KC>X_x0@y_m@^D*ve8onxbpZ++JsZ{|PuhJf? zd@ipb8pmvWK3oa&I0auSmkCHh9=hZz|BVRvH=ETt({f?k%CoZbY;HfFMB=lLY@+ z*w2Dr?3Lfw_%J>-KeVSnFFu5}NOFz&a%vI-j#cwt>YE%H6P~pZ=Rl=byA_ZK=pWyt z-I68d-^?N(0B!d){ja><%gylXHmNd&DVte+)Q#1J+zm7P)DXY@OcN$SSey{Tvxyd# z+tqe|scN#SQ+&TKpT9YILjO^+7jD^;f~2==fiFOdX8K=$mx+ksk$5B|FB9xP(|LY0 z!B(0Q5-=J?3c0wd*2JHkwPd zssMy$A0OoLRrTW2S`7j0ebi2y1ag0N8F_FU z`U=bjx%FWF5*@I8TMEmhm{j`vobQ)yl!6#L3d)#<*Dxw`BbrkqZgnK6*w_+%kk1Fe{V6E~LFL5)5#oYq1 zIu&V@Gy{pBnHVsH<3k$2`?K5=Q}d3s?6t1le_F-cl=5;5qiq=hwWR=PHlI=8HHM-7BgGg(Sg)-9}j*Gu0u8%j85T<0ebn3eZHrz3N3SB zkPja}Db8D-==~%LhuH-xeK`w?Vf!DIC`@ZY8zWdPVbuD>ZHn4RU)D;$Th*C0PxAdy zAVRYZ@*W^(wL^p??zjg0vdpwC*6PWhqmZQCgXNjuE%{67Op9$~ejQ%A-TlX$U*ofo zDF*sk7HJuO{xS}Ev3}k>y!yEkZ2`%5ir?tCAWc=o^<-=!rKCHX*FI(Ex! zHNrdOgQZoEEwg>qeF7bvzO-TTcyLbux<&)yBuA8 zZxqNW%Y|z(I>F`H(N@*1#M{lEKb!iFJ?s3`5iVOi)pbJM_)KhZs2kK@Um>dy{%~5Y zSx86WPg_N9N7yx~l7RpVOlgA0&3v6q^y;M4Oxi+U1gX%AwAcRdF{RE#-DpumoJqm( z5v@s_PygU`noVmsUz5qNZpFlj@|87PFq>M?tY64#d|@-9IA~9-o`*yL^A}Gef;Zm_ zm7%V+GSUV5MpYMvU|t5SBiank9O0Lo0NbA+lyLfnxp(=`-m4g`4m&HBOKD4Mv%HwLJS_*PYJs zZ{eLf+X44$P#R$C_~kon0Fc1jTT&j}TbD-AGD%+8HLi7G1tsx5GVYiOyNY=0N4XR7 zZkpk2%u-yu)S0k^xsy$ACj4sy@fKn3Lt<)lgy-v-J)>7d`ii@1gco9@iy~jgms0xs z_2Y}*U(2x_8Hm25`)-jD((EB@XIvT%-{)RNwJ<9B&|KW0XO<^1X;D5ouyk_3V)Mc> zDJyvJ%8CxsYMUqHP)b@Q!-KpN51;x<>+St)8=16YH^LHm-Ok_h}-U8u|_T3etO#w{4npc;6!Zj6lmZ08;obNTIo&EH@l z^EI+`S4pL-W_1<(iC<~}2AVi+vM&w|?PJEaP`|PJ5YaQ3ZP&7BD}*^)0)7U4k)8Ge2U1A2xN%bN=FY7 z*b!ORWD>Do*Z=5vOe51a-5uSfEu|>9GKJ!K@8{7>_-8?Go%Xa4i9<&DhlkF1D(%mR zzHj5uVa0Lz;V{^Ce$n%yILP>HPGMuO0@#CEirJU zB30P0Ff2eU(u%=)0rmTMD_w_q4WF(WsQu5BV{J}CLI=ADL zUSE)Vcaw^_D5%K)d$foBnLLYzcEpu#rT{D|Y=&?Daest$=apGh!u}1XM)_ok( zj<*Vy`-7a)m6JH0B_KlyhwQI#?Gkw??(GANi})WaZ`&77M@Lw)OW>dVnYUtQy2@>B zdww5=kd)i1;J0+7hSoZp{wtBLMCJqFrSm5&MVD5Jyv}8^&{`x0NIGQxL3EIcq}j7V zZPUXtT>D{OGLVaf0I2BH_UE^QN%&i~m{D&Si$QZ=G%F!^qKe}q zCXBtNxG&LtN5bptC@pt~2_#yoIFR{Oo7i*XW|X9GUu&i*S>Mmho-@1=vl$V(GvCg2 z7^@dJUYj!adt`uhNBD#7WY1SK$2T2C7E<9ZF>fAxp&Gj@K^r3A`O)JFp>OL(2EM93 zn{7gH4yCbY-DV!>j#+7Tcw5ONc&_!wYPkmRQW^s?L?Gk?+JxZXq`jTc%cU5aXj%!; zdnD zGKijqy&mR%hx?gn8~znFLP0whshk5VvE9ThoM~WD8!0>cSDmqA`)?up(D3}8!*1`< zTjo6@ibnQs<&W9M&vxhYCx#rGoBvLS{^yAZFRp!X$LN7}BFnALb0+r#;qpkSFt;hk zkIkH5I-VzTltSU%|XtN!x(;d1SCqC6%VU^lnP=lZ(PU}%U zm52AfFngglP#Bc##X7MT;@Sj6wl?ltYf^cBn_WvO3&dv<-+k+GYVo{D7}WvnK!Jhi z?LK0^rtRBe5uL|VBc%l@!CE>eGR zc+CHJwzpl8a*IOCT5J&LQ=n6^O<*eeglxzGBVnC&+^*-sWqnOm%qlox*G+3_SB;FJ zt1}-VoE7saM)9z53R%je@b(T1j42@9l8tPH0x^?wgpTSDzF4=<%{SL(_6wKo# zufY3;gzipQQ$s?8nLV&wGNEe|x{=yn;kKNgSF=Ik8PndT+x`2=rhHq;UY)p-2UR_~ zc-^(;f_cEM{atGtxmBIFuna2w1`{%J=pVn(o3*dpOU_lvvF4>hFwrq_%|pjt zJ(f4wUp$%Vki0L=fipZc6p8(KdKU=jS{!IBz3SM9-El2<2J7Da#hf^|bLfl|UcLM; 
zW0T|2n{K|GCn3`GJ}8hgwLljPU?k)QH*~UeJ+rb+i|;4vjbw1Nrju9MK3lW%N_AK)6b^v5tj6d`exAgEr8&~PC6;+)oOqH)#J*0 zwQ}7W5q>eB01GU2^2N94+1wwulGG2s2oB>+Sc5Z^E|q8;>y+k>ztvq#p|AXUqLVIp zj7y&^*|hX~BZ_j^1Tr|%cHTED`n~wd!O7uNaiB)v9GstaZSnKj$hZhW3c=BT6@|{} zYDe^Q+0t6C@ZXsBw)U;`g}JPK^vT1ebAIwScI1!U;aynu?yNXselM`hM9IIv%7>Hf zhtpVW?!HLzyzJ{riAC}#!Lh~XiWKZ3<2}p@iA9fx*uQFAo>O{Aw7Va#Hne6vxDsyc z6foP44cN#zlf98n%g(%7O%pMi&!rka{c<}0!Ov3~q0HX!+i48yv^B@4p@y>vEPuc2 z!@1fQmZvoNt({Lzg*wgM&8Cq?%awJnqkz@ss+gA*lekCIXEgyw7#X}eb1|0Wh~zh_ zku{m^+8MjZs{;^-h}+IJ`ug8b!YFyN%d%wV7JLLVCJNCs$+{o?I;&7;IuWW4o%%lc z+7E6vEI2i?(NOfaFX-)#-Cd@F0K|A;aJcSu)O~RT`A7f)F4`c0(^Z z`a)Rh>&2w*Zv)>rP% zYw}@+oh9?ZDr)QrOIWWTE&DZ6!vhxg1*Df7I7T~^cdW74U^y2QE0OC~0|hfKZnke2 z$B8`41!7*Ow*)U#CCXreL#Co53iNuBur`n0w41j(F%<52U2$^LR6>P6jyG9jtEPUz zr|Y+Z2Xs@#XqHi$asA8@X)e9BCeA=h;(m;e$k%u@a*gEQefltuc0*$6rJ{hW84_aq z2e`~Sq*^2r5Op0Qgkmh^yvOiy+NG6+`1&G3nG@qmd`@R}2GXMq^A(xXmTUFV^$t?G z6;vha$1n2V>hNJeSc*jDYrriB9am}rrkGx~kOhlayyk5WZ8ay0aH#mtEvcZT)@1a1 zrPamPDFAkdxiEc2k~(+Y7UivQg!!7Q@m8JJ-AD<`4Rx3z*j2 z@n^C!MJLqz!xB6uJe$Ymkr(E&=X&A=IC~oIk@&K`9!Z;J>Da1HhtOEY(!AhRdpmqlCJgx92l3d%O-er6J>iv#Yj4j z6gXKm7ze?M?ah+{UvjM7*e{JzDm(Zk!))?Rkz~iElI6T&rU=i*y=ftOlH3+Ory5r| zvS!!ZP63pKGbnBrup`$)q2R;Z#9I!Lfojt?L-*Lvi3u|gS1`UsK)txm)h=gI5QhTp z&{gDCEji2$&dr`HRDc3-^K<$$T6?h$)O6za;FMo~i8+@P6H;CYcG&-Rr#CnAApSLM zm9AC7xTX5cY#(ww_MzR zWs7V@++weXaMzvhB7ej}yb7sTgmvofP5+Rhy(YJ}WCof|Vmd-w>@fbaL=jd5TVq$=#K#Z7E6NP)H(pbv!%$So4Q1O#tIg@I{M$-)Jn? zh&8kDT|ACCu~Yp^<|vvwC_aBPEXYnKM+-6aNBgp|VwuSF(m>xhJ?LTOAl**vkvP(r7B5L{$5W;8DR8PJ8zA$qAj~ z=WEsD^vomkzn;eAD6|h1^B+T^cHuu2+FUC&jz!M@o5Na7$|+J+eN(#@Na zwfcmELg2}pM+|vwv%M9LY16HED9eetaLEalc>qoNI(fffMk{b=1w6Cz3Db}@?H&-H zNG)w$;5VXu$8Yx{5e;l&sx@osFSN$gE4nf#Bhlo3H#PAx6C})A#R*#?Fo@CYU^;SI zN?)Os^f~Z$tr}uphEzKBLQ`Lf`?ziD%t;|k#jGt!wS<<|3teChxIh1vuYHByr9H@e zT9)9H2rm0b*l5INgFuI@y}v0FG8Uk?zIn}M?AwZf2?8%(K|WVy^i{JKhx#dk=y>)l zUbUf9E%h;L$R$QIAp0seNf`o^9SQBgKe=^Ne3n&qMQGC)zLL)yuHLq+;V|IeVC1P} zX>=~Zv|hC?&V^rZ8JQrx_uU1U4eOPu_1R^q2Hct4Z+~7Ud*iRNh*gG&DCe4nw55sB zZOfVtni&=qd4;3Dsv1DVzPu_)q+ej8aEnA5Cnky-7MGX;n}IvZ#)-Nlpj;Jud}BW) z9?5H&YVay(a8CB*gf1{tl=s(LYQ~gJGjGw+R*0LpRrMcD@msv+=S3oNmA;JS9RT4- zuD==LsO-aYjj>=kl2FU=-~qxIa$F(leVsl1M^PL-O#!(-%Mg(s;#aVG8E7C}-=L~m zUz0;cHh(F!ufP|tZdN4C&kgpY5@f)z$)W}5hgD^HBn02 zVhOeK-_HsGJmt2RRWuaEaK#+>X{F&0Z!0t( ze!AMeUvf=C?;YDcp$W7Eem;1?PH0VPU60LmZG1ah0_#gvMad%#OX%~!-F(z&>Y}8E z=!r|zEa2f!86}zcR=&&;;WlU)P7ofVll(#31oE!!@E3IJ;vd8DwYiuLGAy$w!M$P7 z7%9H;@dY99g5lOSW@tE46qV3PfcS5k;lL`UKYPDC^)*J}hvC;A;jIHIA0-( zso;*|`vMXhC8V0K?4ITIr>gdQhq1-*-LQgGQkgxsZ2y`Dcx|bD^tYN?s^IQx1Mz1X z(JJa9VR-_v;1n?f7?Nw25_Y@fwQNeN<*x0zGAUV?>U%G$VEXu)nmM#t{X)_|^M>tp zg-7JY&F1T(K=kFw1$slP!%TA~fg+V6>n$r7zcScL`)G)$=q(SyKF-ZMc-y}<*1r$rAC0$l_uQfC4cAWsW9Q{< zR}Z0xWh!zxCHbNn%S(%bA8Mf2l8__H4YpAA#|TgIWWe~u zd?Fv#wh^jvVWEv^Y2~jgOYsY}8~nb_2n=n;P@(%8etyw7EcLaOxC3>&y~o2jbcdByb8wv z_XcHU1#)~2$h?>Q;I8|`H8CN7+&32TY_K~T%pWLLh8nDHNmpm+_u zx|vc&)=j^z)_Vmfh>{>q@UKv6!r)RTWg_r<+b_sK@LB$ykd;X~awu!LkEu;{s?j2| z8Yu!ZI`T)ubw~uaaUV6IhXNIPV$wdc&ifD=v61uNu*sm;@aC+sd$%`m96Q?))wVS{$ ztI;eUxtUDQ8v^Q(yxF~7l8U7mN9O4>-e@CfUi(H(!g4Gj9_@#_j4v-A`RsF2IN_FJ zmH|8)0kO_jf5jfOMrMy@FS;V?BErA{wz)ZY~{HUF#e zt3ppf6;f4vdUIO9Z{|qS)Xbg?n*}41A)u4smFBCi{I^SBxGqI z&;tEqerkcsBU7z|Ni^I-YXJ#t5Vrrz!k3}^@xvwcJ@OqT_P$jLb<CL78NgPL)4aT2lBO?7oN>9$J>lKx9;h2@|j*&io=E6wlcdtc`W_|A&A=r54@ zpRQ5z@I=MAX)zGiDC|=-5Mvm0m&o-qH(~!p>dK)!IeA6J`Z)Kbi$kYRt%_h%3)3r) z10Alvy{d)H3Z10lol-IGWvAjB|cYc%IotdOk>i^L0IQvJ3MjUzAR7}p&su=7=BkZv$o%ow7e&>x=W8vD3a<*}XGgvOiwqByDFKJ( zr}sfUnAMX58TgRH8kbQkS{-hh?XR!f;bC|NghqPzWNUfv1~p)76Q@m)Ax_je3Z2uB 
zhJVr&Ngz^r?QEG2_rT$gb*EJA&~>#A@F3-!ra675ufk;fj`A^FW`m1(4&Ao@GLSVcXmY?|gc#b%4OSR8}hh{#@Upzdvwlfx@AZic+SCI{>9l zhdc&jgXV%Han}8_CG%{L)?487#Upx7!TAFQVHVt~BbEsv9DKSNw4HBw&%d+WGtNj? zMU5C7hHej}b+LRyP0_IQZX;#ylJ+2ozbRgMviXOqS%YeB#}N;}Y6DmK27opaZxalG ztykJt!mPHH#DY3HSX9sZe1)oKtiyoGG zfFHC)h#GuKG8bs)etg;-1(Igje&Y{=41zzEne-o2n;Mv6+Da-wa72Hr{*L2Og+X@< zX6In`t}Dhr4ELvk!~;1Z%SxV~Lx!e2aODm>OgCpC3TGI!^j-@cGWkUxlGAMtW3Bpv zRP6A&fZ&27=k>Ai%e0T zKLvn(;SQI}0!zN>z^=bS9}T_B%n|r1mYZzU znj>4Wc||^8%Py%$?QgTl{+DIfeE8rUfe#CbIhJZ)j1&#bz)ckPZE{FzC{mi>9z zXb21J6Kf2x7I@eIcG$eBI{fSbfybaIBt77h8J@`QYbE1XLhe!H9tZEf?(g+aFMzz& zwyQO*_%5Iq2yGFT-0mn6q+>;yf;JF4A*tT(FIL8jo;p^jtmprNFkrKI$8W zGrbV$>mN#ei?&6&zl|AepHclxzT`|Y%>mg$UiFPUJ7P&$Oab#TjQh;B{r_J|$R(71 zxG1ha0d}>e|nv~aEk;r#za}>SvRrBW+NA~Y;3$@Y997ZhiYs$BN7Zos^ zcwady`1YDLIteJQ_7Zy_3=1f(CCQTSk>XNi5Baqo0Vv}9Kv;lUv27!Y**F={l!(Ic zH{2)vrJv&0j7&69z99$7GJpX&HnXlJIcF2d@`muS=WHTSrjMALt&<8K{Kd9+`n0O@ zV+E;h7vq)(HN?wG1-bWiZ4(IqnsHPdW?-8~B%S-Q4C{eU9Ln_?Q6yLjL1KO+&)PCO-gA9On6qLol!q55-h0#26q^VHvsO7RIM0*TYU(i^1kKtMfG|Rq!UiBv=myp$K=Ag3&u03-3{oJBIKtT zV7=}EGv5`#_v&&^qc^M$jK`#@Fh0dZrFJ{O8mUNs){wxJzbLF0zNUO1gx(Mz_V9HR zX!LWILh`txRcpKybV-v`F(KL+SGOx?JYmI>$8AUb!D9XTPOD~Xa^q*1THa*MsO6~= zhyDte-y2hJhf!A^cde*NVN)(Ky4|3tQj*Qb+8;EW0SGS-M*0U)-!1fpIbw-9we#)v z)EuW-E+-6h2j=!kdBBclg9h$hgSgcU$@5j0^Gdg43X6re zZuaj~ZPtr^fU`-!?NyK-O1=Q=Sp+b^xai?NJ%Yp zso?GB1X1^afd2ls=oFJNb4@vGsZ4wCuEF%0Es0wA9=8#yr8U#uOWOyyvAcBvR_wFn z`)~oi5=fTdICjen#x@_kWe&Xj?|ZKewU6FWy&xiBJpS`Bpm<;V=qn$yHGTBYXD0#1 z)=#GO+}|Vp{n-b=d+Wok@7=nftlx_NQ6K9XWnF!)+lqC=xb9S}dr<4{;<~H7jw=M% zYi$?SLDV{8Tu0mM)WSNCvd%%R6K?+^P1X(Lx?x;5jO&JR-7u~j#&yHEZW#ZIm^ZD* z7}w*o>%r*t$oqQM!g`9vKlv$wM3r^KxQ-at5#u^yTt|%Sh;bb;t|P{E#Q0D8?t1>+ zdj8#d{@r^1-Fp7rdj8#d{@r^1-Fp7re|^Js(rBGDS|^RxNuzbr=>JL@RZZQP+^}K8 l!`s)dn*BfJR0jDQ@;`jVHMR!21HZT7_6@`9Se<)M{|`Jh-5dY_ literal 0 HcmV?d00001 diff --git a/pyproject.toml b/pyproject.toml index 909c16f..9daf651 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.nbqa.addopts] flake8 = [ - "--ignore=E501,W503" + "--ignore=E501,W503,F811" ] [tool.pytest.ini_options] addopts = "--ignore-glob=Untitled* --ignore=snippets.ipynb --ignore-glob=draft*" \ No newline at end of file diff --git a/requirements-dev.in b/requirements-dev.in index d82bb2e..2199777 100644 --- a/requirements-dev.in +++ b/requirements-dev.in @@ -6,3 +6,11 @@ isort flake8 nbqa pre-commit +jupyterlab-code-formatter +gitpython +frictionless +beautifulsoup4 +rdflib>=7.* +rocrate +giturlparse +lxml diff --git a/requirements-dev.txt b/requirements-dev.txt index af217a6..289822d 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,31 +1,102 @@ # -# This file is autogenerated by pip-compile with python 3.8 -# To update, run: +# This file is autogenerated by pip-compile with Python 3.10 +# by the following command: # # pip-compile requirements-dev.in # -asttokens==2.0.5 +aiohttp==3.9.5 + # via + # -c requirements.txt + # tuspy +aiosignal==1.3.1 + # via + # -c requirements.txt + # aiohttp +anyio==4.3.0 + # via + # -c requirements.txt + # jupyter-server +arcp==0.2.1 + # via + # -c requirements.txt + # rocrate +argon2-cffi==23.1.0 + # via + # -c requirements.txt + # jupyter-server +argon2-cffi-bindings==21.2.0 + # via + # -c requirements.txt + # argon2-cffi +arrow==1.3.0 + # via + # -c requirements.txt + # isoduration +asttokens==2.4.1 # via # -c requirements.txt # stack-data -attrs==21.4.0 +async-timeout==4.0.3 + # via + # -c requirements.txt + # aiohttp +attrs==23.2.0 # via # -c requirements.txt + # aiohttp + # frictionless # jsonschema - # pytest 
-backcall==0.2.0 + # referencing +autopep8==2.1.0 + # via nbqa +beautifulsoup4==4.12.3 # via # -c requirements.txt - # ipython -black[jupyter]==22.1.0 + # -r requirements-dev.in + # nbconvert +bioblend==1.2.0 + # via + # -c requirements.txt + # gxformat2 +black[jupyter]==24.4.0 # via -r requirements-dev.in -cfgv==3.3.1 +bleach==6.1.0 + # via + # -c requirements.txt + # nbconvert +cachecontrol[filecache]==0.14.0 + # via + # -c requirements.txt + # schema-salad +certifi==2024.2.2 + # via + # -c requirements.txt + # requests +cffi==1.16.0 + # via + # -c requirements.txt + # argon2-cffi-bindings +cfgv==3.4.0 # via pre-commit -click==8.0.4 - # via black -coverage==6.3.2 +chardet==5.2.0 + # via frictionless +charset-normalizer==3.3.2 + # via + # -c requirements.txt + # requests +click==8.1.7 + # via + # -c requirements.txt + # black + # rocrate + # typer +comm==0.2.2 + # via + # -c requirements.txt + # ipykernel +coverage==7.4.4 # via nbval -debugpy==1.5.1 +debugpy==1.8.1 # via # -c requirements.txt # ipykernel @@ -33,204 +104,519 @@ decorator==5.1.1 # via # -c requirements.txt # ipython -distlib==0.3.4 +defusedxml==0.7.1 + # via + # -c requirements.txt + # nbconvert +distlib==0.3.8 # via virtualenv -entrypoints==0.4 +exceptiongroup==1.2.1 # via # -c requirements.txt - # jupyter-client -executing==0.8.3 + # anyio + # ipython + # pytest +executing==2.0.1 # via # -c requirements.txt # stack-data -filelock==3.6.0 - # via virtualenv -flake8==4.0.1 +fastjsonschema==2.19.1 + # via + # -c requirements.txt + # nbformat +filelock==3.13.4 + # via + # -c requirements.txt + # cachecontrol + # virtualenv +flake8==7.0.0 # via -r requirements-dev.in -identify==2.4.11 +fqdn==1.5.1 + # via + # -c requirements.txt + # jsonschema +frictionless==5.14.5 + # via -r requirements-dev.in +frozenlist==1.4.1 + # via + # -c requirements.txt + # aiohttp + # aiosignal +galaxy2cwl==0.1.4 + # via + # -c requirements.txt + # rocrate +gitdb==4.0.11 + # via gitpython +gitpython==3.1.43 + # via -r requirements-dev.in +giturlparse==0.12.0 + # via -r requirements-dev.in +gxformat2==0.18.0 + # via + # -c requirements.txt + # galaxy2cwl +humanize==4.9.0 + # via frictionless +identify==2.5.36 # via pre-commit -importlib-resources==5.4.0 +idna==3.7 # via # -c requirements.txt + # anyio # jsonschema -iniconfig==1.1.1 + # requests + # yarl +iniconfig==2.0.0 # via pytest -ipykernel==6.9.1 +ipykernel==6.29.4 # via # -c requirements.txt # nbval -ipython==8.1.1 +ipython==8.23.0 # via # -c requirements.txt # black # ipykernel # nbqa -ipython-genutils==0.2.0 +isodate==0.6.1 # via # -c requirements.txt - # nbformat -isort==5.10.1 + # frictionless + # rdflib +isoduration==20.11.0 + # via + # -c requirements.txt + # jsonschema +isort==5.13.2 # via -r requirements-dev.in -jedi==0.18.1 +jedi==0.19.1 # via # -c requirements.txt # ipython -jsonschema==4.4.0 +jinja2==3.1.3 + # via + # -c requirements.txt + # frictionless + # jupyter-server + # nbconvert + # rocrate +jsonpointer==2.4 # via # -c requirements.txt + # jsonschema +jsonschema[format-nongpl]==4.21.1 + # via + # -c requirements.txt + # frictionless + # jupyter-events # nbformat -jupyter-client==7.1.2 +jsonschema-specifications==2023.12.1 + # via + # -c requirements.txt + # jsonschema +jupyter-client==8.6.1 # via # -c requirements.txt # ipykernel + # jupyter-server + # nbclient # nbval -jupyter-core==4.9.2 +jupyter-core==5.7.2 # via # -c requirements.txt + # ipykernel # jupyter-client + # jupyter-server + # nbclient + # nbconvert # nbformat -matplotlib-inline==0.1.3 +jupyter-events==0.10.0 + # 
via + # -c requirements.txt + # jupyter-server +jupyter-server==2.14.0 + # via + # -c requirements.txt + # jupyterlab-code-formatter +jupyter-server-terminals==0.5.3 + # via + # -c requirements.txt + # jupyter-server +jupyterlab-code-formatter==2.2.1 + # via -r requirements-dev.in +jupyterlab-pygments==0.3.0 + # via + # -c requirements.txt + # nbconvert +lxml==5.2.1 + # via -r requirements-dev.in +markdown-it-py==3.0.0 + # via rich +marko==2.0.3 + # via frictionless +markupsafe==2.1.5 + # via + # -c requirements.txt + # jinja2 + # nbconvert +matplotlib-inline==0.1.7 # via # -c requirements.txt # ipykernel # ipython -mccabe==0.6.1 +mccabe==0.7.0 # via flake8 -mypy-extensions==0.4.3 - # via black -nbformat==5.1.3 +mdurl==0.1.2 + # via markdown-it-py +mistune==3.0.2 + # via + # -c requirements.txt + # nbconvert + # schema-salad +msgpack==1.0.8 + # via + # -c requirements.txt + # cachecontrol +multidict==6.0.5 # via # -c requirements.txt + # aiohttp + # yarl +mypy-extensions==1.0.0 + # via + # -c requirements.txt + # black + # schema-salad +nbclient==0.10.0 + # via + # -c requirements.txt + # nbconvert +nbconvert==7.16.3 + # via + # -c requirements.txt + # jupyter-server +nbformat==5.10.4 + # via + # -c requirements.txt + # jupyter-server + # nbclient + # nbconvert # nbval -nbqa==1.3.0 +nbqa==1.8.5 # via -r requirements-dev.in -nbval==0.9.6 +nbval==0.11.0 # via -r requirements-dev.in -nest-asyncio==1.5.4 +nest-asyncio==1.6.0 # via # -c requirements.txt # ipykernel - # jupyter-client -nodeenv==1.6.0 +nodeenv==1.8.0 # via pre-commit -packaging==21.3 +overrides==7.7.0 # via # -c requirements.txt + # jupyter-server +packaging==24.0 + # via + # -c requirements.txt + # black + # ipykernel + # jupyter-server + # jupyterlab-code-formatter + # nbconvert # pytest -parso==0.8.3 +pandocfilters==1.5.1 + # via + # -c requirements.txt + # nbconvert +parso==0.8.4 # via # -c requirements.txt # jedi -pathspec==0.9.0 +pathspec==0.12.1 # via black -pexpect==4.8.0 +petl==1.7.15 + # via frictionless +pexpect==4.9.0 # via # -c requirements.txt # ipython -pickleshare==0.7.5 +platformdirs==4.2.0 # via # -c requirements.txt - # ipython -platformdirs==2.5.1 - # via # black + # jupyter-core # virtualenv -pluggy==1.0.0 +pluggy==1.5.0 # via pytest -pre-commit==2.17.0 +pre-commit==3.7.0 # via -r requirements-dev.in -prompt-toolkit==3.0.28 +prometheus-client==0.20.0 + # via + # -c requirements.txt + # jupyter-server +prompt-toolkit==3.0.43 # via # -c requirements.txt # ipython +psutil==5.9.8 + # via + # -c requirements.txt + # ipykernel ptyprocess==0.7.0 # via # -c requirements.txt # pexpect + # terminado pure-eval==0.2.2 # via # -c requirements.txt # stack-data -py==1.11.0 - # via pytest -pycodestyle==2.8.0 - # via flake8 -pyflakes==2.4.0 +pycodestyle==2.11.1 + # via + # autopep8 + # flake8 +pycparser==2.22 + # via + # -c requirements.txt + # cffi +pyflakes==3.2.0 # via flake8 -pygments==2.11.2 +pygments==2.17.2 # via # -c requirements.txt # ipython -pyparsing==3.0.7 + # nbconvert + # rich +pyparsing==3.1.2 # via # -c requirements.txt - # packaging -pyrsistent==0.18.1 - # via - # -c requirements.txt - # jsonschema -pytest==7.0.1 + # rdflib +pytest==8.1.1 # via # -r requirements-dev.in # nbval -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via # -c requirements.txt + # arrow + # frictionless # jupyter-client -pyyaml==6.0 - # via pre-commit -pyzmq==22.3.0 + # rocrate +python-json-logger==2.0.7 + # via + # -c requirements.txt + # jupyter-events +python-slugify==8.0.4 + # via frictionless +pyyaml==6.0.1 + # via + # -c 
requirements.txt + # frictionless + # galaxy2cwl + # gxformat2 + # jupyter-events + # pre-commit +pyzmq==26.0.2 # via # -c requirements.txt + # ipykernel # jupyter-client + # jupyter-server +rdflib==7.0.0 + # via + # -c requirements.txt + # -r requirements-dev.in + # schema-salad +referencing==0.34.0 + # via + # -c requirements.txt + # jsonschema + # jsonschema-specifications + # jupyter-events +requests==2.31.0 + # via + # -c requirements.txt + # bioblend + # cachecontrol + # frictionless + # requests-toolbelt + # rocrate + # schema-salad + # tuspy +requests-toolbelt==1.0.0 + # via + # -c requirements.txt + # bioblend +rfc3339-validator==0.1.4 + # via + # -c requirements.txt + # jsonschema + # jupyter-events +rfc3986==2.0.0 + # via frictionless +rfc3986-validator==0.1.1 + # via + # -c requirements.txt + # jsonschema + # jupyter-events +rich==13.7.1 + # via typer +rocrate==0.10.0 + # via + # -c requirements.txt + # -r requirements-dev.in +rpds-py==0.18.0 + # via + # -c requirements.txt + # jsonschema + # referencing +ruamel-yaml==0.18.6 + # via + # -c requirements.txt + # schema-salad +ruamel-yaml-clib==0.2.8 + # via + # -c requirements.txt + # ruamel-yaml +schema-salad==8.5.20240410123758 + # via + # -c requirements.txt + # gxformat2 +send2trash==1.8.3 + # via + # -c requirements.txt + # jupyter-server +shellingham==1.5.4 + # via typer +simpleeval==0.9.13 + # via frictionless six==1.16.0 # via # -c requirements.txt # asttokens - # nbval + # bleach + # isodate # python-dateutil - # virtualenv -stack-data==0.2.0 + # rfc3339-validator +smmap==5.0.1 + # via gitdb +sniffio==1.3.1 + # via + # -c requirements.txt + # anyio +soupsieve==2.5 + # via + # -c requirements.txt + # beautifulsoup4 +stack-data==0.6.3 # via # -c requirements.txt # ipython -tokenize-rt==4.2.1 +stringcase==1.2.0 + # via frictionless +tabulate==0.9.0 + # via frictionless +terminado==0.18.1 + # via + # -c requirements.txt + # jupyter-server + # jupyter-server-terminals +text-unidecode==1.3 + # via python-slugify +tinycss2==1.2.1 + # via + # -c requirements.txt + # nbconvert +tinydb==4.8.0 + # via + # -c requirements.txt + # tuspy +tokenize-rt==5.2.0 # via # black # nbqa -toml==0.10.2 - # via pre-commit tomli==2.0.1 # via + # -c requirements.txt + # autopep8 # black # nbqa # pytest -tornado==6.1 +tornado==6.4 # via # -c requirements.txt # ipykernel # jupyter-client -traitlets==5.1.1 + # jupyter-server + # terminado +traitlets==5.14.3 # via # -c requirements.txt + # comm # ipykernel # ipython # jupyter-client # jupyter-core + # jupyter-events + # jupyter-server # matplotlib-inline + # nbclient + # nbconvert # nbformat -typing-extensions==4.1.1 - # via black -virtualenv==20.13.3 +tuspy==1.0.3 + # via + # -c requirements.txt + # bioblend +typer[all]==0.12.3 + # via frictionless +types-python-dateutil==2.9.0.20240316 + # via + # -c requirements.txt + # arrow +typing-extensions==4.11.0 + # via + # -c requirements.txt + # anyio + # bioblend + # black + # frictionless + # ipython + # typer +uri-template==1.3.0 + # via + # -c requirements.txt + # jsonschema +urllib3==2.2.1 + # via + # -c requirements.txt + # requests +validators==0.28.1 + # via frictionless +virtualenv==20.25.3 # via pre-commit -wcwidth==0.2.5 +wcwidth==0.2.13 # via # -c requirements.txt # prompt-toolkit -zipp==3.7.0 +webcolors==1.13 + # via + # -c requirements.txt + # jsonschema +webencodings==0.5.1 + # via + # -c requirements.txt + # bleach + # tinycss2 +websocket-client==1.7.0 + # via + # -c requirements.txt + # jupyter-server +yarl==1.9.4 # via # -c 
requirements.txt - # importlib-resources + # aiohttp # The following packages are considered to be unsafe in a requirements file: # setuptools diff --git a/requirements.in b/requirements.in index 08b98d3..a0cf530 100644 --- a/requirements.in +++ b/requirements.in @@ -4,4 +4,7 @@ requests tqdm python-dotenv ipywidgets -requests-cache \ No newline at end of file +requests-cache +rocrate +ipynbname +jupyter-archive \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 317b745..39dd4bc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,276 +1,402 @@ # -# This file is autogenerated by pip-compile with python 3.8 -# To update, run: +# This file is autogenerated by pip-compile with Python 3.10 +# by the following command: # -# pip-compile +# pip-compile requirements.in # -anyio==3.5.0 - # via jupyter-server -appdirs==1.4.4 - # via requests-cache -argon2-cffi==21.3.0 +aiohttp==3.9.5 + # via tuspy +aiosignal==1.3.1 + # via aiohttp +anyio==4.3.0 # via + # httpx # jupyter-server - # notebook +arcp==0.2.1 + # via rocrate +argon2-cffi==23.1.0 + # via jupyter-server argon2-cffi-bindings==21.2.0 # via argon2-cffi -asttokens==2.0.5 +arrow==1.3.0 + # via isoduration +asttokens==2.4.1 # via stack-data -attrs==21.4.0 +async-lru==2.0.4 + # via jupyterlab +async-timeout==4.0.3 + # via aiohttp +attrs==23.2.0 # via + # aiohttp # cattrs # jsonschema + # referencing # requests-cache -babel==2.9.1 +babel==2.14.0 # via jupyterlab-server -backcall==0.2.0 - # via ipython -bleach==4.1.0 +beautifulsoup4==4.12.3 + # via nbconvert +bioblend==1.2.0 + # via gxformat2 +bleach==6.1.0 # via nbconvert -cattrs==1.10.0 +cachecontrol[filecache]==0.14.0 + # via schema-salad +cattrs==23.2.3 # via requests-cache -certifi==2021.10.8 - # via requests -cffi==1.15.0 +certifi==2024.2.2 + # via + # httpcore + # httpx + # requests +cffi==1.16.0 # via argon2-cffi-bindings -charset-normalizer==2.0.12 +charset-normalizer==3.3.2 # via requests -debugpy==1.5.1 +click==8.1.7 + # via rocrate +comm==0.2.2 + # via + # ipykernel + # ipywidgets +debugpy==1.8.1 # via ipykernel decorator==5.1.1 # via ipython defusedxml==0.7.1 # via nbconvert -entrypoints==0.4 +exceptiongroup==1.2.1 # via - # jupyter-client - # jupyterlab-server - # nbconvert -executing==0.8.3 + # anyio + # cattrs + # ipython +executing==2.0.1 # via stack-data -idna==3.3 +fastjsonschema==2.19.1 + # via nbformat +filelock==3.13.4 + # via cachecontrol +fqdn==1.5.1 + # via jsonschema +frozenlist==1.4.1 + # via + # aiohttp + # aiosignal +galaxy2cwl==0.1.4 + # via rocrate +gxformat2==0.18.0 + # via galaxy2cwl +h11==0.14.0 + # via httpcore +httpcore==1.0.5 + # via httpx +httpx==0.27.0 + # via jupyterlab +idna==3.7 # via # anyio + # httpx + # jsonschema # requests -importlib-resources==5.4.0 - # via jsonschema -ipykernel==6.9.1 + # yarl +ipykernel==6.29.4 # via - # ipywidgets - # notebook -ipython==8.1.1 - # via - # ipykernel - # ipywidgets + # ipynbname # jupyterlab -ipython-genutils==0.2.0 +ipynbname==2023.2.0.0 + # via -r requirements.in +ipython==8.23.0 # via + # ipykernel # ipywidgets - # jupyter-server - # nbformat - # notebook -ipywidgets==7.6.5 +ipywidgets==8.1.2 # via -r requirements.in -jedi==0.18.1 +isodate==0.6.1 + # via rdflib +isoduration==20.11.0 + # via jsonschema +jedi==0.19.1 # via ipython -jinja2==3.0.3 +jinja2==3.1.3 # via # jupyter-server # jupyterlab # jupyterlab-server # nbconvert - # notebook -json5==0.9.6 + # rocrate +json5==0.9.25 # via jupyterlab-server -jsonschema==4.4.0 +jsonpointer==2.4 + # via jsonschema 
+jsonschema[format-nongpl]==4.21.1 # via + # jupyter-events # jupyterlab-server # nbformat -jupyter-client==7.1.2 +jsonschema-specifications==2023.12.1 + # via jsonschema +jupyter-archive==3.4.0 + # via -r requirements.in +jupyter-client==8.6.1 # via # ipykernel # jupyter-server # nbclient - # notebook -jupyter-core==4.9.2 +jupyter-core==5.7.2 # via + # ipykernel # jupyter-client # jupyter-server # jupyterlab + # nbclient # nbconvert # nbformat - # notebook -jupyter-server==1.13.5 +jupyter-events==0.10.0 + # via jupyter-server +jupyter-lsp==2.2.5 + # via jupyterlab +jupyter-server==2.14.0 # via + # jupyter-archive + # jupyter-lsp # jupyterlab # jupyterlab-server - # nbclassic # notebook-shim -jupyterlab==3.3.0 +jupyter-server-terminals==0.5.3 + # via jupyter-server +jupyterlab==4.1.6 # via -r requirements.in -jupyterlab-pygments==0.1.2 +jupyterlab-pygments==0.3.0 # via nbconvert -jupyterlab-server==2.10.3 +jupyterlab-server==2.27.0 # via jupyterlab -jupyterlab-widgets==1.0.2 +jupyterlab-widgets==3.0.10 # via ipywidgets -markupsafe==2.1.0 - # via jinja2 -matplotlib-inline==0.1.3 +markupsafe==2.1.5 + # via + # jinja2 + # nbconvert +matplotlib-inline==0.1.7 # via # ipykernel # ipython -mistune==0.8.4 - # via nbconvert -nbclassic==0.3.6 - # via jupyterlab -nbclient==0.5.12 - # via nbconvert -nbconvert==6.4.2 +mistune==3.0.2 # via - # jupyter-server - # notebook -nbformat==5.1.3 + # nbconvert + # schema-salad +msgpack==1.0.8 + # via cachecontrol +multidict==6.0.5 + # via + # aiohttp + # yarl +mypy-extensions==1.0.0 + # via schema-salad +nbclient==0.10.0 + # via nbconvert +nbconvert==7.16.3 + # via jupyter-server +nbformat==5.10.4 # via - # ipywidgets # jupyter-server # nbclient # nbconvert - # notebook -nest-asyncio==1.5.4 - # via - # ipykernel - # jupyter-client - # nbclient - # notebook -notebook==6.4.8 - # via - # nbclassic - # widgetsnbextension -notebook-shim==0.1.0 - # via nbclassic -numpy==1.22.3 +nest-asyncio==1.6.0 + # via ipykernel +notebook-shim==0.2.4 + # via jupyterlab +numpy==1.26.4 # via pandas -packaging==21.3 +overrides==7.7.0 + # via jupyter-server +packaging==24.0 # via - # bleach + # ipykernel # jupyter-server # jupyterlab # jupyterlab-server -pandas==1.4.1 + # nbconvert +pandas==2.2.2 # via -r requirements.in -pandocfilters==1.5.0 +pandocfilters==1.5.1 # via nbconvert -parso==0.8.3 +parso==0.8.4 # via jedi -pexpect==4.8.0 - # via ipython -pickleshare==0.7.5 +pexpect==4.9.0 # via ipython -prometheus-client==0.13.1 +platformdirs==4.2.0 # via - # jupyter-server - # notebook -prompt-toolkit==3.0.28 + # jupyter-core + # requests-cache +prometheus-client==0.20.0 + # via jupyter-server +prompt-toolkit==3.0.43 # via ipython +psutil==5.9.8 + # via ipykernel ptyprocess==0.7.0 # via # pexpect # terminado pure-eval==0.2.2 # via stack-data -pycparser==2.21 +pycparser==2.22 # via cffi -pygments==2.11.2 +pygments==2.17.2 # via # ipython - # jupyterlab-pygments # nbconvert -pyparsing==3.0.7 - # via packaging -pyrsistent==0.18.1 - # via jsonschema -python-dateutil==2.8.2 +pyparsing==3.1.2 + # via rdflib +python-dateutil==2.9.0.post0 # via + # arrow # jupyter-client # pandas -python-dotenv==0.19.2 + # rocrate +python-dotenv==1.0.1 # via -r requirements.in -pytz==2021.3 +python-json-logger==2.0.7 + # via jupyter-events +pytz==2024.1 + # via pandas +pyyaml==6.0.1 # via - # babel - # pandas -pyzmq==22.3.0 + # galaxy2cwl + # gxformat2 + # jupyter-events +pyzmq==26.0.2 # via + # ipykernel # jupyter-client # jupyter-server - # notebook -requests==2.27.1 +rdflib==7.0.0 + # via schema-salad 
+referencing==0.34.0 + # via + # jsonschema + # jsonschema-specifications + # jupyter-events +requests==2.31.0 # via # -r requirements.in + # bioblend + # cachecontrol # jupyterlab-server # requests-cache -requests-cache==0.9.3 + # requests-toolbelt + # rocrate + # schema-salad + # tuspy +requests-cache==1.2.0 # via -r requirements.in -send2trash==1.8.0 +requests-toolbelt==1.0.0 + # via bioblend +rfc3339-validator==0.1.4 # via - # jupyter-server - # notebook + # jsonschema + # jupyter-events +rfc3986-validator==0.1.1 + # via + # jsonschema + # jupyter-events +rocrate==0.10.0 + # via -r requirements.in +rpds-py==0.18.0 + # via + # jsonschema + # referencing +ruamel-yaml==0.18.6 + # via schema-salad +ruamel-yaml-clib==0.2.8 + # via ruamel-yaml +schema-salad==8.5.20240410123758 + # via gxformat2 +send2trash==1.8.3 + # via jupyter-server six==1.16.0 # via # asttokens # bleach + # isodate # python-dateutil + # rfc3339-validator # url-normalize -sniffio==1.2.0 - # via anyio -stack-data==0.2.0 +sniffio==1.3.1 + # via + # anyio + # httpx +soupsieve==2.5 + # via beautifulsoup4 +stack-data==0.6.3 # via ipython -terminado==0.13.3 +terminado==0.18.1 # via # jupyter-server - # notebook -testpath==0.6.0 + # jupyter-server-terminals +tinycss2==1.2.1 # via nbconvert -tornado==6.1 +tinydb==4.8.0 + # via tuspy +tomli==2.0.1 + # via jupyterlab +tornado==6.4 # via # ipykernel # jupyter-client # jupyter-server # jupyterlab - # notebook # terminado -tqdm==4.63.0 +tqdm==4.66.2 # via -r requirements.in -traitlets==5.1.1 +traitlets==5.14.3 # via + # comm # ipykernel # ipython # ipywidgets # jupyter-client # jupyter-core + # jupyter-events # jupyter-server + # jupyterlab # matplotlib-inline # nbclient # nbconvert # nbformat - # notebook +tuspy==1.0.3 + # via bioblend +types-python-dateutil==2.9.0.20240316 + # via arrow +typing-extensions==4.11.0 + # via + # anyio + # async-lru + # bioblend + # cattrs + # ipython +tzdata==2024.1 + # via pandas +uri-template==1.3.0 + # via jsonschema url-normalize==1.4.3 # via requests-cache -urllib3==1.26.8 +urllib3==2.2.1 # via # requests # requests-cache -wcwidth==0.2.5 +wcwidth==0.2.13 # via prompt-toolkit +webcolors==1.13 + # via jsonschema webencodings==0.5.1 - # via bleach -websocket-client==1.3.1 + # via + # bleach + # tinycss2 +websocket-client==1.7.0 # via jupyter-server -widgetsnbextension==3.5.2 +widgetsnbextension==4.0.10 # via ipywidgets -zipp==3.7.0 - # via importlib-resources - -# The following packages are considered to be unsafe in a requirements file: -# setuptools +yarl==1.9.4 + # via aiohttp diff --git a/rights-on-images.csv b/rights-on-images.csv deleted file mode 100644 index b4e5689..0000000 --- a/rights-on-images.csv +++ /dev/null @@ -1,180 +0,0 @@ -id,full_name,Free/CC Public Domain,Free/CC BY,Free/CC0,Free/RS NKC,Free with conditions/CC BY-SA,Free with conditions/CC BY-NC,Free with conditions/CC BY-NC-ND,Free with conditions/CC BY-NC-SA,Free with conditions/InC-EDU,Restricted/RS InC,Restricted/RS InC-RUU,Restricted/RS CNE,Restricted/RS UND -VPWLH,4th/19th Prince of Wales' Light Horse Regiment Unit / 4th/19th Prince of Wales' Light Horse Regiment Unit. History Room.,0,0,0,0,0,0,0,1063,0,0,0,0,0 -ADFA,"Academy Library, UNSW Canberra.",0,0,0,0,0,0,0,1,0,0,0,0,0 -VAPRC,Albert Park-South Melbourne Rowing Club.,0,0,0,0,0,0,0,13,0,0,0,0,0 -VTAMA,Ararat Gallery TAMA.,0,0,0,0,0,0,0,407,0,0,0,0,0 -NACU:E,Australian Catholic University. 
/ Australian Catholic University Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VER,Australian Council for Educational Research / Australian Council for Educational Research. Cunningham Library (ACER).,0,0,0,0,0,0,1,0,0,0,0,0,0 -VJAZ,Australian Jazz Museum.,0,0,0,0,0,0,0,16,0,0,0,0,0 -AAUD,Australian National Audit Office / Australian National Audit Office. Information Research Centre.,0,0,0,0,0,0,0,1,0,0,0,0,0 -ANU,Australian National University. / Australian National University Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -ANU:IR,Australian National University. / Australian National University Library. / Australian National University: Institutional Repository.,0,8,0,0,2,0,0,0,0,0,0,0,0 -ANU:HITL,Australian National University. / Heritage in the Limelight.,0,1,0,0,0,0,0,0,0,0,0,0,0 -VANF,Australian Nursing and Midwifery Federation. / Victorian Branch Library.,0,0,0,0,0,0,0,77,0,0,0,0,0 -APC:WC,Australian Paralympic Committee / Australian Paralympic Committee. Images on Wikimedia Commons.,0,0,0,0,2563,0,0,0,0,0,0,0,0 -AAWM,Australian War Memorial. / Research Centre.,247673,0,0,0,0,0,0,0,0,0,0,0,0 -VGKM,Australian and New Zealand College of Anaesthetists. / Geoffrey Kaye Museum of Anaesthetic History.,0,0,0,0,0,0,0,331,0,0,0,0,0 -VBLM,B-24 Liberator Memorial Restoration Australia Inc.,0,0,0,0,0,0,0,117,0,0,0,0,0 -VBAIRN,Bairnsdale RSL Sub Branch.,0,0,0,0,0,0,0,5,0,0,0,0,0 -VBTNL,Ballarat Health Services. / Trained Nurses League.,0,0,0,0,0,0,0,1456,0,0,0,0,0 -VBAH,Ballarat Heritage Services.,0,0,0,0,0,0,0,4026,0,0,0,0,0 -VUB,Ballarat Regional Trades and Labour Council Inc..,0,0,0,0,0,0,0,576,0,0,0,0,0 -TBMHC,Beaconsfield Mine and Heritage Centre.,0,2,0,0,0,0,0,3,0,0,0,0,0 -VBA,Bendigo Art Gallery / Bendigo Art Gallery. Bendigo Art Gallery Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VBTH,Bendigo Trades Hall Council & Literary Institute Inc.,0,0,0,0,0,0,0,75,0,0,0,0,0 -VBCF,Blacksmith's Cottage & Forge.,0,0,0,0,0,0,0,107,0,0,0,0,0 -VRDN,Bolton Clarke Library Services.,0,0,0,0,0,0,0,193,0,0,0,0,0 -QBON,Bond University. / John and Alison Kearney Library .,0,63,0,0,2,1,121,0,0,0,0,0,0 -VBRI,Brighton Historical Society.,0,0,0,0,0,0,0,205,0,0,0,0,0 -VCARM,CAVAL. / CARM Centre.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VS:PUB,CSIRO Publishing / CSIRO Publishing. scienceimage.,0,3765,0,0,0,0,0,0,0,0,0,0,0 -VCGG,Camberwell Girls Grammar School.,0,0,0,0,0,0,0,28,0,0,0,0,0 -VBALL,Central Highlands Libraries / Central Highlands Libraries. Ballarat Library.,0,0,0,0,0,0,0,411,0,0,0,0,0 -QCQU,Central Queensland University. / Rockhampton Campus Library.,0,0,0,0,0,0,1,0,0,0,0,0,0 -QCQU:IR,Central Queensland University. / Rockhampton Campus Library. / Central Queensland University: Institutional Repository.,0,1,0,0,0,0,0,0,0,0,0,0,0 -XNTU,Charles Darwin University. / Casuarina Campus Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -NCSU:W,Charles Sturt University. / Wagga Wagga Campus Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VCHEL,Chelsea and District Historical Society Inc..,0,0,0,0,0,0,0,3,0,0,0,0,0 -VCCAP,Christ Church Anglican Parish of Warrnambool.,0,0,0,0,0,0,0,33,0,0,0,0,0 -VCIHF,Churchill Island Heritage Farm.,0,0,0,0,0,0,0,1519,0,0,0,0,0 -VBOR,City of Boroondara Library Service.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VCML,City of Melbourne Libraries.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VMHS,City of Mordialloc Historical Society Inc. / City of Mordialloc Historical Society Inc. 
Mordialloc and District Historical Society Library.,0,0,0,0,0,0,0,7,0,0,0,0,0 -NSCA,City of Sydney Archives.,329,59087,0,0,0,0,0,0,0,0,0,0,0 -NCHR,Coffs Collections.,0,16170,0,0,0,0,0,0,0,0,0,0,0 -VCHM,Creswick Museum.,0,0,0,0,0,0,0,25,0,0,0,0,0 -WCU,Curtin University. / Curtin University Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VDU,Deakin University. / Deakin University Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VDU:IR,Deakin University. / Deakin University Library. / Deakin University: Institutional Repository.,0,0,0,0,0,0,5,0,0,0,0,0,0 -VDVHS,Dingley Village Historical Society.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VEGFHG,East Gippsland Family History Group.,0,0,0,0,0,0,0,2,0,0,0,0,0 -WCX,Edith Cowan University. / Edith Cowan University Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -WCX:IR,Edith Cowan University. / Edith Cowan University Library. / Edith Cowan University: Institutional Repository.,0,1,0,0,0,0,0,0,0,0,0,0,0 -VELTH,Eltham District Historical Society.,0,0,0,0,0,0,0,10840,0,0,0,0,0 -SEPR,Eyre Pensinsula Railway Preservation Society.,0,0,0,631,0,0,0,0,0,1922,0,0,248 -VFED,Federation University Australia Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VFED:IR,Federation University Australia Library. / Federation Research Online.,0,2,0,0,0,0,0,0,0,0,0,0,0 -VFED:A,Federation University Australia Library. / Federation University Art Collection.,0,0,0,0,0,0,0,2041,0,0,0,0,0 -VFED:HC,Federation University Australia Library. / Federation University Historical Collection.,0,0,0,0,0,0,0,7328,0,0,0,0,0 -VFSM,Fire Services Museum of Victoria.,0,0,0,0,0,0,0,37,0,0,0,0,0 -YUF,Flickr.,15913,6760,81,0,6264,21018,17483,29193,4400,0,0,0,0 -SFU,Flinders University. / Flinders University Central Library.,0,0,0,0,0,0,0,2,0,0,0,0,0 -VGCC,Geelong Cycling Club.,0,0,0,0,0,0,0,94,0,0,0,0,0 -VGFC,Geelong Football Club.,0,0,0,0,0,0,0,11,0,0,0,0,0 -VGRSL,Geelong RSL Sub Branch.,0,0,0,0,0,0,0,227,0,0,0,0,0 -VGFCJ,Genazzano F.C.J. College Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VGEHS,Glen Eira Historical Society.,0,0,0,0,0,0,0,1363,0,0,0,0,0 -QGU:IR,Griffith University. / Griffith University Library. / Griffith Research Online.,0,1,0,0,0,0,0,1,0,0,0,0,0 -VHEP,Hepburn Shire Council.,0,0,0,0,0,0,0,9,0,0,0,0,0 -VNEA,High Country Library Network.,0,0,0,0,0,0,0,1,0,0,0,0,0 -NHH,Historic Houses Trust of New South Wales (Sydney Living Museums). / Caroline Simpson Library & Research Collection .,0,0,0,0,0,0,0,1,0,0,0,0,0 -VHOB:CC,Hobsons Bay City Council.,0,0,0,0,0,0,0,3,0,0,0,0,0 -VHOB,Hobsons Bay Libraries.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VBBM,Indigo Shire Council. / Beechworth Burke Museum.,0,0,0,0,0,0,0,523,0,0,0,0,0 -VILH,Inverloch Historical Society.,0,0,0,0,0,0,0,963,0,0,0,0,0 -QJCU,James Cook University. / Eddie Koiki Mabo Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -QJCU:IR,James Cook University. / Eddie Koiki Mabo Library. / Townsville Campus Library: Institutional Repository.,0,0,0,0,0,0,194,0,0,0,0,0,0 -VKHI,Kew Historical Society.,0,0,0,0,0,0,0,5079,0,0,0,0,0 -VKAC,Kingston Arts Centre.,0,0,0,0,0,0,0,64,0,0,0,0,0 -VKING,Kingston Information and Library Service. / Kingston Information & Library Service.,0,0,0,0,0,0,0,2,0,0,0,0,0 -VBX,"La Trobe University. / La Trobe University Library. / Bendigo Campus, Heyward Library.",0,0,0,0,0,0,0,1,0,0,0,0,0 -VLU,"La Trobe University. / La Trobe University Library. / Borchardt Library, Melbourne (Bundoora) Campus.",0,0,0,0,0,0,0,1,0,0,0,0,0 -VLTV,Latrobe City Library / Latrobe City Library. 
Morwell Public Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -TSL,Libraries Tasmania.,0,0,0,1,0,0,0,1,0,0,0,0,0 -XNLS,Library & Archives NT.,0,39821,0,0,0,0,0,0,0,0,0,0,0 -NMQU,Macquarie University. / Macquarie University Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VMBB,Maldon Brass Band.,0,0,0,0,0,0,0,132,0,0,0,0,0 -VMAS,Maldon Masonic Centre Association Inc..,0,0,0,0,0,0,0,287,0,0,0,0,0 -VMVM,Maldon Vintage Machinery Museum.,0,0,0,0,0,0,0,165,0,0,0,0,0 -VMLT:CHC,Melton City Council Library Service. / Community Heritage Collection.,0,0,0,0,0,27,0,0,0,0,0,0,0 -VMLT,Melton City Council Library Service. / Melton Library & Learning Hub.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VMCPF,Millewa Community Pioneer Forest & Historical Society.,0,0,0,0,0,0,0,61,0,0,0,0,0 -VMOU,Monash University. / Monash University Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VMOU:RD,Monash University. / Monash University Library. / Monash University Research Data.,3,30,0,0,1,4,10,4,2,28,1,0,0 -NMTC,Moore Theological College / Moore Theological College. Moore Theological College Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VMAVE,Mount Alexander Vintage Engine Club.,0,0,0,0,0,0,0,59,0,0,0,0,0 -WMDU,Murdoch University. / Murdoch University Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VMURR,Murrumbeena Cricket Club.,0,0,0,0,0,0,0,84,0,0,0,0,0 -VNMU:I,Museums Victoria. / Museums Victoria Collections.,0,93996,0,0,0,0,0,0,0,0,0,0,0 -VNMU,Museums Victoria. / Museums Victoria Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VNAM,National Alpine Museum of Australia Inc..,0,0,0,0,0,0,0,1,0,0,0,0,0 -ANG:C,National Gallery of Australia. / National Gallery of Australia Collection.,1,0,0,0,0,0,0,0,0,0,0,0,0 -VNG:C,National Gallery of Victoria. / National Gallery of Victoria Collection.,3,0,0,0,0,0,0,0,0,0,0,0,0 -VNG,National Gallery of Victoria. / The Shaw Research Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -ANL,National Library of Australia.,3,0,0,6,0,0,0,3,0,1,0,0,0 -AMOA:C,National Museum of Australia. / National Museum of Australia Collection.,5125,0,0,0,3095,0,0,393,0,12010,0,9209,152 -NNTA,National Trust of Australia (New South Wales). / National Trust of Australia (NSW) Archive/Library.,0,0,0,0,0,0,0,2,0,0,0,0,0 -VNWM,National Wool Museum.,0,0,0,0,0,0,0,7479,0,0,0,0,0 -VHCERB,Naval Heritage Collection. / HMAS Cerberus Museum.,0,0,0,0,0,0,0,59,0,0,0,0,0 -VOCSA,Old Castlemaine Schoolboys Association Inc..,0,0,0,0,0,0,0,404,0,0,0,0,0 -VORH:M,Orbost Historical Society Museum. / Orbost & District Historical Society.,0,0,0,0,0,0,0,2410,0,0,0,0,0 -APAR,Parliament of Australia / Parliament of Australia. Parliamentary Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VPID,Phillip Island and District Historical Society Inc.,0,0,0,0,0,0,0,2763,0,0,0,0,0 -NPMM,Port Macquarie Historical Society / Port Macquarie Historical Society. Port Macquarie Museum.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VPMH,Port Melbourne Historical & Preservation Society.,0,0,0,0,0,0,0,7393,0,0,0,0,0 -VPPLS,Port Phillip Library Service / Port Phillip Library Service. St Kilda Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -NMUS,Powerhouse Museum. / Research Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VPMEC,Prahran Mechanics' Institute Victorian History Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VPRO,Public Record Office Victoria. / Victorian Archives Centre.,0,0,0,0,0,0,0,1,0,1,0,0,0 -VPRO:VC,Public Record Office Victoria. / Victorian Collections records.,0,0,0,0,0,0,0,84,0,0,0,0,0 -QUT:DC,Queensland University of Technology. / Digital Collections.,419,4,857,0,0,1252,0,0,1,0,0,0,0 -QUT:CJJ,"Queensland University of Technology. 
/ International Journal for Crime, Justice and Social Democracy.",0,1,0,0,0,0,0,0,0,0,0,0,0 -QUT:IR,Queensland University of Technology. / Queensland University of Technology: Institutional Repository.,0,3,0,0,20,7,57,17,40,0,0,0,0 -VRSL,RSL Victoria - Anzac House Reference Library & Memorabilia Collection.,0,0,0,0,0,0,0,38,0,0,0,0,0 -VRDH,Ringwood and District Historical Society.,0,0,0,0,0,0,0,6281,0,0,0,0,0 -NCP,Royal Australasian College of Physicians / Royal Australasian College of Physicians. History of Medicine Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -NRHS,Royal Australian Historical Society / Royal Australian Historical Society. RAHS Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -SRGS,Royal Geographical Society of South Australia / Royal Geographical Society of South Australia. RGSSA Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VRHS,Royal Historical Society of Victoria / Royal Historical Society of Victoria. Royal Historical Society of Victoria.,0,0,0,0,0,0,0,18,0,0,0,0,0 -VRGS,Ruyton Girls' School.,0,0,0,0,0,0,0,27,0,0,0,0,0 -VSOCUM,"School of Chemistry, University of Melbourne.",0,0,0,0,0,0,0,399,0,0,0,0,0 -VSHGM,Sovereign Hill Gold Museum / Sovereign Hill Gold Museum. Sovereign Hill Gold Museum Library.,1,0,0,0,0,0,0,0,0,0,0,0,0 -VSKHS,St Kilda Historical Society.,0,0,0,0,0,0,0,797,0,0,0,0,0 -VU:M,St Mary's College & Newman College Academic Centre Library.,0,0,0,0,0,0,0,2,0,0,0,0,0 -VSV:ARC,St Vincent's Hospital (Melbourne). / St Vincent's Hospital Melbourne Archives.,0,0,0,0,0,0,0,9,0,0,0,0,0 -VSL,State Library Victoria.,4,0,0,1,0,0,0,1,0,1,0,0,0 -NSL,State Library of NSW.,0,0,0,0,0,0,0,2,0,0,0,0,0 -QSL,State Library of Queensland.,0,1702,0,0,11,11,156,24,23,0,0,0,0 -SSL,State Library of South Australia.,0,0,0,0,0,0,0,1,0,0,0,0,0 -WLB,State Library of Western Australia.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VSDH,Sunshine & District Historical Society Incorporated / Sunshine & District Historical Society Incorporated. Sunshine & District Historical Society Incorporated.,0,0,0,0,0,0,0,51,0,0,0,0,0 -VSWT:DC,Swinburne University of Technology. / Swinburne Library. / Swinburne Commons.,402,4,2,0,36,7,4528,8,0,0,0,0,0 -VTAR,Tarrangower History (Maldon & District).,0,0,0,0,0,0,0,162,0,0,0,0,0 -VTAT,Tatura Historical Society / Tatura Historical Society. Tatura Irrigation & Wartime Camps Museum.,0,0,0,0,0,0,0,5094,0,0,0,0,0 -VU:DC,The University of Melbourne. / Digitised collections.,0,0,0,0,0,0,0,19,0,0,0,0,0 -VU:MHW,The University of Melbourne. / Melbourne History Workshop.,0,324,0,0,0,0,0,0,0,0,0,0,0 -VU,The University of Melbourne. / The University of Melbourne Library.,0,0,0,0,0,0,0,2,0,0,0,0,0 -QU,The University of Queensland. / University of Queensland Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -QU:IR,The University of Queensland. / University of Queensland Library. / University of Queensland: Institutional Repository.,0,0,0,0,0,0,118,0,0,0,0,0,0 -NU,The University of Sydney. / University of Sydney Library.,0,0,0,1,0,0,0,1,0,0,0,0,0 -NU:IR,The University of Sydney. / University of Sydney Library. / University of Sydney: Institutional Repository.,0,1,0,0,0,0,1,0,0,0,0,0,0 -VTFB,Thompson's Foundry Band Inc.,0,0,0,0,0,0,0,1515,0,0,0,0,0 -VTRIN,Trinity College.,0,0,0,0,0,0,0,4,0,0,0,0,0 -ANL:DL,Trove Digital Library.,0,0,0,1494,0,0,0,0,0,0,0,0,0 -NUN,UNSW Sydney. / UNSW Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -NUN:IR,UNSW Sydney. / UNSW Library. / University of New South Wales: Institutional Repository.,0,0,0,0,0,0,218,0,0,0,0,0,0 -VUAR,Uniting Church Archives (Vic).,0,0,0,0,0,0,0,1571,0,0,0,0,0 -SUA,University of Adelaide. 
/ Barr Smith Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -AUC:LR,University of Canberra. / National Centre for Australian Children's Literature.,0,0,0,0,0,0,0,1,0,0,0,0,0 -AUC,University of Canberra. / University of Canberra Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VMCD,University of Divinity Libraries.,0,3,0,0,0,0,0,0,0,0,0,0,0 -NUNE:IR,University of New England. / Institutional Repository.,0,4,0,0,0,2,0,1,0,0,0,0,0 -NNCU:A,University of Newcastle Library.,0,0,0,0,0,0,0,2,0,0,0,0,0 -SUSA:IR,University of South Australia. / University of South Australia Library. / University of South Australia: Institutional Repository.,0,4,0,0,0,0,0,0,0,0,0,0,0 -TU,University of Tasmania.,0,0,0,0,0,0,0,1,0,0,0,0,0 -TU:OR,University of Tasmania. / Library Open Repository.,1,9,0,0,0,3,6,0,0,0,0,0,0 -TU:IR,University of Tasmania. / University of Tasmania. Institutional Repository.,0,0,0,0,0,0,1,0,0,0,0,0,0 -NTSM:E,University of Technology Sydney. / UTS ePRESS.,0,3,0,0,0,0,0,0,0,0,0,0,0 -NTSM,University of Technology Sydney. / University Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -WU,University of Western Australia. / University of Western Australia Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VDEAF,Vicdeaf.,0,0,0,0,0,0,0,218,0,0,0,0,0 -VPOL:M,Victoria Police Museum.,0,0,0,0,0,0,0,911,0,0,0,0,0 -VVUT,Victoria University. / Victoria University Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VVBL,Victorian Bands' League.,0,0,0,0,0,0,0,21,0,0,0,0,0 -VDOI,"Victorian Government Library Service. / Department of Transport, Planning and Local Infrastructure.",0,0,0,0,0,0,0,1,0,0,0,0,0 -VIP,Victorian Interpretive Projects.,0,0,0,0,0,0,0,301,0,0,0,0,0 -VWARR,Warrnambool and District Historical Society Inc..,0,0,0,0,0,0,0,2122,0,0,0,0,0 -NUWS:IR,Western Sydney University. / Library. / Institutional Repository.,0,0,0,0,0,0,1,0,0,0,0,0,0 -NWML,Willoughby City Library. / Chatswood Central Library.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VYML,Yarra Libraries.,0,0,0,0,0,0,0,1,0,0,0,0,0 -VYRRM,Yarra Ranges Regional Museum.,0,0,0,0,0,0,0,1,0,0,0,0,0 diff --git a/rights-on-out-of-copyright-photos.csv b/rights-on-out-of-copyright-photos.csv deleted file mode 100644 index 49fa283..0000000 --- a/rights-on-out-of-copyright-photos.csv +++ /dev/null @@ -1,160 +0,0 @@ -id,full_name,Free/CC Public Domain,Free/CC BY,Free/CC0,Free/RS NKC,Free with conditions/CC BY-SA,Free with conditions/CC BY-NC,Free with conditions/CC BY-NC-ND,Free with conditions/CC BY-NC-SA,Free with conditions/InC-EDU,Restricted/RS InC,Restricted/RS CNE,Restricted/RS UND -ADFA,"Academy Library, UNSW Canberra.",0,0,0,1,0,0,0,1,0,0,0,0 -SAA,Art Gallery of South Australia / Art Gallery of South Australia. Art Gallery of South Australia Research Library.,0,0,0,1,0,0,0,1,0,0,0,0 -NACU:E,Australian Catholic University. / Australian Catholic University Library.,0,0,0,1,0,0,0,2,0,0,0,0 -VER,Australian Council for Educational Research / Australian Council for Educational Research. Cunningham Library (ACER).,0,0,0,0,0,0,0,1,0,0,0,0 -AIAS,Australian Institute of Aboriginal and Torres Strait Islander Studies. / AIATSIS Collections.,0,0,0,1,0,0,0,2,0,0,0,0 -SL,Australian Lutheran College / Australian Lutheran College. Lohe Memorial Library.,0,0,0,1,0,0,0,1,0,0,0,0 -NAMU,Australian Museum. / Australian Museum Research Library.,0,0,0,1,0,0,0,1,0,0,0,0 -ANU,Australian National University. / Australian National University Library.,0,0,0,1,0,0,0,1,0,0,0,0 -ANU:IR,Australian National University. / Australian National University Library. 
/ Australian National University: Institutional Repository.,0,2,0,0,0,0,0,0,0,0,0,0 -ANU:HITL,Australian National University. / Heritage in the Limelight.,0,1,0,0,0,0,0,0,0,0,0,0 -APC:WC,Australian Paralympic Committee / Australian Paralympic Committee. Images on Wikimedia Commons.,0,0,0,0,1,0,0,0,0,0,0,0 -VBAH,Ballarat Heritage Services.,0,0,0,0,0,0,0,17,0,0,0,0 -VGH,Barwon Health / Barwon Health. Barwon Health Library Service.,0,0,0,0,0,0,0,1,0,0,0,0 -XBATCH,Batchelor Institute of Indigenous Tertiary Education.,0,0,0,1,0,0,0,1,0,0,0,0 -XBATCH:B,Batchelor Institute of Indigenous Tertiary Education. / Batchelor Joint Use Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VBAY,Bayside Library Service / Bayside Library Service. Bayside Library Service.,0,0,0,1,0,0,0,3,0,0,0,0 -TBMHC,Beaconsfield Mine and Heritage Centre.,0,1,0,0,0,0,0,0,0,0,0,0 -VRDN,Bolton Clarke Library Services.,0,0,0,0,0,0,0,34,0,0,0,0 -QBON,Bond University. / John and Alison Kearney Library .,0,0,0,1,0,0,0,1,0,0,0,0 -VECA,Campaspe Regional Library / Campaspe Regional Library. Echuca Library.,0,0,0,0,0,0,0,1,0,0,0,0 -ACIT,Canberra Institute of Technology / Canberra Institute of Technology. Reid Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VCCLC,Casey - Cardinia Library Corporation / Casey - Cardinia Library Corporation. Cranbourne Library.,0,0,0,0,0,0,0,2,0,0,0,0 -VBALL,Central Highlands Libraries / Central Highlands Libraries. Ballarat Library.,0,0,0,0,0,0,0,1,0,0,0,0 -XNTU,Charles Darwin University. / Casuarina Campus Library.,0,0,0,1,0,0,0,1,0,0,0,0 -NCSU:A,Charles Sturt University. / Albury-Wodonga Campus Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VMT,Chisholm Institute.,0,0,0,0,0,0,0,1,0,0,0,0 -VBOR,City of Boroondara Library Service.,0,0,0,0,0,0,0,2,0,0,0,0 -VDGV,City of Greater Dandenong Libraries / City of Greater Dandenong Libraries. Dandenong Library.,0,0,0,0,0,0,0,1,0,0,0,0 -NSCA,City of Sydney Archives.,323,5158,0,0,0,0,0,0,0,0,0,0 -NCLL,Clarence Regional Library / Clarence Regional Library. Clarence Regional Library Headquarters.,0,0,0,1,0,0,0,1,0,0,0,0 -NCHR,Coffs Collections.,0,4373,0,0,0,0,0,0,0,0,0,0 -VCGL,Corangamite Regional Library Corporation / Corangamite Regional Library Corporation. Corangamite Regional Library Corporation Library.,0,0,0,1,0,0,0,1,0,0,0,0 -WCU,Curtin University. / Curtin University Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VPRE,Darebin Libraries / Darebin Libraries. Preston Library.,0,0,0,0,0,0,0,1,0,0,0,0 -VDU,Deakin University. / Deakin University Library.,4,0,0,1,0,0,0,1,0,0,0,0 -VDU:IR,Deakin University. / Deakin University Library. / Deakin University: Institutional Repository.,7,0,0,0,0,0,0,0,0,0,0,0 -VDSTO,Defence Science and Technology Group. / DSTG Research Data and Information.,0,0,0,0,0,0,0,1,0,0,0,0 -WCX,Edith Cowan University. / Edith Cowan University Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VELTH,Eltham District Historical Society.,0,0,0,0,0,0,0,22,0,0,0,0 -SEPR,Eyre Pensinsula Railway Preservation Society.,0,0,0,224,0,0,0,0,0,0,0,0 -NFDC:S,Federal Court of Australia. / Library & Information Services (South Australia).,0,0,0,1,0,0,0,1,0,0,0,0 -NFDC:V,Federal Court of Australia. / Library & Information Services (Victoria).,0,0,0,0,0,0,0,1,0,0,0,0 -NFDC:W,Federal Court of Australia. / Library & Information Services (Western Australia).,0,0,0,1,0,0,0,1,0,0,0,0 -VFED,Federation University Australia Library.,0,0,0,0,0,0,0,1,0,0,0,0 -VFED:G,Federation University Australia Library. / Federation University Australia - Gippsland campus library.,0,0,0,1,0,0,0,2,0,0,0,0 -VFED:HC,Federation University Australia Library. 
/ Federation University Historical Collection.,0,0,0,0,0,0,0,28,0,0,0,0 -YUF,Flickr.,1263,847,1,0,778,64,703,398,2,0,0,0 -SFU,Flinders University. / Flinders University Central Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VFRK,Frankston Library Service / Frankston Library Service. Frankston City Libraries .,0,0,0,0,0,0,0,1,0,0,0,0 -VKER,Gannawarra Library Service / Gannawarra Library Service. Sir John Gorton Library - Kerang.,0,0,0,0,0,0,0,2,0,0,0,0 -VGCC,Geelong Cycling Club.,0,0,0,0,0,0,0,2,0,0,0,0 -VGEE,Geelong Regional Library Corporation / Geelong Regional Library Corporation. Library Administration.,0,0,0,1,0,0,0,1,0,0,0,0 -VGEHS,Glen Eira Historical Society.,0,0,0,0,0,0,0,1,0,0,0,0 -VGLEN,Glenelg Shire Council / Glenelg Shire Council. Glenelg Libraries.,0,0,0,0,0,0,0,2,0,0,0,0 -VNCG,Goldfields Library Corporation.,0,0,0,0,0,0,0,1,0,0,0,0 -VGBV,Goulburn Ovens Institute of TAFE / Goulburn Ovens Institute of TAFE. Goulburn Ovens Institute of TAFE Library.,0,0,0,0,0,0,0,1,0,0,0,0 -VGVH,Goulburn Valley Libraries / Goulburn Valley Libraries. Shepparton City Library.,0,0,0,0,0,0,0,1,0,0,0,0 -QGU,Griffith University. / Griffith University Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VNEA,High Country Library Network.,0,0,0,0,0,0,0,1,0,0,0,0 -VHOB,Hobsons Bay Libraries.,0,0,0,0,0,0,0,1,0,0,0,0 -VBBM,Indigo Shire Council. / Beechworth Burke Museum.,0,0,0,0,0,0,0,1,0,0,0,0 -NINV,Inverell Shire Council / Inverell Shire Council. Inverell Shire Public Library.,0,0,0,1,0,0,0,1,0,0,0,0 -QJCU,James Cook University. / Eddie Koiki Mabo Library.,0,0,0,1,0,0,0,1,0,0,0,0 -QJCU:IR,James Cook University. / Eddie Koiki Mabo Library. / Townsville Campus Library: Institutional Repository.,0,0,0,0,0,0,3,0,0,0,0,0 -VKHI,Kew Historical Society.,0,0,0,0,0,0,0,26,0,0,0,0 -VKING,Kingston Information and Library Service. / Kingston Information & Library Service.,0,0,0,0,0,0,0,2,0,0,0,0 -VBX,"La Trobe University. / La Trobe University Library. / Bendigo Campus, Heyward Library.",0,0,0,1,0,0,0,2,0,0,0,0 -VLU,"La Trobe University. / La Trobe University Library. / Borchardt Library, Melbourne (Bundoora) Campus.",0,0,0,1,0,0,0,1,0,0,0,0 -VSCT,La Trobe University. / Sunraysia Institute of TAFE AND La Trobe University - Mildura Campus Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VLTV,Latrobe City Library / Latrobe City Library. Morwell Public Library.,0,0,0,0,0,0,0,1,0,0,0,0 -APLS,Libraries ACT.,0,0,0,1,0,0,0,1,0,0,0,0 -TSL,Libraries Tasmania.,0,0,0,1,0,0,0,1,0,0,0,0 -XNLS,Library & Archives NT.,0,1421,0,1,0,0,0,1,0,0,0,0 -XNLS:GC,Library & Archives NT. / General Collection.,0,0,0,1,0,0,0,1,0,0,0,0 -XNLS:GS,Library & Archives NT. / Offsite Storage.,0,0,0,1,0,0,0,1,0,0,0,0 -NMQU,Macquarie University. / Macquarie University Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VMC,Mazenod College / Mazenod College. Mazenod College Library.,0,0,0,0,0,0,0,1,0,0,0,0 -VNMT,Melbourne Polytechnic.,0,0,0,0,0,0,0,1,0,0,0,0 -VMLT:CHC,Melton City Council Library Service. / Community Heritage Collection.,0,0,0,0,0,15,0,0,0,0,0,0 -VMLT,Melton City Council Library Service. / Melton Library & Learning Hub.,0,0,0,1,0,0,0,2,0,0,0,0 -VMIL,Mildura Rural City Council Library Service / Mildura Rural City Council Library Service. Mildura Library.,0,0,0,0,0,0,0,1,0,0,0,0 -VMON,Monash Public Library Service / Monash Public Library Service. Monash Public Library Service.,0,0,0,0,0,0,0,1,0,0,0,0 -VMOU,Monash University. / Monash University Library.,0,0,0,1,0,0,0,2,0,0,0,0 -VMVL,Moonee Valley Libraries / Moonee Valley Libraries. 
Moonee Valley Libraries.,0,0,0,0,0,0,0,1,0,0,0,0 -VMOR,Moreland City Libraries / Moreland City Libraries. Coburg Library.,0,0,0,0,0,0,0,1,0,0,0,0 -VPEN,Mornington Peninsula Library / Mornington Peninsula Library. Mornington Peninsula Library Service.,0,0,0,0,0,0,0,1,0,0,0,0 -WMDU,Murdoch University. / Murdoch University Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VNMU:I,Museums Victoria. / Museums Victoria Collections.,0,33388,0,0,0,0,0,0,0,0,0,0 -VNMU,Museums Victoria. / Museums Victoria Library.,0,0,0,1,0,0,0,1,0,0,0,0 -XNTA,Museums and Art Galleries of the Northern Territory / Museums and Art Galleries of the Northern Territory. Peter Spillett Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VWGP,Myli - My Community Library / Myli - My Community Library. Regional Support Centre.,0,0,0,0,0,0,0,1,0,0,0,0 -XKATR,NT Schools Katherine Region.,0,0,0,1,0,0,0,1,0,0,0,0 -XKATR:KH,NT Schools Katherine Region. / Katherine High School / Library Resource Centre.,0,0,0,1,0,0,0,1,0,0,0,0 -ANG,National Gallery of Australia. / National Gallery of Australia Research Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VNG,National Gallery of Victoria. / The Shaw Research Library.,0,0,0,1,0,0,0,1,0,0,0,0 -ANL,National Library of Australia.,0,0,0,4,0,0,0,1,0,1,0,0 -AMOA:C,National Museum of Australia. / National Museum of Australia Collection.,347,0,0,0,26,0,0,0,0,117,393,1 -AMOA,National Museum of Australia. / National Museum of Australia Research Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VNWM,National Wool Museum.,0,0,0,0,0,0,0,3,0,0,0,0 -VORH:M,Orbost Historical Society Museum. / Orbost & District Historical Society.,0,0,0,0,0,0,0,14,0,0,0,0 -NPEN,Penrith City Council / Penrith City Council. Penrith City Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VPID,Phillip Island and District Historical Society Inc.,0,0,0,0,0,0,0,340,0,0,0,0 -VPMH,Port Melbourne Historical & Preservation Society.,0,0,0,0,0,0,0,58,0,0,0,0 -VPPLS,Port Phillip Library Service / Port Phillip Library Service. St Kilda Library.,0,0,0,0,0,0,0,2,0,0,0,0 -VPMEC,Prahran Mechanics' Institute Victorian History Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VPRO,Public Record Office Victoria. / Victorian Archives Centre.,0,0,0,0,0,0,0,1,0,1,0,0 -TQVM,Queen Victoria Museum and Art Gallery / Queen Victoria Museum and Art Gallery. Queen Victoria Museum and Art Gallery Library.,0,0,0,1,0,0,0,1,0,0,0,0 -QMU,Queensland Museum. / Queensland Museum Library.,0,0,0,1,0,0,0,1,0,0,0,0 -QUT,Queensland University of Technology.,0,0,0,1,0,0,0,1,0,0,0,0 -QUT:DC,Queensland University of Technology. / Digital Collections.,100,3,405,0,0,0,0,0,0,0,0,0 -VIT,RMIT University. / RMIT University Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VRDH,Ringwood and District Historical Society.,0,0,0,0,0,0,0,4,0,0,0,0 -NRHS,Royal Australian Historical Society / Royal Australian Historical Society. RAHS Library.,0,0,0,1,0,0,0,1,0,0,0,0 -SRGS,Royal Geographical Society of South Australia / Royal Geographical Society of South Australia. RGSSA Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VRHS,Royal Historical Society of Victoria / Royal Historical Society of Victoria. Royal Historical Society of Victoria.,0,0,0,1,0,0,0,2,0,0,0,0 -NSAG,Society of Australian Genealogists / Society of Australian Genealogists. SAG Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VSWC,South West Institute of TAFE / South West Institute of TAFE. South West Institute of TAFE Library.,0,0,0,0,0,0,0,1,0,0,0,0 -VSHGM,Sovereign Hill Gold Museum / Sovereign Hill Gold Museum. 
Sovereign Hill Gold Museum Library.,1,0,0,0,0,0,0,0,0,0,0,0 -VU:M,St Mary's College & Newman College Academic Centre Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VSL,State Library Victoria.,1,0,0,2,0,0,0,2,0,1,0,0 -NSL,State Library of NSW.,0,0,0,1,0,0,0,2,0,0,0,0 -QSL,State Library of Queensland.,0,16,0,1,0,0,0,1,0,0,0,0 -SSL,State Library of South Australia.,0,0,0,1,0,0,0,1,0,0,0,0 -WLB,State Library of Western Australia.,0,0,0,1,0,0,0,1,0,0,0,0 -VSC,Supreme Court of Victoria / Supreme Court of Victoria. The Law Library of Victoria.,0,0,0,0,0,0,0,2,0,0,0,0 -VSWN,Swan Hill Regional Library Service / Swan Hill Regional Library Service. Swan Hill Library.,0,0,0,0,0,0,0,1,0,0,0,0 -VSWT:DC,Swinburne University of Technology. / Swinburne Library. / Swinburne Commons.,377,0,0,0,0,0,105,0,0,0,0,0 -VEGC,TAFE Gippsland. / TAFE Gippsland.,0,0,0,0,0,0,0,1,0,0,0,0 -VTAT,Tatura Historical Society / Tatura Historical Society. Tatura Irrigation & Wartime Camps Museum.,0,0,0,0,0,0,0,34,0,0,0,0 -VC,The Carmelite Library .,0,0,0,1,0,0,0,1,0,0,0,0 -VU:MHW,The University of Melbourne. / Melbourne History Workshop.,0,18,0,0,0,0,0,0,0,0,0,0 -VU,The University of Melbourne. / The University of Melbourne Library.,0,0,0,1,0,0,0,2,0,0,0,0 -QU,The University of Queensland. / University of Queensland Library.,0,0,0,1,0,0,0,1,0,0,0,0 -QU:IR,The University of Queensland. / University of Queensland Library. / University of Queensland: Institutional Repository.,0,0,0,0,0,0,14,0,0,0,0,0 -NU,The University of Sydney. / University of Sydney Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VTFB,Thompson's Foundry Band Inc.,0,0,0,0,0,0,0,1,0,0,0,0 -ANL:DL,Trove Digital Library.,0,0,0,1274,0,0,0,1,0,0,0,0 -NUN,UNSW Sydney. / UNSW Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VUAR,Uniting Church Archives (Vic).,0,0,0,0,0,0,0,1,0,0,0,0 -SUA,University of Adelaide. / Barr Smith Library.,0,0,0,1,0,0,0,1,0,0,0,0 -NNCU:A,University of Newcastle Library.,0,0,0,1,0,0,0,1,0,0,0,0 -WUND,University of Notre Dame Australia. / St Teresa's Library.,0,0,0,1,0,0,0,1,0,0,0,0 -SUSA,University of South Australia. / University of South Australia Library.,0,0,0,1,0,0,0,1,0,0,0,0 -TU,University of Tasmania.,0,0,0,1,0,0,0,1,0,0,0,0 -TU:OR,University of Tasmania. / Library Open Repository.,1,1,0,0,0,2,5,0,0,0,0,0 -WU,University of Western Australia. / University of Western Australia Library.,0,0,0,1,0,0,0,1,0,0,0,0 -NWU,University of Wollongong. / University of Wollongong Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VPOL:M,Victoria Police Museum.,0,0,0,0,0,0,0,11,0,0,0,0 -VVUT,Victoria University. / Victoria University Library.,0,0,0,0,0,0,0,2,0,0,0,0 -VACL,Victorian Aboriginal Corporation for Languages.,0,0,0,1,0,0,0,3,0,0,0,0 -VDPI,Victorian Government Library Service. / Department of Environment and Primary Industries.,0,0,0,1,0,0,0,2,0,0,0,0 -VDOI,"Victorian Government Library Service. / Department of Transport, Planning and Local Infrastructure.",0,0,0,1,0,0,0,2,0,0,0,0 -VWARR,Warrnambool and District Historical Society Inc..,0,0,0,0,0,0,0,4,0,0,0,0 -NWAV,Waverley Council / Waverley Council. Waverley Library.,0,0,0,1,0,0,0,1,0,0,0,0 -WMU,Western Australian Museum / Western Australian Museum. Western Australian Museum Library.,0,0,0,1,0,0,0,1,0,0,0,0 -NUWS:W,Western Sydney University. / Penrith Campus Library.,0,0,0,1,0,0,0,1,0,0,0,0 -VWMR,Whitehorse Manningham Regional Library Corporation / Whitehorse Manningham Regional Library Corporation. Whitehorse Manningham Libraries.,0,0,0,0,0,0,0,1,0,0,0,0 -VWIM,Wimmera Regional Library Corporation. 
/ Horsham Libraries.,0,0,0,0,0,0,0,1,0,0,0,0 -VWYN,Wyndham City Council Library Service / Wyndham City Council Library Service. Wyndham City Libraries.,0,0,0,0,0,0,0,1,0,0,0,0 diff --git a/rights-statements-on-images.ipynb b/rights-statements-on-images.ipynb index 0db7eea..650cc27 100644 --- a/rights-statements-on-images.ipynb +++ b/rights-statements-on-images.ipynb @@ -2,7 +2,13 @@ "cells": [ { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, "source": [ "# The use of standard licences and rights statements in Trove image records\n", "\n", @@ -25,15 +31,32 @@ }, { "cell_type": "code", - "execution_count": 29, - "metadata": {}, - "outputs": [], + "execution_count": 4, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "True" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "import os\n", - "import time\n", "\n", "import pandas as pd\n", "import requests_cache\n", + "from dotenv import load_dotenv\n", "from requests.adapters import HTTPAdapter\n", "from requests.packages.urllib3.util.retry import Retry\n", "from tqdm.notebook import tqdm\n", @@ -42,25 +65,14 @@ "s = requests_cache.CachedSession()\n", "retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])\n", "s.mount(\"http://\", HTTPAdapter(max_retries=retries))\n", - "s.mount(\"https://\", HTTPAdapter(max_retries=retries))" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "metadata": {}, - "outputs": [], - "source": [ - "%%capture\n", - "# Load variables from the .env file if it exists\n", - "# Use %%capture to suppress messages\n", - "%load_ext dotenv\n", - "%dotenv" + "s.mount(\"https://\", HTTPAdapter(max_retries=retries))\n", + "\n", + "load_dotenv()" ] }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -92,8 +104,14 @@ }, { "cell_type": "code", - "execution_count": 32, - "metadata": {}, + "execution_count": 39, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, "outputs": [], "source": [ "# Insert your Trove API key\n", @@ -101,13 +119,21 @@ "\n", "# Use api key value from environment variables if it is available\n", "if os.getenv(\"TROVE_API_KEY\"):\n", - " API_KEY = os.getenv(\"TROVE_API_KEY\")" + " API_KEY = os.getenv(\"TROVE_API_KEY\")\n", + "\n", + "HEADERS = {\"X-API-KEY\": API_KEY}" ] }, { "cell_type": "code", - "execution_count": 33, - "metadata": {}, + "execution_count": 41, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, "outputs": [], "source": [ "def save_summary(contributors, record, parent=None):\n", @@ -121,11 +147,11 @@ " summary[\"full_name\"] = f'{parent[\"full_name\"]} / {record[\"name\"]}'\n", " elif \"parent\" in record:\n", " summary[\"parent_id\"] = record[\"parent\"][\"id\"]\n", - " summary[\"full_name\"] = f'{record[\"parent\"][\"value\"]} / {record[\"name\"]}'\n", + " summary[\"full_name\"] = f'{record[\"parent\"][\"name\"]} / {record[\"name\"]}'\n", " else:\n", " summary[\"full_name\"] = record[\"name\"]\n", " if \"children\" in record:\n", - " for child in record[\"children\"][\"contributor\"]:\n", + " for child in record[\"children\"]:\n", " save_summary(contributors, child, summary)\n", " contributors.append(summary)\n", "\n", @@ -138,20 +164,27 @@ " contributors = []\n", " 
contrib_params = {\"key\": API_KEY, \"encoding\": \"json\", \"reclevel\": \"full\"}\n", " response = s.get(\n", - " \"https://api.trove.nla.gov.au/v2/contributor/\",\n", + " \"https://api.trove.nla.gov.au/v3/contributor/\",\n", " params=contrib_params,\n", + " headers=HEADERS,\n", " timeout=60,\n", " )\n", " data = response.json()\n", - " for record in data[\"response\"][\"contributor\"]:\n", + " for record in data[\"contributor\"]:\n", " save_summary(contributors, record)\n", " return contributors" ] }, { "cell_type": "code", - "execution_count": 34, - "metadata": {}, + "execution_count": 42, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, "outputs": [], "source": [ "def contributor_has_results(contrib, params, additional_query):\n", @@ -164,10 +197,14 @@ " query += f\" {additional_query}\"\n", " params[\"q\"] = query\n", " response = s.get(\n", - " \"https://api.trove.nla.gov.au/v2/result\", params=params, timeout=60\n", + " \"https://api.trove.nla.gov.au/v3/result\",\n", + " params=params,\n", + " headers=HEADERS,\n", + " timeout=60,\n", " )\n", + "\n", " data = response.json()\n", - " total = int(data[\"response\"][\"zone\"][0][\"records\"][\"total\"])\n", + " total = int(data[\"category\"][0][\"records\"][\"total\"])\n", " if total > 0:\n", " return True\n", "\n", @@ -178,7 +215,8 @@ " \"\"\"\n", " contributors = get_contributors()\n", " licence_counts = []\n", - " params = {\"key\": API_KEY, \"encoding\": \"json\", \"zone\": \"picture\", \"n\": 0}\n", + " params = {\"encoding\": \"json\", \"category\": \"image\", \"n\": 0}\n", + "\n", " for contrib in tqdm(contributors):\n", " # If there are no results for this contributor then there's no point checking for licences\n", " # This should save a bit of time\n", @@ -194,15 +232,14 @@ " query += f\" {additional_query}\"\n", " params[\"q\"] = query\n", " response = s.get(\n", - " \"https://api.trove.nla.gov.au/v2/result\",\n", + " \"https://api.trove.nla.gov.au/v3/result\",\n", " params=params,\n", + " headers=HEADERS,\n", " timeout=60,\n", " )\n", " data = response.json()\n", - " total = data[\"response\"][\"zone\"][0][\"records\"][\"total\"]\n", + " total = data[\"category\"][0][\"records\"][\"total\"]\n", " contrib_row[licence] = int(total)\n", - " if not response.from_cache:\n", - " time.sleep(0.2)\n", " # print(contrib_row)\n", " licence_counts.append(contrib_row)\n", " return licence_counts" @@ -211,7 +248,15 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [ + "nbval-skip" + ] + }, "outputs": [], "source": [ "licence_counts_not_books = licence_counts_by_institution('NOT format:\"Book\"')" @@ -226,8 +271,16 @@ }, { "cell_type": "code", - "execution_count": 36, - "metadata": {}, + "execution_count": 44, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [ + "nbval-skip" + ] + }, "outputs": [], "source": [ "df = pd.DataFrame(licence_counts_not_books)" @@ -235,8 +288,16 @@ }, { "cell_type": "code", - "execution_count": 37, - "metadata": {}, + "execution_count": 45, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [ + "nbval-skip" + ] + }, "outputs": [], "source": [ "# Fill empty totals with zeros & make them all integers\n", @@ -245,36 +306,44 @@ }, { "cell_type": "code", - "execution_count": 38, - "metadata": {}, + "execution_count": 46, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + 
}, + "tags": [ + "nbval-skip" + ] + }, "outputs": [ { "data": { "text/plain": [ - "Free/CC Public Domain 269877\n", - "Free/CC BY 221770\n", - "Free/CC0 940\n", - "Free/RS NKC 2134\n", + "Free/CC Public Domain 308691\n", + "Free/CC BY 171779\n", + "Free/CC0 2130\n", + "Free/RS NKC 5892\n", "Free/RS Noc-US 0\n", "Free with conditions/CC BY-ND 0\n", - "Free with conditions/CC BY-SA 11994\n", - "Free with conditions/CC BY-NC 22332\n", - "Free with conditions/CC BY-NC-ND 22901\n", - "Free with conditions/CC BY-NC-SA 109934\n", + "Free with conditions/CC BY-SA 13045\n", + "Free with conditions/CC BY-NC 23991\n", + "Free with conditions/CC BY-NC-ND 25022\n", + "Free with conditions/CC BY-NC-SA 125873\n", "Free with conditions/RS NoC-NC 0\n", "Free with conditions/InC-NC 0\n", - "Free with conditions/InC-EDU 4466\n", - "Restricted/RS InC 13963\n", + "Free with conditions/InC-EDU 4639\n", + "Restricted/RS InC 14613\n", "Restricted/RS InC-OW-EU 0\n", "Restricted/RS InC-RUU 1\n", - "Restricted/RS CNE 9209\n", - "Restricted/RS UND 400\n", + "Restricted/RS CNE 12868\n", + "Restricted/RS UND 415\n", "Restricted/NoC-CR 0\n", "Restricted/NoC-OKLR 0\n", "dtype: int64" ] }, - "execution_count": 38, + "execution_count": 46, "metadata": {}, "output_type": "execute_result" } @@ -286,8 +355,16 @@ }, { "cell_type": "code", - "execution_count": 39, - "metadata": {}, + "execution_count": 47, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [ + "nbval-skip" + ] + }, "outputs": [], "source": [ "# Remove columns we don't need\n", @@ -296,8 +373,16 @@ }, { "cell_type": "code", - "execution_count": 40, - "metadata": {}, + "execution_count": 48, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [ + "nbval-skip" + ] + }, "outputs": [], "source": [ "# Remove rows that add up to zero\n", @@ -306,8 +391,16 @@ }, { "cell_type": "code", - "execution_count": 41, - "metadata": {}, + "execution_count": 49, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [ + "nbval-skip" + ] + }, "outputs": [], "source": [ "# Remove columns that are all zero\n", @@ -316,8 +409,12 @@ }, { "cell_type": "code", - "execution_count": 42, + "execution_count": 50, "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, "tags": [ "nbval-skip" ] @@ -330,16 +427,13 @@ }, { "cell_type": "markdown", - "metadata": {}, - "source": [ - "See the results here:\n", - "\n", - "* [rights-on-images.csv](rights-on-images.csv)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, "source": [ "## Are there any licences applied to out-of-copyright images?\n", "\n", @@ -349,7 +443,15 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [ + "nbval-skip" + ] + }, "outputs": [], "source": [ "licence_counts_out_of_copyright = licence_counts_by_institution(\n", @@ -359,8 +461,16 @@ }, { "cell_type": "code", - "execution_count": 44, - "metadata": {}, + "execution_count": 52, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [ + "nbval-skip" + ] + }, "outputs": [], "source": [ "df2 = pd.DataFrame(licence_counts_out_of_copyright)" @@ -368,8 +478,16 @@ }, { "cell_type": "code", - "execution_count": 45, - "metadata": {}, + "execution_count": 53, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": 
"" + }, + "tags": [ + "nbval-skip" + ] + }, "outputs": [], "source": [ "# Fill empty totals with zeros & make them all integers\n", @@ -378,36 +496,44 @@ }, { "cell_type": "code", - "execution_count": 46, - "metadata": {}, + "execution_count": 55, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [ + "nbval-skip" + ] + }, "outputs": [ { "data": { "text/plain": [ - "Free/CC Public Domain 2424\n", - "Free/CC BY 45229\n", - "Free/CC0 406\n", - "Free/RS NKC 1583\n", + "Free/CC Public Domain 30088\n", + "Free/CC BY 15017\n", + "Free/CC0 653\n", + "Free/RS NKC 1537\n", "Free/RS Noc-US 0\n", "Free with conditions/CC BY-ND 0\n", - "Free with conditions/CC BY-SA 805\n", - "Free with conditions/CC BY-NC 81\n", - "Free with conditions/CC BY-NC-ND 830\n", - "Free with conditions/CC BY-NC-SA 1145\n", + "Free with conditions/CC BY-SA 934\n", + "Free with conditions/CC BY-NC 84\n", + "Free with conditions/CC BY-NC-ND 829\n", + "Free with conditions/CC BY-NC-SA 1412\n", "Free with conditions/RS NoC-NC 0\n", "Free with conditions/InC-NC 0\n", "Free with conditions/InC-EDU 2\n", - "Restricted/RS InC 120\n", + "Restricted/RS InC 128\n", "Restricted/RS InC-OW-EU 0\n", "Restricted/RS InC-RUU 0\n", - "Restricted/RS CNE 393\n", - "Restricted/RS UND 1\n", + "Restricted/RS CNE 572\n", + "Restricted/RS UND 2\n", "Restricted/NoC-CR 0\n", "Restricted/NoC-OKLR 0\n", "dtype: int64" ] }, - "execution_count": 46, + "execution_count": 55, "metadata": {}, "output_type": "execute_result" } @@ -419,8 +545,16 @@ }, { "cell_type": "code", - "execution_count": 47, - "metadata": {}, + "execution_count": 56, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [ + "nbval-skip" + ] + }, "outputs": [], "source": [ "# Remove columns we don't need\n", @@ -429,8 +563,16 @@ }, { "cell_type": "code", - "execution_count": 48, - "metadata": {}, + "execution_count": 57, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [ + "nbval-skip" + ] + }, "outputs": [], "source": [ "# Remove rows that add up to zero\n", @@ -439,8 +581,16 @@ }, { "cell_type": "code", - "execution_count": 49, - "metadata": {}, + "execution_count": 58, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [ + "nbval-skip" + ] + }, "outputs": [], "source": [ "# Remove columns that are all zero\n", @@ -449,8 +599,12 @@ }, { "cell_type": "code", - "execution_count": 50, + "execution_count": 59, "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, "tags": [ "nbval-skip" ] @@ -464,17 +618,64 @@ ] }, { - "cell_type": "markdown", - "metadata": {}, + "cell_type": "code", + "execution_count": null, + "metadata": { + "editable": true, + "jupyter": { + "source_hidden": true + }, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, + "outputs": [], "source": [ - "See the results here:\n", + "# IGNORE THIS CELL -- FOR TESTING ONLY\n", + "\n", + "if os.getenv(\"GW_STATUS\") == \"dev\":\n", + "\n", + " def get_contributors_sample():\n", + " \"\"\"\n", + " Get a sample of contributors from the Trove API for testing.\n", + " Flatten all the nested records.\n", + " \"\"\"\n", + " contributors = []\n", + " contrib_params = {\"key\": API_KEY, \"encoding\": \"json\", \"reclevel\": \"full\"}\n", + " response = s.get(\n", + " \"https://api.trove.nla.gov.au/v3/contributor/\",\n", + " params=contrib_params,\n", + " headers=HEADERS,\n", + " timeout=60,\n", + " )\n", + " data = response.json()\n", + " for record in 
data[\"contributor\"]:\n", + " save_summary(contributors, record)\n", + " return contributors[:10]\n", + "\n", + " get_contributors = get_contributors_sample\n", + "\n", + " licence_counts_not_books = licence_counts_by_institution('NOT format:\"Book\"')\n", + "\n", + " df = pd.DataFrame(licence_counts_not_books)\n", + "\n", + " licence_counts_out_of_copyright = licence_counts_by_institution(\n", + " \"format:Photograph date:[* TO 1954]\"\n", + " )\n", "\n", - "* [rights-on-out-of-copyright-photos.csv](rights-on-out-of-copyright-photos.csv)" + " df2 = pd.DataFrame(licence_counts_out_of_copyright)" ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, "source": [ "----\n", "\n", @@ -499,7 +700,40 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.12" + "version": "3.10.12" + }, + "rocrate": { + "action": [ + { + "description": "This dataset includes information about the application of licences and rights statements to images by Trove contributors.", + "isPartOf": "https://github.com/GLAM-Workbench/trove-images-rights-data/", + "mainEntityOfPage": "https://glam-workbench.net/trove-images/trove-images-rights-data/", + "name": "Licences and rights statements applied to images by Trove contributors", + "result": [ + { + "description": "This dataset lists the number of images with each rights statement from organisations contributing to Trove.", + "license": "https://creativecommons.org/publicdomain/zero/1.0/", + "url": "https://github.com/GLAM-Workbench/trove-images-rights-data/raw/main/rights-on-images.csv" + }, + { + "description": "This dataset lists the number of out-of-copyright photographs with each rights statement from organisations contributing to Trove.", + "license": "https://creativecommons.org/publicdomain/zero/1.0/", + "url": "https://github.com/GLAM-Workbench/trove-images-rights-data/raw/main/rights-on-out-of-copyright-photos.csv" + } + ] + } + ], + "author": [ + { + "mainEntityOfPage": "https://timsherratt.au", + "name": "Sherratt, Tim", + "orcid": "https://orcid.org/0000-0001-7956-4498" + } + ], + "description": "This notebook uses Trove's `rights` index to build a picture of which licences and rights statements are currently being applied to images, and by who.", + "mainEntityOfPage": "https://glam-workbench.net/trove-images/use-of-rights-statements/", + "name": "The use of standard licences and rights statements in Trove image records", + "url": "https://github.com/GLAM-Workbench/trove-images/blob/master/rights-statements-on-images.ipynb" } }, "nbformat": 4, diff --git a/ro-crate-metadata.json b/ro-crate-metadata.json new file mode 100644 index 0000000..e4981f0 --- /dev/null +++ b/ro-crate-metadata.json @@ -0,0 +1,273 @@ +{ + "@context": "https://w3id.org/ro/crate/1.1/context", + "@graph": [ + { + "@id": "./", + "@type": "Dataset", + "author": [ + { + "@id": "https://orcid.org/0000-0001-7956-4498" + } + ], + "datePublished": "2024-04-24T04:32:27+00:00", + "description": "A GLAM Workbench repository", + "hasPart": [ + { + "@id": "download-image-collection.ipynb" + }, + { + "@id": "rights-statements-on-images.ipynb" + }, + { + "@id": "https://github.com/GLAM-Workbench/trove-images-rights-data/raw/main/rights-on-images.csv" + }, + { + "@id": "https://github.com/GLAM-Workbench/trove-images-rights-data/raw/main/rights-on-out-of-copyright-photos.csv" + } + ], + "license": { + "@id": "https://spdx.org/licenses/MIT" + }, + "mainEntityOfPage": { 
+ "@id": "https://glam-workbench.net/trove-images" + }, + "name": "trove-images", + "url": "https://github.com/GLAM-Workbench/trove-images/" + }, + { + "@id": "ro-crate-metadata.json", + "@type": "CreativeWork", + "about": { + "@id": "./" + }, + "conformsTo": { + "@id": "https://w3id.org/ro/crate/1.1" + }, + "license": { + "@id": "https://creativecommons.org/publicdomain/zero/1.0/" + } + }, + { + "@id": "https://glam-workbench.net/trove-images", + "@type": "CreativeWork", + "isPartOf": { + "@id": "https://glam-workbench.net/" + }, + "name": "Trove images", + "url": "https://glam-workbench.net/trove-images" + }, + { + "@id": "https://glam-workbench.net/", + "@type": "CreativeWork", + "author": [ + { + "@id": "https://orcid.org/0000-0001-7956-4498" + } + ], + "description": "A collection of tools, tutorials, examples, and hacks to help researchers work with data from galleries, libraries, archives, and museums (the GLAM sector).", + "name": "GLAM Workbench", + "url": "https://glam-workbench.net/" + }, + { + "@id": "https://orcid.org/0000-0001-7956-4498", + "@type": "Person", + "mainEntityOfPage": "https://timsherratt.au", + "name": "Sherratt, Tim", + "orcid": "https://orcid.org/0000-0001-7956-4498" + }, + { + "@id": "https://spdx.org/licenses/MIT", + "@type": "CreativeWork", + "name": "MIT License", + "url": "https://spdx.org/licenses/MIT.html" + }, + { + "@id": "https://creativecommons.org/publicdomain/zero/1.0/", + "@type": "CreativeWork", + "name": "CC0 Public Domain Dedication", + "url": "https://creativecommons.org/publicdomain/zero/1.0/" + }, + { + "@id": "http://rightsstatements.org/vocab/NKC/1.0/", + "@type": "CreativeWork", + "description": "The organization that has made the Item available reasonably believes that the Item is not restricted by copyright or related rights, but a conclusive determination could not be made.", + "name": "No Known Copyright", + "url": "http://rightsstatements.org/vocab/NKC/1.0/" + }, + { + "@id": "http://rightsstatements.org/vocab/CNE/1.0/", + "@type": "CreativeWork", + "description": "The copyright and related rights status of this Item has not been evaluated.", + "name": "Copyright Not Evaluated", + "url": "http://rightsstatements.org/vocab/CNE/1.0/" + }, + { + "@id": "https://www.python.org/downloads/release/python-31012/", + "@type": [ + "ComputerLanguage", + "SoftwareApplication" + ], + "name": "Python 3.10.12", + "url": "https://www.python.org/downloads/release/python-31012/", + "version": "3.10.12" + }, + { + "@id": "https://glam-workbench.net/trove-images/download-image-collection/", + "@type": "CreativeWork", + "isPartOf": { + "@id": "https://glam-workbench.net" + }, + "name": "Download a collection of digitised images", + "url": "https://glam-workbench.net/trove-images/download-image-collection/" + }, + { + "@id": "download-image-collection.ipynb", + "@type": [ + "File", + "SoftwareSourceCode" + ], + "author": [ + { + "@id": "https://orcid.org/0000-0001-7956-4498" + } + ], + "category": "", + "codeRepository": "https://github.com/GLAM-Workbench/trove-images/", + "conformsTo": { + "@id": "https://purl.archive.org/textcommons/profile#Notebook" + }, + "description": "Digitised photographs and other images are often organised into collections. While the Trove web interface does include a download option for collections, it has a number of limitations. 
This notebook provides an alternative method that downloads all of the available images in a collection (and any sub-collections) at the highest available resolution.", + "encodingFormat": "application/x-ipynb+json", + "mainEntityOfPage": { + "@id": "https://glam-workbench.net/trove-images/download-image-collection/" + }, + "name": "Download a collection of digitised images", + "position": 0, + "programmingLanguage": { + "@id": "https://www.python.org/downloads/release/python-31012/" + }, + "url": "https://github.com/GLAM-Workbench/trove-images/blob/master/download-image-collection.ipynb", + "workExample": [] + }, + { + "@id": "https://glam-workbench.net/trove-images/use-of-rights-statements/", + "@type": "CreativeWork", + "isPartOf": { + "@id": "https://glam-workbench.net" + }, + "name": "The use of standard licences and rights statements in Trove image records", + "url": "https://glam-workbench.net/trove-images/use-of-rights-statements/" + }, + { + "@id": "rights-statements-on-images.ipynb", + "@type": [ + "File", + "SoftwareSourceCode" + ], + "author": [ + { + "@id": "https://orcid.org/0000-0001-7956-4498" + } + ], + "category": "", + "codeRepository": "https://github.com/GLAM-Workbench/trove-images/", + "conformsTo": { + "@id": "https://purl.archive.org/textcommons/profile#Notebook" + }, + "description": "This notebook uses Trove's `rights` index to build a picture of which licences and rights statements are currently being applied to images, and by who.", + "encodingFormat": "application/x-ipynb+json", + "mainEntityOfPage": { + "@id": "https://glam-workbench.net/trove-images/use-of-rights-statements/" + }, + "name": "The use of standard licences and rights statements in Trove image records", + "position": 0, + "programmingLanguage": { + "@id": "https://www.python.org/downloads/release/python-31012/" + }, + "url": "https://github.com/GLAM-Workbench/trove-images/blob/master/rights-statements-on-images.ipynb", + "workExample": [] + }, + { + "@id": "https://glam-workbench.net/trove-images/trove-images-rights-data/", + "@type": "CreativeWork", + "isPartOf": { + "@id": "https://glam-workbench.net" + }, + "name": "Trove images rights data", + "url": "https://glam-workbench.net/trove-images/trove-images-rights-data/" + }, + { + "@id": "https://github.com/GLAM-Workbench/trove-images-rights-data/", + "@type": "Dataset", + "description": "This dataset includes information about the application of licences and rights statements to images by Trove contributors.", + "mainEntityOfPage": { + "@id": "https://glam-workbench.net/trove-images/trove-images-rights-data/" + }, + "name": "trove-images-rights-data", + "url": "https://github.com/GLAM-Workbench/trove-images-rights-data/", + "workExample": [] + }, + { + "@id": "https://github.com/GLAM-Workbench/trove-images-rights-data/raw/main/rights-on-images.csv", + "@type": [ + "File", + "Dataset" + ], + "contentSize": 19589, + "dateModified": "2024-04-20", + "description": "This dataset lists the number of images with each rights statement from organisations contributing to Trove.", + "encodingFormat": "text/csv", + "isPartOf": { + "@id": "https://github.com/GLAM-Workbench/trove-images-rights-data/" + }, + "license": { + "@id": "https://creativecommons.org/publicdomain/zero/1.0/" + }, + "name": "rights-on-images.csv", + "sdDatePublished": "2024-04-24", + "size": 216, + "url": "https://github.com/GLAM-Workbench/trove-images-rights-data/raw/main/rights-on-images.csv" + }, + { + "@id": 
"https://github.com/GLAM-Workbench/trove-images-rights-data/raw/main/rights-on-out-of-copyright-photos.csv", + "@type": [ + "File", + "Dataset" + ], + "contentSize": 15649, + "dateModified": "2024-04-20", + "description": "This dataset lists the number of out-of-copyright photographs with each rights statement from organisations contributing to Trove.", + "encodingFormat": "text/csv", + "isPartOf": { + "@id": "https://github.com/GLAM-Workbench/trove-images-rights-data/" + }, + "license": { + "@id": "https://creativecommons.org/publicdomain/zero/1.0/" + }, + "name": "rights-on-out-of-copyright-photos.csv", + "sdDatePublished": "2024-04-24", + "size": 171, + "url": "https://github.com/GLAM-Workbench/trove-images-rights-data/raw/main/rights-on-out-of-copyright-photos.csv" + }, + { + "@id": "#rights-statements-on-images_run_0", + "@type": "CreateAction", + "actionStatus": { + "@id": "http://schema.org/CompletedActionStatus" + }, + "endDate": "2024-04-20", + "instrument": { + "@id": "rights-statements-on-images.ipynb" + }, + "name": "Run of notebook: rights-statements-on-images.ipynb", + "result": [ + { + "@id": "https://github.com/GLAM-Workbench/trove-images-rights-data/raw/main/rights-on-images.csv" + }, + { + "@id": "https://github.com/GLAM-Workbench/trove-images-rights-data/raw/main/rights-on-out-of-copyright-photos.csv" + } + ] + } + ] +} \ No newline at end of file diff --git a/runtime.txt b/runtime.txt index 3cd3102..36f7654 100644 --- a/runtime.txt +++ b/runtime.txt @@ -1 +1 @@ -python-3.8 \ No newline at end of file +"python-3.10" \ No newline at end of file diff --git a/scripts/add_nb_metadata.py b/scripts/add_nb_metadata.py new file mode 100644 index 0000000..c1da7cd --- /dev/null +++ b/scripts/add_nb_metadata.py @@ -0,0 +1,40 @@ +import json +from pathlib import Path +from typing import Any, Dict, List, Optional +import nbformat +import re + +DEFAULT_AUTHORS = [{ + "name": "Sherratt, Tim", + "orcid": "https://orcid.org/0000-0001-7956-4498", + "mainEntityOfPage": "https://timsherratt.au" +}] + +def main(): + notebooks = get_notebooks() + for notebook in notebooks: + nb = nbformat.read(notebook, nbformat.NO_CONVERT) + title = extract_notebook_title(nb) + metadata = {"name": title, "author": DEFAULT_AUTHORS} + nb.metadata.rocrate = metadata + # print(nb.metadata) + nbformat.write(nb, notebook, nbformat.NO_CONVERT) + +def extract_notebook_title(nb): + md_cells = [c for c in nb.cells if c["cell_type"] == "markdown"] + for cell in md_cells: + if title := re.search(r"^# (.+)(\n|$)", cell["source"]): + return title.group(1) + +def get_notebooks(): + """ + Returns a list of paths to jupyter notebooks in the current directory + Returns: + Paths of the notebooks found in the directory + """ + files = Path(".").glob("*.ipynb") + is_notebook = lambda file: not file.name.lower().startswith(("draft", "untitled")) + return list(filter(is_notebook, files)) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/scripts/create_previews.py b/scripts/create_previews.py new file mode 100644 index 0000000..931d553 --- /dev/null +++ b/scripts/create_previews.py @@ -0,0 +1,34 @@ +import nbformat +from nbconvert import HTMLExporter +from nbconvert.preprocessors import ExecutePreprocessor +from pathlib import Path +import argparse + +def main(path): + if path: + output = Path(path, "previews") + else: + output = Path("previews") + output.mkdir(exist_ok=True) + + nbs = [n for n in Path(".").glob("*.ipynb") if not n.name.startswith(("index.", "draft", "Untitled", "snippets"))] + for 
nb_path in nbs: + print(nb_path.name) + with nb_path.open() as f: + nb = nbformat.read(f, as_version=4) + #ep = ExecutePreprocessor(skip_cells_with_tag="nbval-skip") + #ep.preprocess(nb, {'metadata': {'path': '.'}}) + html_exporter = HTMLExporter() + + # 3. Process the notebook we loaded earlier + (body, resources) = html_exporter.from_notebook_node(nb) + + Path(output, f"{nb_path.stem}.html").write_text(body) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--path", type=str, help="Path to save", required=False + ) + args = parser.parse_args() + main(args.path) diff --git a/scripts/extract_metadata.py b/scripts/extract_metadata.py new file mode 100644 index 0000000..1de35b4 --- /dev/null +++ b/scripts/extract_metadata.py @@ -0,0 +1,38 @@ +import nbformat +import re + +LISTIFY = ["author", "object", "result"] + + +def listify(value): + if not isinstance(value, list): + return [value] + return value + +def extract_notebook_title(nb): + md_cells = [c for c in nb.cells if c["cell_type"] == "markdown"] + for cell in md_cells: + if title := re.search(r"^# (.+)(\n|$)", cell["source"]): + return title.group(1) + + +def extract_notebook_metadata(notebook, keys): + """Attempts to extract metadata from the notebook. + + Parameters: + notebook: The path to the jupyter notebook + keys: A dictionary of keys to look for in the notebook, and their + corresponding defaults if the key is not found. + + Returns: + A dictionary containing the retrieved metadata for each key. + """ + result = {} + nb = nbformat.read(notebook, nbformat.NO_CONVERT) + metadata = nb.metadata.rocrate + for key, default in keys.items(): + if key in LISTIFY: + result[key] = listify(metadata.get(key, default)) + else: + result[key] = metadata.get(key, default) + return result diff --git a/scripts/generate_readme.py b/scripts/generate_readme.py new file mode 100644 index 0000000..47fbc54 --- /dev/null +++ b/scripts/generate_readme.py @@ -0,0 +1,54 @@ +from rocrate.rocrate import ROCrate +import json +from pathlib import Path +import re + +def get_create_action(crate, datafile): + actions = crate.get_by_type("CreateAction") + for action in actions: + props = action.properties() + for result in props["result"]: + if result["@id"] == datafile: + return action + +crate = ROCrate("./") +root = crate.get("./").properties() +gw_section = crate.get(root["mainEntityOfPage"]["@id"]) + +md = f"# {root['name']}\n\n" + +if version := root.get("version"): + md += f"CURRENT VERSION: {version}\n\n" + +md += f"{root['description']}\n\n" +md += f"For more information and documentation see the [{gw_section['name']}]({gw_section['url']}) section of the [GLAM Workbench](https://glam-workbench.net)." 
+md += "\n\n## Notebooks\n" +details = "\n\n## Dataset details" + +for nb in crate.get_by_type(["File", "SoftwareSourceCode"]): + md += f'- [{nb["name"]}]({nb["url"]})\n' + +datasets = [] +for action in crate.get_by_type("CreateAction"): + print(action) + for result in action["result"]: + dataset = crate.get(result["@id"]) + try: + source = crate.get(dataset["isPartOf"]["@id"]) + datasets.append(source) + except KeyError: + datasets.append(dataset) + + +if datasets: + md += "\n\n## Associated datasets\n" + for ds in list(set(datasets)): + md += f'- [{ds["name"]}]({ds["url"]})\n' + +md += "\n\n\n\n" + +md += "\n\n----\nCreated by [Tim Sherratt](https://timsherratt.au) for the [GLAM Workbench](https://glam-workbench.net)" + + +md = re.sub(r'', '', md) +Path("README.md").write_text(md) diff --git a/scripts/list_imports.py b/scripts/list_imports.py new file mode 100644 index 0000000..3de42d0 --- /dev/null +++ b/scripts/list_imports.py @@ -0,0 +1,41 @@ +from pathlib import Path +import json +import re +import importlib.util +import os.path +import sys + +external_imports = ['jupyterlab', 'voila', 'voila-material @ git+https://github.com/GLAM-Workbench/voila-material.git'] + +python_path = os.path.dirname(sys.executable).replace('bin', 'lib') +#print(python_path) + +imports = [] +for nb in Path(__file__).resolve().parent.parent.glob('*.ipynb'): + if not nb.name.startswith('.') and not nb.name.startswith('Untitled'): + nb_json = json.loads(nb.read_bytes()) + for cell in nb_json['cells']: + for line in cell['source']: + if match := re.search(r'^\s*import ([a-zA-Z_]+)(?! from)', line): + imports.append(match.group(1)) + elif match := re.search(r'^\s*from ([a-zA-Z_]+)\.?[a-zA-Z_]* import [a-zA-Z_]+', line): + imports.append(match.group(1)) + +# print(list(set(imports))) + +for imported_mod in list(set(imports)): + try: + module_path = importlib.util.find_spec(imported_mod).origin + except AttributeError: + pass + else: + if module_path: + # print(imported_mod) + # print(module_path) + if 'site-packages' in module_path or python_path in module_path: + external_imports.append(imported_mod) + #print(external_imports) + +with Path(Path(__file__).resolve().parent.parent, 'requirements-tocheck.in').open('w') as req_file: + for mod in external_imports: + req_file.write(mod + '\n') diff --git a/test_and_lint.sh b/scripts/test_and_lint.sh similarity index 100% rename from test_and_lint.sh rename to scripts/test_and_lint.sh diff --git a/scripts/update_crate.py b/scripts/update_crate.py new file mode 100755 index 0000000..b1ced7a --- /dev/null +++ b/scripts/update_crate.py @@ -0,0 +1,837 @@ +import os +import argparse +import datetime +import requests +from giturlparse import parse as ghparse +from git import Repo +from pathlib import Path +import mimetypes +from bs4 import BeautifulSoup +from dotenv import load_dotenv +from rocrate.rocrate import ROCrate +from rocrate.model.person import Person +from rocrate.model.data_entity import DataEntity +from rocrate.model.contextentity import ContextEntity +from extract_metadata import extract_notebook_metadata + +load_dotenv() + +NOTEBOOK_EXTENSION = ".ipynb" + +DEFAULT_LICENCE = { + "@id": "https://spdx.org/licenses/MIT", + "name": "MIT License", + "@type": "CreativeWork", + "url": "https://spdx.org/licenses/MIT.html", +} + +METADATA_LICENCE = { + "@id": "https://creativecommons.org/publicdomain/zero/1.0/", + "name": "CC0 Public Domain Dedication", + "@type": "CreativeWork", + "url": "https://creativecommons.org/publicdomain/zero/1.0/", +} + +NKC_LICENCE = { + 
"@id": "http://rightsstatements.org/vocab/NKC/1.0/", + "@type": "CreativeWork", + "description": "The organization that has made the Item available reasonably believes that the Item is not restricted by copyright or related rights, but a conclusive determination could not be made.", + "name": "No Known Copyright", + "url": "http://rightsstatements.org/vocab/NKC/1.0/" +} +CNE_LICENCE = { + "@id": "http://rightsstatements.org/vocab/CNE/1.0/", + "@type": "CreativeWork", + "description": "The copyright and related rights status of this Item has not been evaluated.", + "name": "Copyright Not Evaluated", + "url": "http://rightsstatements.org/vocab/CNE/1.0/" +} + +PYTHON = { + "@id": "https://www.python.org/downloads/release/python-31012/", + "version": "3.10.12", + "name": "Python 3.10.12", + "url": "https://www.python.org/downloads/release/python-31012/", + "@type": ["ComputerLanguage", "SoftwareApplication"], +} + +DEFAULT_AUTHORS = [ + { + "name": "Sherratt, Tim", + "orcid": "https://orcid.org/0000-0001-7956-4498", + "mainEntityOfPage": "https://timsherratt.au", + } +] + +GLAM_WORKBENCH = { + "@id": "https://glam-workbench.net/", + "@type": "CreativeWork", + "name": "GLAM Workbench", + "url": "https://glam-workbench.net/", + "description": "A collection of tools, tutorials, examples, and hacks to help researchers work with data from galleries, libraries, archives, and museums (the GLAM sector).", + "author": [{"@id": "https://orcid.org/0000-0001-7956-4498"}], +} + + +def main(version, data_repo): + # Make working directory the parent of the scripts directory + os.chdir(Path(__file__).resolve().parent.parent) + # Get a list of paths to notebooks in the cwd + notebooks = get_notebooks() + # Update the crate + update_crate(version, data_repo, notebooks) + + +def get_notebooks(): + """Returns a list of paths to jupyter notebooks in the given directory + + Parameters: + dir: The path to the directory in which to search. + + Returns: + Paths of the notebooks found in the directory + """ + # files = [Path(file) for file in os.listdir()] + files = Path(".").glob("*.ipynb") + is_notebook = lambda file: not file.name.lower().startswith(("draft", "untitled", "index.")) + return list(filter(is_notebook, files)) + + +def id_ify(elements): + """Wraps elements in a list with @id keys + eg, convert ['a', 'b'] to [{'@id': 'a'}, {'@id': 'b'}] + """ + # If the input is a string, make it a list + # elements = [elements] if isinstance(elements, str) else elements + # Nope - single elements shouldn't be lists, see: https://www.researchobject.org/ro-crate/1.1/appendix/jsonld.html + if isinstance(elements, str): + return {"@id": elements} + else: + return [{"@id": element} for element in elements] + + +def add_people(crate, authors): + """Converts a list of authors to a list of Persons to be embedded within an ROCrate + + Parameters: + crate: The rocrate in which the authors will be created. + authors: + A list of author information. + Expects a dict with at least a 'name' value ('Surname, Givenname') + If there's an 'orcid' this will be used as the id (and converted to a uri if necessary) + Returns: + A list of Persons. 
+ """ + persons = [] + + # Loop through list of authors + for author in authors: + # If there's no orcid, create an id from the name + if "orcid" not in author or not author["orcid"]: + author_id = f"#{author['name'].replace(', ', '_')}" + + # If there's an orcid but it's not a url, turn it into one + elif not author["orcid"].startswith("http"): + author_id = f"https://orcid.org/{author['orcid']}" + + # Otherwise we'll just use the orcid as the id + else: + author_id = author["orcid"] + + # Check to see if there's already an entry for this person in the crate + author_current = crate.get(author_id) + + # If there's already an entry we'll update the existing properties + if author_current: + properties = author_current.properties() + + # Update the name in case it has changed + # properties.update({"name": author["name"]}) + for key, value in author.items(): + properties.update({key: value}) + + # Otherwise set default properties + else: + properties = {"name": author["name"]} + + # Add/update the person record and add to the list of persons to return + persons.append(crate.add(Person(crate, author_id, properties=properties))) + + return persons + +def find_local_file(file_name, local_path): + # Look for local copy of data file in likely locations + file_path = Path(local_path, file_name) + if file_path.exists(): + return file_path + + +def get_file_stats(datafile, local_path): + """ + Try to get the file size and last modified date of the datafile. + """ + file_name = datafile.rstrip("/").split("/")[-1] + local_file = find_local_file(file_name, local_path) + # If there's a local copy use that to derive stats + # This means we can get an accurate date modified value (GitHub only gives date committed). + if local_file and local_file.is_dir(): + size = None + rows = len(list(local_file.glob("*"))) + # print(rows) + stats = local_file.stat() + date = datetime.datetime.fromtimestamp(stats.st_mtime).strftime("%Y-%m-%d") + elif local_file: + # Get file stats from local filesystem + stats = local_file.stat() + size = stats.st_size + date = datetime.datetime.fromtimestamp(stats.st_mtime).strftime("%Y-%m-%d") + if local_file.name.endswith((".zip", ".db")): + rows = "" + else: + rows = 0 + with local_file.open("r") as df: + for line in df: + rows += 1 + elif datafile.startswith("http"): + # I don't think I want to download the whole file, so set to None + rows = None + # Process GitHub links + if "github.com" in datafile: + # the ghparser doesn't seem to like 'raw' urls + datafile = datafile.replace("/raw/", "/blob/") + gh_parts = ghparse(datafile) + + # API url to get the latest commit for this file + gh_commit_url = f"https://api.github.com/repos/{gh_parts.owner}/{gh_parts.repo}/commits?path={gh_parts.path_raw.split('/')[-1]}" + try: + response = requests.get(gh_commit_url) + + # Get the date of the last commit + date = response.json()[0]["commit"]["committer"]["date"][:10] + + except (IndexError, KeyError): + date = None + + # Different API endpoint for file data + gh_file_url = f"https://api.github.com/repos/{gh_parts.owner}/{gh_parts.repo}/contents/{gh_parts.path_raw.split('/')[-1]}" + try: + response = requests.get(gh_file_url) + contents_data = response.json() + # Get the file size + try: + size = contents_data["size"] + except TypeError: + size = None + + except KeyError: + size = None + + else: + # If the file is online, get size from content headers + size = requests.head(datafile).headers.get("Content-length") + date = None + + return date, size, rows + + +def get_default_gh_branch(url): + 
# Process GitHub links + if "github.com" in url: + # the ghparser doesn't seem to like 'raw' urls + url = url.replace("/raw/", "/blob/") + gh_parts = ghparse(url) + headers = {'Authorization': f'token {os.getenv("GITHUB_TOKEN")}'} + gh_repo_url = f"https://api.github.com/repos/{gh_parts.owner}/{gh_parts.repo}" + response = requests.get(gh_repo_url, headers=headers) + return response.json().get("default_branch") + +def add_files(crate, action, data_type, gw_url, data_repo, local_path): + """ + Add data files to the crate. + Tries to extract some basic info about files (size, date) before adding them. + """ + file_entities = [] + + # Loop through list of datafiles + for df_data in action.get(data_type, []): + datafile = df_data["url"] + print(datafile) + # print(df_data) + # local_path = action.get("local_path", ".") + + # Check if file exists (or is a url) + if ( + Path(datafile).exists() + or (datafile.startswith("http")) + or (data_repo and data_repo in datafile) + ): + # If this is a data repo crate use the file name (not full url) as the id + if data_repo and data_repo in datafile: + file_id = datafile.rstrip("/").split("/")[-1] + else: + file_id = datafile + # To construct a full GitHub url to a file we need to find the repo url and default branch + if not datafile.startswith("http"): + repo = Repo(".") + repo_url = repo.git.config("--get", "remote.origin.url").replace( + ".git", "/" + ) + gh_branch = get_default_gh_branch(repo_url) + file_url = f"{repo_url}blob/{gh_branch}/{datafile}" + else: + file_url = datafile + + # Get date and size info + date, size, rows = get_file_stats(datafile, local_path) + + # Check to see if there's already an entry for this file in the crate + file_entity = crate.get(file_id) + + # If there's already an entry for this file, we'll keep it's properties + # but modify the date, size etc later + if file_entity: + properties = file_entity.properties() + # print(properties) + + # Otherwise we'll define default properties for a new file entity + else: + if df_data.get("name"): + name = df_data.get("name") + else: + name = datafile.rstrip("/").split("/")[-1] + properties = { + "name": name, + "url": file_url, + } + + # Add contextual entities for data repo associated with file + # If this is a data repo crate, this is not necessary as the crate root will have this + if data_type == "result" and not data_repo: + # print(data_type) + examples = action.get("workExample", []) + # print(examples) + add_example_entities(crate, examples) + if gw_page := action.get("mainEntityOfPage"): + add_gw_page_link(crate, gw_page) + data_repo_url = action.get("isPartOf") + if data_repo_url: + properties["isPartOf"] = id_ify(data_repo_url) + elif gw_page: + properties["mainEntityOfPage"] = id_ify(gw_page) + properties["workExample"] = id_ify([e["url"] for e in examples]) + if not crate.get(data_repo_url): + + data_rocrate = { + "@id": data_repo_url, + "@type": "Dataset", + "url": data_repo_url, + "name": data_repo_url.rstrip("/").split("/")[-1] + } + if data_roc_description := action.get("description"): + print(data_roc_description) + data_rocrate["description"] = data_roc_description + if gw_page: + # print(gw_page) + data_rocrate["mainEntityOfPage"] = id_ify(gw_page) + if current_data_rocrate := crate.get(data_repo_url): + current_examples = [e["@id"] for e in current_data_rocrate.properties().get("workExample")] + else: + current_examples = [] + data_rocrate["workExample"] = id_ify(list(set([e["url"] for e in examples] + current_examples))) + + add_context_entity(crate, 
data_rocrate) + + + # Guess the encoding type from extension + encoding = mimetypes.guess_type(datafile)[0] + if encoding: + properties["encodingFormat"] = encoding + + if description := df_data.get("description"): + properties["description"] = description + if license := df_data.get("license"): + properties["license"] = id_ify(license) + + # Add/update modified date + if date: + properties["dateModified"] = date + + # Add/update file size + if size: + properties["contentSize"] = size + + # If it's a CSV add number of rows + if rows and properties.get("encodingFormat") == "text/csv": + properties["size"] = rows - 1 + elif rows: + properties["size"] = rows + + # If it's a web link add today's date to indicate when it was last accessed + if datafile.startswith("http"): + properties["sdDatePublished"] = datetime.datetime.now().strftime( + "%Y-%m-%d" + ) + + # Add/update the file entity and add to the list of file entities + local_file = find_local_file(datafile.rstrip("/").split("/")[-1], action.get("local_path", ".")) + # print(datafile, local_file, file_id) + if data_repo and data_repo in datafile: + crate_id = local_file + else: + crate_id = file_id + if local_file and local_file.is_dir(): + properties["@type"] = "Dataset" + file_entities.append(crate.add_dataset(crate_id, properties=properties)) + elif local_file: + properties["@type"] = ["File", "Dataset"] + file_entities.append(crate.add_file(crate_id, properties=properties)) + else: + file_entities.append(crate.add_file(crate_id, properties=properties)) + return file_entities + + +def add_action(crate, notebook, input_files, output_files, query, index, local_path): + """ + Links a notebook and associated datafiles through a CreateAction. + """ + # Create an action id from the notebook name + action_id = f"{notebook.id.split('/')[-1].replace('.ipynb', '')}_run_{index}" + + # Get a list of dates from the output files + dates = [f.properties()["dateModified"] for f in output_files if "dateModified" in f.properties()] + # Find the latest date to use as the endDate for the action + try: + last_date = sorted(dates)[-1] + + # There's no dates (or no output files) + except IndexError: + # Use the date the notebook was last modified + last_date, _, _ = get_file_stats(notebook.id, local_path) + + # Check to see if this action is already in the crate + action_current = crate.get(action_id) + if action_current: + # Remove current files from existing action entity + properties = {"object": [], "result": []} + else: + # Default properties for new action + properties = { + "@type": "CreateAction", + "instrument": id_ify(notebook.id), + "actionStatus": {"@id": "http://schema.org/CompletedActionStatus"}, + "name": f"Run of notebook: {notebook.id.split('/')[-1]}", + } + + if query: + properties["query"] = query + + # Set endDate to latest file modification date + properties["endDate"] = last_date + + # Add or update action + action_new = crate.add(ContextEntity(crate, action_id, properties=properties)) + + # Add input files to action + for input in input_files: + action_new.append_to("object", input) + + # Add output files to action + for output in output_files: + action_new.append_to("result", output) + +def add_example_entities(crate, examples): + for example in examples: + example_props = { + "@id": example["url"], + "@type": "CreativeWork", + "name": example["name"], + "url": example["url"] + } + add_context_entity(crate, example_props) + + +def creates_data(data_repo, notebook_metadata): + """ + Check to see if a notebook creates a data file. 
+ """ + if data_repo: + for action in notebook_metadata["action"]: + for result in action["result"]: + if data_repo in result["url"]: + return True + return False + + +def add_notebook(crate, notebook, data_repo, gw_url): + """Adds notebook information to an ROCRate. + + Parameters: + crate: The rocrate to update. + notebook: The notebook to add to the rocrate + """ + # Get the crate root + root = crate.get("./").properties() + + # Extract embedded metadata from the notebook + notebook_metadata = extract_notebook_metadata( + notebook, + { + "name": notebook.name, + "author": [], + "description": "", + "action": [], + "mainEntityOfPage": "", + "workExample": [], + "category": "", + "position": 0 + }, + ) + # print(notebook.name) + has_data = creates_data(data_repo, notebook_metadata) + + # If this is a data repo crate change nb ids to full urls + if has_data: + repo = Repo(".") + repo_url = repo.git.config("--get", "remote.origin.url").replace(".git", "/") + gh_branch = get_default_gh_branch(repo_url) + nb_id = f"{repo_url}blob/{gh_branch}/{notebook.name}" + nb_url = nb_id + else: + repo_url = root["url"] + gh_branch = get_default_gh_branch(repo_url) + nb_id = notebook + nb_url = f"{repo_url}blob/{gh_branch}/{notebook.name}" + + # If this is a data repo crate only add notebooks that generate data + if not data_repo or has_data: + # Check if this notebook is already in the crate + nb_current = crate.get(notebook.name) + + # If there's an entry for this notebook, we'll update it + if nb_current: + # Get current properties of the notebook + properties = nb_current.properties() + + # If details have changed in notebook metadata they should be updated in the crate + properties.update( + { + "name": notebook_metadata["name"], + "description": notebook_metadata["description"], + } + ) + else: + # Default properties for a new notebook + properties = { + "@type": ["File", "SoftwareSourceCode"], + "name": notebook_metadata["name"], + "description": notebook_metadata["description"], + "programmingLanguage": id_ify(PYTHON["@id"]), + "encodingFormat": "application/x-ipynb+json", + "conformsTo": id_ify( + "https://purl.archive.org/textcommons/profile#Notebook" + ), + "codeRepository": repo_url, + "url": nb_url, + "category": notebook_metadata["category"], + "position": notebook_metadata["position"] + } + + if doc_url := notebook_metadata.get("mainEntityOfPage"): + add_gw_page_link(crate, doc_url) + properties["mainEntityOfPage"] = id_ify(doc_url) + + nb_examples = notebook_metadata.get("workExample", []) + add_example_entities(crate, nb_examples) + properties["workExample"] = id_ify([e["url"] for e in nb_examples]) + + + # Add input files from 'object' property of actions + #nb_inputs = [a["object"] for a in notebook_metadata.get("action", [])] + #input_files = add_files(crate, nb_inputs, data_repo) + + # Add output files from 'result' property + #nb_outputs = [a["result"] for a in notebook_metadata.get("action", [])] + #output_files = add_files(crate, nb_outputs, data_repo) + + # Add or update the notebook entity + # (if there's an existing entry it will be overwritten) + nb_new = crate.add_file(nb_id, properties=properties) + + # Add a CreateAction that links the notebook run with the input and output files + for index, action in enumerate(notebook_metadata.get("action", [])): + local_path = action.get("local_path", ".") + if not data_repo or data_repo in action.get("result", [])[0]["url"]: + # print(action) + input_files = add_files(crate, action, "object", gw_url, data_repo, local_path) + 
output_files = add_files(crate, action, "result", gw_url, data_repo, local_path) + add_action(crate, nb_new, input_files, output_files, action.get("query", ""), index, local_path) + if data_repo: + if dataset_gw_page := action.get("mainEntityOfPage"): + crate.update_jsonld({"@id": "./", "mainEntityOfPage": id_ify(dataset_gw_page)}) + add_gw_page_link(crate, dataset_gw_page) + if dataset_description := action.get("description"): + crate.update_jsonld({"@id": "./", "description": dataset_description}) + dataset_examples = action.get("workExample", []) + current_examples = root.get("workExample", []) + crate.update_jsonld({"@id": "./", "workExample": id_ify([e["url"] for e in dataset_examples]) + current_examples}) + add_example_entities(crate, dataset_examples) + + # If the notebook has author info, add people to crate + if notebook_metadata["author"]: + # Add people referenced in notebook metadata + persons = add_people(crate, notebook_metadata["author"]) + + # Otherwise add crate root authors to notebook + else: + persons = root["author"] + + # If people are not already attached to notebook, append them to the author property + for person in persons: + if ( + nb_current and person not in nb_current.get("author", []) + ) or not nb_current: + nb_new.append_to("author", person) + + +def remove_deleted_files(crate, data_paths): + """ + Loops through File entities checking to see if they exist in local filesystem. + If they don't then they're removed from the crate. + """ + file_ids = [] + for action in crate.get_by_type("CreateAction"): + for file_type in ["object", "result"]: + try: + file_ids += [o["@id"] for o in action.properties()[file_type]] + except KeyError: + pass + + # Loop through File entities + for f in crate.get_by_type("File"): + found = False + for dpath in data_paths: + if Path(dpath, f.id).exists(): + found = True + # If they don't exist and they're not urls, then delete + if not found and not f.id.startswith("http"): + crate.delete(f) + # If they're not referenced in CreateActions then delete + if f.id not in file_ids and not f.id.endswith(".ipynb"): + crate.delete(f) + + +def remove_unreferenced_authors(crate): + """ + Compares the current Person entities with those referenced by the "author" property. + Removes Person entities that are not authors. + """ + # Get authors from root + authors = crate.get("./")["author"] + + # Loop through all File entities, extracting authors + for file_ in crate.get_by_type("File"): + try: + authors += file_["author"] + except KeyError: + pass + # Loop though Person entities checking against authors + for person in crate.get_by_type("Person"): + # If Person is not an author, delete them + if not person in authors: + crate.delete(person) + + +def add_update_action(crate, version): + """ + Adds an UpdateAction to the crate when the repo version is updated. + """ + # Create an id for the action using the version number + action_id = f"create_version_{version.replace('.', '_')}" + + # Set basic properties for action + properties = { + "@type": "UpdateAction", + "endDate": datetime.datetime.now().strftime("%Y-%m-%d"), + "name": f"Create version {version}", + "actionStatus": {"@id": "http://schema.org/CompletedActionStatus"}, + } + + # Create entity + crate.add(ContextEntity(crate, action_id, properties=properties)) + + +def add_context_entity(crate, entity): + """ + Adds a ContextEntity to the crate. 
+ + Parameters: + crate: the current ROCrate + entity: A JSONLD ready dict containing "@id" and "@type" values + """ + crate.add(ContextEntity(crate, entity["@id"], properties=entity)) + +def add_gw_page_link(crate, doc_url): + gw_title = get_page_title(doc_url) + nd_docs = { + "@id": doc_url, + "@type": "CreativeWork", + "name": gw_title, + "isPartOf": id_ify("https://glam-workbench.net"), + "url": doc_url + } + add_context_entity(crate, nd_docs) + +def get_page_title(url): + response = requests.get(url) + if response.ok: + soup = BeautifulSoup(response.text, features="lxml") + return soup.title.string.split(" - ")[0].strip() + +def get_gw_docs(repo_name): + """ """ + gw_url = f"https://glam-workbench.net/{repo_name}" + gw_title = get_page_title(gw_url) + if gw_title: + return {"url": gw_url, "title": gw_title} + + +def update_crate(version, data_repo, notebooks): + """Creates a parent crate in the supplied directory. + + Parameters: + version: The version of the repository + notebooks: The notebooks to include in the crate + """ + repo = Repo(".") + code_repo_url = repo.git.config("--get", "remote.origin.url").replace(".git", "/") + + # Set some defaults based on whether this is a code or data repo + if data_repo: + crate_source = "./data-rocrate" + repo_url = data_repo + description = "A GLAM Workbench dataset" + else: + crate_source = "./" + repo_url = code_repo_url + description = "A GLAM Workbench repository" + + repo_name = repo_url.strip("/").split("/")[-1] + code_repo_name = code_repo_url.strip("/").split("/")[-1] + # Get links to the GLAM Workbench + gw_link = get_gw_docs(repo_name) + if gw_link: + gw_url = gw_link.get("url") + else: + gw_url = None + # Load existing crate + try: + crate = ROCrate(source=crate_source) + + # If there's not an existing crate, create a new one + except (ValueError, FileNotFoundError): + crate = ROCrate() + + crate.update_jsonld( + { + "@id": "./", + "@type": "Dataset", + "name": repo_name, + "description": description, + "url": repo_url, + "author": id_ify([a["orcid"] for a in DEFAULT_AUTHORS]), + } + ) + + if gw_link: + gw_url = gw_link.get("url") + crate.update_jsonld( + {"@id": "./", "mainEntityOfPage": id_ify(gw_url)} + ) + gw_docs = { + "@id": gw_url, + "@type": "CreativeWork", + "name": gw_link["title"], + "isPartOf": id_ify("https://glam-workbench.net/"), + "url": gw_url, + } + add_context_entity(crate, gw_docs) + add_context_entity(crate, GLAM_WORKBENCH) + + add_people(crate, DEFAULT_AUTHORS) + + # If this is a data repo crate, create a link back to code repo + if data_repo: + crate.update_jsonld( + { + "@id": "./", + "isBasedOn": id_ify(code_repo_url), + "distribution": id_ify(f"{repo_url.rstrip('/')}/archive/refs/heads/main.zip") + } + ) + source_repo = { + "@id": code_repo_url, + "@type": "Dataset", + "name": code_repo_name, + "url": code_repo_url, + } + add_context_entity(crate, source_repo) + + download = { + "@id": f"{ repo_url.rstrip('/')}/archive/refs/heads/main.zip", + "@type": "DataDownload", + "name": "Download repository as zip", + "url": f"{ repo_url.rstrip('/')}/archive/refs/heads/main.zip", + } + add_context_entity(crate, download) + + # If this is a new version, change version number and add UpdateAction + if version: + crate.update_jsonld( + { + "@id": "./", + "version": version, + "datePublished": datetime.datetime.now().strftime("%Y-%m-%d"), + } + ) + add_update_action(crate, version) + + # Add licence to root + crate.license = id_ify(DEFAULT_LICENCE["@id"]) + add_context_entity(crate, DEFAULT_LICENCE) + + # Add licence to 
metadata + crate.update_jsonld( + { + "@id": "ro-crate-metadata.json", + "license": id_ify(METADATA_LICENCE["@id"]), + } + ) + add_context_entity(crate, METADATA_LICENCE) + add_context_entity(crate, NKC_LICENCE) + add_context_entity(crate, CNE_LICENCE) + + # Add Python for programming language + add_context_entity(crate, PYTHON) + + # Process notebooks + for notebook in notebooks: + add_notebook(crate, notebook, data_repo, gw_url) + + # Remove files from crate if they're no longer in the repo + # remove_deleted_files(crate, data_paths) + + # Remove authors from crate if they're not referenced by any entities + remove_unreferenced_authors(crate) + + # Save the crate + crate.write(crate_source) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--version", type=str, help="New version number", required=False + ) + parser.add_argument("--data-repo", type=str, default="", required=False) + args = parser.parse_args() + main(args.version, args.data_repo) diff --git a/update_version.sh b/scripts/update_version.sh similarity index 91% rename from update_version.sh rename to scripts/update_version.sh index a9bf36f..3824b95 100755 --- a/update_version.sh +++ b/scripts/update_version.sh @@ -10,4 +10,5 @@ jq --arg text "$text" '.description = $text' .zenodo.json \ | jq --arg identifier $identifier '.related_identifiers[0].identifier = $identifier' \ | jq --arg pdate "$pdate" '.publication_date = $pdate' > zenodo.json; rm .zenodo.json; -mv zenodo.json .zenodo.json; \ No newline at end of file +mv zenodo.json .zenodo.json; +python scripts/update_crate.py --version $1 \ No newline at end of file
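
The new `scripts/add_nb_metadata.py` stamps a `rocrate` block into each notebook's metadata, and `scripts/extract_metadata.py` reads that block back (applying per-key defaults) when the crate is rebuilt. As a rough sketch of what that block looks like from the outside, the same metadata can be inspected directly with `nbformat` — here using `rights-statements-on-images.ipynb`, which already carries the block in this patch:

```
# Sketch only: read the embedded `rocrate` metadata block that
# add_nb_metadata.py writes and extract_metadata.py consumes.
import nbformat

nb = nbformat.read("rights-statements-on-images.ipynb", nbformat.NO_CONVERT)
rocrate_meta = nb.metadata.get("rocrate", {})

# Missing keys fall back to defaults inside extract_notebook_metadata()
print(rocrate_meta.get("name", "Untitled notebook"))
for author in rocrate_meta.get("author", []):
    print(author.get("name"), author.get("orcid"))
```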
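
`scripts/update_crate.py` writes `ro-crate-metadata.json` at the repository root, and `scripts/generate_readme.py` then regenerates `README.md` from that crate. A minimal sketch of loading the finished crate with `ro-crate-py` and listing the entities the README is built from (using the same `get_by_type` calls the scripts themselves rely on):

```
# Sketch only: load ro-crate-metadata.json and list notebooks and datasets.
from rocrate.rocrate import ROCrate

crate = ROCrate("./")

# Notebooks are typed ["File", "SoftwareSourceCode"] by update_crate.py
for nb in crate.get_by_type(["File", "SoftwareSourceCode"]):
    print("notebook:", nb["name"], "->", nb["url"])

# Other entities typed as Dataset (the crate root "./" is skipped)
for ds in crate.get_by_type("Dataset"):
    if ds.id != "./":
        props = ds.properties()
        print("dataset:", props.get("name"), "->", props.get("url"))
```

In this patch the crate refresh is wired into the release process: `scripts/update_version.sh` now finishes by running `python scripts/update_crate.py --version $1`.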
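
The `id_ify` helper in `scripts/update_crate.py` follows the RO-Crate JSON-LD convention that a single reference is an object while multiple references are a list, so its output shape depends on the input type. A quick illustration, assuming the development requirements are installed and the repository root is the working directory:

```
# Sketch only: behaviour of the id_ify() helper from scripts/update_crate.py.
import sys

sys.path.append("scripts")  # make the script importable from the repository root
from update_crate import id_ify

print(id_ify("https://orcid.org/0000-0001-7956-4498"))
# {'@id': 'https://orcid.org/0000-0001-7956-4498'}

print(id_ify(["rights-on-images.csv", "rights-on-out-of-copyright-photos.csv"]))
# [{'@id': 'rights-on-images.csv'}, {'@id': 'rights-on-out-of-copyright-photos.csv'}]
```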
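
The two result files attached to the `CreateAction` in `ro-crate-metadata.json` are plain CSVs hosted in the `trove-images-rights-data` repository, so they can be loaded straight from their URLs — for example with `pandas`, which the rights notebook already uses. (The column layout isn't shown in this patch, so nothing is assumed about it here beyond it being parseable CSV.)

```
# Sketch only: load one of the harvested rights datasets referenced in the crate.
import pandas as pd

df = pd.read_csv(
    "https://github.com/GLAM-Workbench/trove-images-rights-data/raw/main/rights-on-images.csv"
)
print(df.head())
```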