Feature: streamlit website #100

Open · wants to merge 17 commits into base: master
4 changes: 3 additions & 1 deletion .gitignore
@@ -161,4 +161,6 @@ Thumbs.db
MyClippings.txt
dist/
images/
my_kindle_clippings.json
my_kindle_clippings.json
/My Clippings.txt
.streamlit/secrets.toml
2 changes: 2 additions & 0 deletions .streamlit/config.toml
@@ -0,0 +1,2 @@
[theme]
base="light"
91 changes: 91 additions & 0 deletions app.py
@@ -0,0 +1,91 @@
from io import StringIO

import streamlit as st

from kindle2notion.__main__ import (
    update_kindle_clippings_streamlit,
)

st.set_page_config(
    page_title="Kindle 2 Notion",
    page_icon="📓",
    layout="wide",
    initial_sidebar_state="expanded",
    menu_items={
        "Get help": "https://github.com/paperboi/kindle2notion",
        "Report a bug": "https://github.com/paperboi/kindle2notion/issues",
        "About": "Streamlit adaptation of the [Kindle2Notion library](https://github.com/paperboi/kindle2notion)",
    },
)


def main():
    st.write(
        """
        # Kindle 2 Notion

        Streamlit adaptation of the [Kindle2Notion library](https://github.com/paperboi/kindle2notion)

        ## Prerequisites
        """
    )
    with st.expander("**Show requirements**", expanded=False):
        st.write(
            """
            1. Create an integration on Notion.

                1. Duplicate this [database template](https://kindle2notion.notion.site/6d26062e3bb04dd89b988806978c1fe7?v=0d394a8162cc481280966b35a37465c2) to the workspace you want to use for storing your Kindle clippings.
                2. Open _Settings & Members_ from the left navigation bar.
                3. Select the _Integrations_ option listed under _Workspaces_ in the settings modal.
                4. Click on _Develop your own integrations_ to redirect to the integrations page.
                5. On the integrations page, select the _New integration_ option and enter the name of the integration and the workspace you want to use it with. Hit submit and your internal integration token will be generated.

            2. Go back to your database page and click on the _Share_ button on the top right corner. Use the selector to find your integration by its name and then click _Invite_. Your integration now has the requested permissions on the new database.
            """
        )
    form = st.form("kindle_form")
    notion_database_id = form.text_input(
        "Notion Database ID",
        help="""
        Find your _notion_database_id_ from the URL of the database you have copied to your workspace. For reference:

        `https://www.notion.so/myworkspace/a8aec43384f447ed84390e8e42c2e089?v=...`

        **_a8aec43384f447ed84390e8e42c2e089_** is the database_id
        """,
    )
    notion_api_auth_token = form.text_input(
        "Notion API Token",
        type="password",
    )
    clippings_file = form.file_uploader("Upload your clippings file", type=["txt"])
    enable_highlight_date = form.toggle(
        "Enable Highlight Date",
        help="Set to False if you don't want to see the _Date Added_ information in Notion.",
    )
    enable_book_cover = form.toggle(
        "Enable Book Cover",
        help="Set to False if you don't want to store the book cover in Notion.",
    )
    clippings_data = None

    submit = form.form_submit_button("Send Kindle Clippings to Notion")
    if submit:
        if clippings_file is not None:
            # To convert to a string based IO:
            stringio = StringIO(clippings_file.getvalue().decode("utf-8-sig"))
            # To read file as string:
            clippings_data = stringio.read()
            with st.spinner("Uploading data to Notion..."):
                update_kindle_clippings_streamlit(
                    notion_api_auth_token,
                    notion_database_id,
                    clippings_data,
                    enable_highlight_date,
                    enable_book_cover,
                )
            st.success("Kindle Clippings have been synced.")


if __name__ == "__main__":
    main()
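
For reference, the Notion Database ID help text above walks through pulling the 32-character ID out of the database URL. A minimal sketch of that extraction, assuming the URL shape shown in the help string (the helper name and regex are illustrative, not part of this PR):

```python
import re


def extract_database_id(url: str):
    # The ID is the 32-hex-character segment before the "?v=" view parameter, e.g.
    # https://www.notion.so/myworkspace/a8aec43384f447ed84390e8e42c2e089?v=...
    match = re.search(r"[0-9a-f]{32}", url)
    return match.group(0) if match else None


print(extract_database_id("https://www.notion.so/myworkspace/a8aec43384f447ed84390e8e42c2e089?v=1"))
# a8aec43384f447ed84390e8e42c2e089
```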
112 changes: 82 additions & 30 deletions kindle2notion/__main__.py
@@ -5,7 +5,81 @@

from kindle2notion.exporting import export_to_notion
from kindle2notion.parsing import parse_raw_clippings_text
from kindle2notion.reading import read_raw_clippings
from kindle2notion.reading import read_raw_clippings, read_raw_clippings_streamlit


def process_kindle_clippings(
    notion_api_auth_token,
    notion_database_id,
    all_clippings,
    enable_highlight_date,
    enable_book_cover,
):
    # Parse the all_clippings text and format the content to be sent to the Notion DB into all_books
    all_books = parse_raw_clippings_text(all_clippings)
    # Export all the contents in all_books into the Notion DB.
    export_to_notion(
        all_books,
        enable_highlight_date,
        enable_book_cover,
        notion_api_auth_token,
        notion_database_id,
    )
    with open("my_kindle_clippings.json", "w") as out_file:
        json.dump(all_books, out_file, indent=4)
    print("Transfer complete... Exiting script...")


def update_kindle_clippings_streamlit(
    notion_api_auth_token,
    notion_database_id,
    clippings_data,
    enable_highlight_date,
    enable_book_cover,
):
    notion = notional.connect(auth=notion_api_auth_token)
    db = notion.databases.retrieve(notion_database_id)
    if db:
        print("Notion page is found. Analyzing clippings file...")
        # Load the uploaded clippings text into all_clippings
        all_clippings = read_raw_clippings_streamlit(clippings_data)
        process_kindle_clippings(
            notion_api_auth_token,
            notion_database_id,
            all_clippings,
            enable_highlight_date,
            enable_book_cover,
        )
    else:
        print(
            "Notion page not found! Please check whether the Notion database ID is assigned properly."
        )


def update_kindle_clippings(
    notion_api_auth_token,
    notion_database_id,
    clippings_file,
    enable_highlight_date,
    enable_book_cover,
):
    notion = notional.connect(auth=notion_api_auth_token)
    db = notion.databases.retrieve(notion_database_id)
    if db:
        print("Notion page is found. Analyzing clippings file...")
        # Open the clippings text file and load it into all_clippings
        all_clippings = read_raw_clippings(clippings_file)
        process_kindle_clippings(
            notion_api_auth_token,
            notion_database_id,
            all_clippings,
            enable_highlight_date,
            enable_book_cover,
        )
    else:
        print(
            "Notion page not found! Please check whether the Notion database ID is assigned properly."
        )


@click.command()
@@ -29,35 +103,13 @@ def main(
    enable_highlight_date,
    enable_book_cover,
):
    notion = notional.connect(auth=notion_api_auth_token)
    db = notion.databases.retrieve(notion_database_id)

    if db:
        print("Notion page is found. Analyzing clippings file...")

        # Open the clippings text file and load it into all_clippings
        all_clippings = read_raw_clippings(clippings_file)

        # Parse all_clippings file and format the content to be sent to the Notion DB into all_books
        all_books = parse_raw_clippings_text(all_clippings)

        # Export all the contents in all_books into the Notion DB.
        export_to_notion(
            all_books,
            enable_highlight_date,
            enable_book_cover,
            notion_api_auth_token,
            notion_database_id,
        )

        with open("my_kindle_clippings.json", "w") as out_file:
            json.dump(all_books, out_file, indent=4)

        print("Transfer complete... Exiting script...")
    else:
        print(
            "Notion page not found! Please check whether the Notion database ID is assigned properly."
        )
    update_kindle_clippings(
        notion_api_auth_token,
        notion_database_id,
        clippings_file,
        enable_highlight_date,
        enable_book_cover,
    )


if __name__ == "__main__":
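The new Streamlit entry point above depends on read_raw_clippings_streamlit from kindle2notion.reading, whose diff is not shown in this view. A minimal sketch of what such a helper might look like, assuming the app passes in the already-decoded clippings text (this is an illustration, not the PR's actual reading.py change):

```python
def read_raw_clippings_streamlit(clippings_data: str) -> str:
    # The Streamlit app already decodes the uploaded file with utf-8-sig,
    # so only line endings need to be normalized before parsing.
    return clippings_data.replace("\r\n", "\n").strip()
```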
55 changes: 38 additions & 17 deletions kindle2notion/exporting.py
@@ -16,11 +16,11 @@


def export_to_notion(
    all_books: Dict,
    enable_highlight_date: bool,
    enable_book_cover: bool,
    notion_api_auth_token: str,
    notion_database_id: str,
    all_books: Dict,
    enable_highlight_date: bool,
    enable_book_cover: bool,
    notion_api_auth_token: str,
    notion_database_id: str,
) -> None:
    print("Initiating transfer...\n")

@@ -48,7 +48,7 @@ def export_to_notion(


def _prepare_aggregated_text_for_one_book(
    clippings: List, enable_highlight_date: bool
    clippings: List, enable_highlight_date: bool
) -> Tuple[str, str]:
    # TODO: Special case for books with len(clippings) >= 100 characters. Character limit in a Paragraph block in Notion is 100
    formatted_clippings = []
@@ -77,17 +77,17 @@ def _prepare_aggregated_text_for_one_book(


def _add_book_to_notion(
    title: str,
    author: str,
    clippings_count: int,
    formatted_clippings: list,
    last_date: str,
    notion_api_auth_token: str,
    notion_database_id: str,
    enable_book_cover: bool,
    title: str,
    author: str,
    clippings_count: int,
    formatted_clippings: list,
    last_date_string: str,
    notion_api_auth_token: str,
    notion_database_id: str,
    enable_book_cover: bool,
):
    notion = notional.connect(auth=notion_api_auth_token)
    last_date = datetime.strptime(last_date, "%A, %d %B %Y %I:%M:%S %p")
    last_date = __get_last_date_from_string(last_date_string)

    # Condition variables
    title_exists = False
@@ -131,7 +131,19 @@
        )
        # page_content = _update_book_with_clippings(formatted_clippings)
        page_content = Paragraph["".join(formatted_clippings)]
        notion.blocks.children.append(new_page, page_content)
        page_content_text_length: int = len(page_content.paragraph.rich_text)
        MAX_LENGTH = 100
        # Handles the Notion limit of 100 rich_text elements per block
        if page_content_text_length > MAX_LENGTH:
            original_page_content = page_content
            # Ceiling division so the final partial batch is not dropped
            num_of_loops = (page_content_text_length + MAX_LENGTH - 1) // MAX_LENGTH
            for loop_num in range(1, num_of_loops + 1):
                page_content = Paragraph[
                    original_page_content.paragraph.rich_text[
                        (loop_num - 1) * MAX_LENGTH : loop_num * MAX_LENGTH
                    ]
                ]
                notion.blocks.children.append(new_page, page_content)
        else:
            notion.blocks.children.append(new_page, page_content)
        block_id = new_page.id
        if enable_book_cover:
            # Fetch a book cover from Google Books if the cover for the page is not set
@@ -158,7 +170,7 @@
        page_content = Paragraph["".join(formatted_clippings)]
        notion.blocks.children.append(page, page_content)
        # TODO: Delete existing page children (or figure out how to find changes to be made by comparing it with local json file.)
        current_clippings_count = int(str(page["Highlights"]))
        current_clippings_count = int(float(str(page["Highlights"])))
        page["Highlights"] = Number[clippings_count]
        page["Last Highlighted"] = Date[last_date.isoformat()]
        page["Last Synced"] = Date[datetime.now().isoformat()]
@@ -174,6 +186,15 @@
    return message


def __get_last_date_from_string(last_date_string: str) -> datetime:
    if not last_date_string:
        return datetime.now()
    try:
        return datetime.strptime(last_date_string, "%A, %d %B %Y %I:%M:%S %p")
    except ValueError:
        # The date string is not in the English AM/PM format; retry with the 24-hour format
        return datetime.strptime(last_date_string, "%A, %d %B %Y %H:%M:%S")

# def _create_rich_text_object(text):
#     if "Note: " in text:
#         # Bold text
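The block-splitting logic added to _add_book_to_notion above works around the Notion API limit of 100 rich_text elements per block append (the limit value is taken from the MAX_LENGTH constant in the diff, not verified independently). A standalone sketch of the same slicing, with illustrative data:

```python
MAX_LENGTH = 100
rich_text = list(range(250))  # stand-in for page_content.paragraph.rich_text

# Ceiling division so the final partial batch (elements 200-249) is not dropped.
num_of_loops = (len(rich_text) + MAX_LENGTH - 1) // MAX_LENGTH
batches = [
    rich_text[(i - 1) * MAX_LENGTH : i * MAX_LENGTH]
    for i in range(1, num_of_loops + 1)
]
assert sum(len(batch) for batch in batches) == len(rich_text)
```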
Empty file.
38 changes: 38 additions & 0 deletions kindle2notion/languages/enums.py
@@ -0,0 +1,38 @@
from enum import Enum


class Locale(Enum):
    # Enum containing languages
    ENGLISH = "en"
    SPANISH = "es"

    def __str__(self):
        return self.value


class Word(Enum):
    # For each word, we have to handle different languages
    NOTE = {
        Locale.ENGLISH: "note",
        Locale.SPANISH: "nota",
    }
    LOCATION = {
        Locale.ENGLISH: "location",
        Locale.SPANISH: "posición",
    }
    PAGE = {
        Locale.ENGLISH: "page",
        Locale.SPANISH: "página",
    }
    DATE_ADDED = {
        Locale.ENGLISH: "added on",
        Locale.SPANISH: "añadido el",
    }
    # Date formats also depend on language
    DATE_FORMAT = {
        Locale.ENGLISH: "%A, %d %B %Y %I:%M:%S %p",
        Locale.SPANISH: "%A, %d %B %Y %H:%M:%S",
    }

    def __str__(self, language=Locale.ENGLISH):
        return self.value[language]
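
A quick usage sketch of the Word enum's locale-aware __str__ (note that plain str() can only ever return the English default, since Python never passes the extra language argument when formatting):

```python
from kindle2notion.languages.enums import Locale, Word

print(Word.PAGE.__str__(Locale.SPANISH))  # página
print(str(Word.DATE_ADDED))               # added on (English default)
```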
24 changes: 24 additions & 0 deletions kindle2notion/languages/word_detector.py
@@ -0,0 +1,24 @@
from typing import List

from kindle2notion.languages.enums import Word, Locale


class WordDetector:

    def __init__(self, languages: List[Locale]):
        self.languages = languages
        self.language_words = {lang: set() for lang in languages}

        for word in Word:
            for lang in word.value:
                self.language_words[lang].add(word.value[lang])

    def detect(self, text):
        scores = {lang: 0 for lang in self.languages}
        for lang, words in self.language_words.items():
            scores[lang] = sum([len(word) for word in words if self.has_word(text, word)])
        return max(scores, key=scores.get)

    def has_word(self, text, word):
        return word.lower() in text.lower()
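
And a usage sketch of the new WordDetector, assuming both locales from enums.py are passed (its __init__ indexes every locale that appears in Word, so leaving one out would raise a KeyError); the sample metadata line below is invented for illustration:

```python
from kindle2notion.languages.enums import Locale
from kindle2notion.languages.word_detector import WordDetector

detector = WordDetector([Locale.ENGLISH, Locale.SPANISH])

# Spanish keywords ("página", "posición", "añadido el") outweigh the English ones here.
sample = "- La página 12 | posición 180-181 | Añadido el viernes, 3 de marzo de 2023"
print(detector.detect(sample))  # Locale.SPANISH
```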
