Skip to content

Commit

Permalink
more docs
Browse files Browse the repository at this point in the history
  • Loading branch information
vn-ki committed Oct 15, 2018
1 parent 07c1c5e commit 078dc53
Show file tree
Hide file tree
Showing 6 changed files with 119 additions and 16 deletions.
2 changes: 1 addition & 1 deletion Pipfile
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ verify_ssl = true
name = "pypi"

[packages]
anime-downloader = {editable = true, path = "."}
anime-downloader = {editable = true, path = ".", extras = ["cloudflare"]}

[dev-packages]
sphinx = "*"
Expand Down
23 changes: 16 additions & 7 deletions Pipfile.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

97 changes: 90 additions & 7 deletions anime_downloader/sites/anime.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,9 +32,9 @@ class BaseAnime:
Attributes
----------
sitename: string
sitename: str
name of the site
title: string
title: str
Title of the anime
meta: dict
metadata about the anime. [Can be empty]
Expand All @@ -53,8 +53,15 @@ def search(cls, query):
"""
Search searches for the anime using the query given.
query :
query is
Parameters
----------
query: str
query is the query keyword to be searched.
Returns
-------
list
List of :py:class:`~anime_downloader.sites.anime.SearchResult`
"""
return

Expand All @@ -72,7 +79,8 @@ def __init__(self, url=None, quality='720p',

if not _skip_online_data:
logging.info('Extracting episode info from page')
self.get_data()
self._episode_urls = self.get_data()
self._len = len(self._episode_urls)

@classmethod
def verify_url(self, url):
Expand All @@ -81,6 +89,28 @@ def verify_url(self, url):
return False

def get_data(self):
"""
get_data is called inside the :code:`__init__` of
:py:class:`~anime_downloader.sites.anime.BaseAnime`. It is used to get
the necessary data about the anime and its episodes.
This function calls
:py:class:`~anime_downloader.sites.anime.BaseAnime._scarpe_episodes`
and
:py:class:`~anime_downloader.sites.anime.BaseAnime._scrape_metadata`
TODO: Refactor this so that classes which need not be soupified don't
have to overload this function.
Returns
-------
list
A list of tuples of episodes containing episode name and
episode url.
Ex::
[('1', 'https://9anime.is/.../...', ...)]
"""
self._episode_urls = []
r = requests.get(self.url, headers=desktop_headers)
soup = BeautifulSoup(r.text, 'html.parser')
Expand Down Expand Up @@ -125,9 +155,34 @@ def __str__(self):
return self.title

def _scarpe_episodes(self, soup):
    """
    Scrape the episode URLs from the anime page.

    This is a stub on the base class and must be overridden by
    site-specific subclasses; the base implementation returns ``None``.

    NOTE(review): the name contains a typo ("scarpe" for "scrape") but is
    kept as-is for backward compatibility — ``get_data`` and the Sphinx
    docs reference it by this name.

    Parameters
    ----------
    soup: bs4.BeautifulSoup
        The anime page HTML parsed with BeautifulSoup.

    Returns
    -------
    list of str
        A list of episode URLs (``None`` in this base stub).
    """
    return

def _scrape_metadata(self, soup):
    """
    Scrape the anime's metadata from the anime page.

    This is a stub on the base class and must be overridden by
    site-specific subclasses; the base implementation returns ``None``.

    Parameters
    ----------
    soup: bs4.BeautifulSoup
        The anime page HTML parsed with BeautifulSoup.
    """
    return


Expand Down Expand Up @@ -217,12 +272,40 @@ def download(self, force=False, path=None,

downloader.download()


class SearchResult:
    """
    SearchResult holds one result of a search performed by an Anime class.

    Parameters
    ----------
    title: str
        Title of the anime.
    url: str
        URL of the anime.
    poster: str
        URL for the poster of the anime.
    meta: dict
        Additional metadata regarding the anime.

    Attributes
    ----------
    title: str
        Title of the anime.
    url: str
        URL of the anime.
    poster: str
        URL for the poster of the anime.
    meta: dict
        Additional metadata regarding the anime.
    """

    # Fix: the diff residue left a dead 3-argument __init__ shadowed by
    # this one, and a dead store (self.meta = '' immediately overwritten).
    # Only the live 4-argument constructor is kept; meta defaults to ''
    # (a falsy placeholder, per the original) rather than a dict.
    def __init__(self, title, url, poster, meta=''):
        self.title = title
        self.url = url
        self.poster = poster
        self.meta = meta

    def __repr__(self):
        return '<SearchResult Title: {} URL: {}>'.format(self.title, self.url)
Expand Down
8 changes: 8 additions & 0 deletions docs/advanced/custom_site.rst
Original file line number Diff line number Diff line change
@@ -1,3 +1,11 @@
Writing your own custom site class
**********************************

:code:`anime_downloader` is built with easy extensibility in mind.

Each site (in the tool) can roughly be classified into one of two categories.

- Sites which don't use cloudflare DDoS protection. Ex: :py:class:`~anime_downloader.sites.nineanime.NineAnime`
- Sites which use cloudflare DDoS protection. Ex: :py:class:`~anime_downloader.sites.kissanime.KissAnime`

Sites which don't use cloudflare have the base class :py:class:`~anime_downloader.sites.anime.BaseAnime`. Sites which use cloudflare have the base class :py:class:`~anime_downloader.sites.baseanimecf.BaseAnimeCF`.
4 changes: 3 additions & 1 deletion docs/api/anime.rst
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,6 @@ Base classes
.. automodule:: anime_downloader.sites.anime

.. autoclass:: anime_downloader.sites.anime.BaseAnime
:members: search
:members: search, get_data, _scarpe_episodes, _scrape_metadata

.. autoclass:: anime_downloader.sites.anime.SearchResult
1 change: 1 addition & 0 deletions docs/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]

# Add any paths that contain templates here, relative to this directory.
Expand Down

0 comments on commit 078dc53

Please sign in to comment.