Commit

Merge pull request #649 from AbdullahM0hamed/ryuanime
Update ryuanime
AbdullahM0hamed committed Mar 21, 2021
2 parents e8ed14f + 2c7c37b commit a47a524
Showing 3 changed files with 39 additions and 18 deletions.
7 changes: 6 additions & 1 deletion anime_downloader/config.py
@@ -117,7 +117,12 @@
         },
         'ryuanime': {
             'version': 'subbed',
-            'server': 'trollvid',
+            'servers': [
+                'vidstream',
+                'mp4upload',
+                'xstreamcdn',
+                'trollvid'
+            ]
         },
         'animekisa': {
             'server': 'gcloud',
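
Note on the config change: the single 'server' key becomes an ordered 'servers' preference list. A minimal sketch of how such a list can be consumed, picking the first preferred server that actually appears among the scraped sources (pick_source and the sample data are illustrative, not the project's real selection logic):

def pick_source(sources, preferred_servers):
    # Walk the user's preference list in order and return the first
    # scraped source whose extractor name matches.
    for server in preferred_servers:
        for source in sources:
            if source["extractor"] == server:
                return source
    return None

sources = [
    {"extractor": "trollvid", "url": "https://trollvid.net/embed/abc"},
    {"extractor": "vidstream", "url": "https://gogo-stream.com/server.php?id=xyz"},
]
preferred = ['vidstream', 'mp4upload', 'xstreamcdn', 'trollvid']
print(pick_source(sources, preferred))  # the vidstream entry wins despite its position in sources
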
15 changes: 12 additions & 3 deletions anime_downloader/extractors/vidstream.py
@@ -28,7 +28,8 @@ def _get_data(self):
         }

         url = self.url.replace('https:////', 'https://')
-        url = url.replace('https://gogo-stream.com/download', 'https://gogo-stream.com/server.php')
+        url = url.replace('https://gogo-stream.com/download',
+                          'https://gogo-stream.com/server.php')
         soup = helpers.soupify(helpers.get(url))
         linkserver = soup.select('li.linkserver')
         logger.debug('Linkserver: {}'.format(linkserver))
@@ -64,7 +65,11 @@ def _get_link(self, soup):
         # <input type="hidden" id="title" value="Yakusoku+no+Neverland">
         # <input type="hidden" id="typesub" value="SUB">
         # Used to create a download url.
-        soup_id = soup.select('input#id')[0]['value']
+        try:
+            soup_id = soup.select('input#id')[0]['value']
+        except IndexError:
+            return self._get_link_new(soup)
+
         soup_title = soup.select('input#title')[0]['value']
         soup_typesub = soup.select('input#typesub')[0].get('value', 'SUB')

@@ -103,11 +108,15 @@ def _get_link(self, soup):

         return {'stream_url': ''}

+    def _get_link_new(self, soup):
+        link_buttons = soup.select('div.mirror_link')[
+            0].select('div.dowload > a[href]')
+        return {'stream_url': link_buttons[0].get('href')}
+

 class Extractor:
     """dummy class to prevent changing self"""

     def __init__(self, dictionary):
         for k, v in dictionary.items():
             setattr(self, k, v)

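Note on the vidstream change: _get_link now tolerates pages that lack the hidden <input id="id"> element by catching the IndexError and falling back to _get_link_new, which reads the first download button instead. A self-contained sketch of the same fallback pattern, using plain BeautifulSoup and a made-up page snippet ('dowload' is the site's own class name, reproduced as-is):

from bs4 import BeautifulSoup

# Made-up snippet for the fallback case: no <input id="id">,
# only a mirror-link download button.
html = """
<div class="mirror_link">
  <div class="dowload"><a href="https://example.com/ep1.mp4">Download</a></div>
</div>
"""

soup = BeautifulSoup(html, "html.parser")

try:
    stream_id = soup.select('input#id')[0]['value']  # primary path
except IndexError:
    # Fallback path: take the first download anchor instead.
    buttons = soup.select('div.mirror_link')[0].select('div.dowload > a[href]')
    print({'stream_url': buttons[0].get('href')})
    # -> {'stream_url': 'https://example.com/ep1.mp4'}
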
35 changes: 21 additions & 14 deletions anime_downloader/sites/ryuanime.py
@@ -22,21 +22,25 @@ class RyuAnime(Anime, sitename='ryuanime'):

     @classmethod
     def search(cls, query):
-        soup = helpers.soupify(helpers.get("https://ryuanime.com/browse-anime", params={"search": query}))
-        result_data = soup.select("li.list-inline-item:has(p.anime-name):has(a.ani-link)")
+        soup = helpers.soupify(helpers.get(
+            "https://ryuanime.com/browse-anime", params={"search": query}))
+        result_data = soup.select(
+            "li.list-inline-item:has(p.anime-name):has(a.ani-link)")

         search_results = [
             SearchResult(
                 title=result.select("p.anime-name")[0].text,
-                url='https://ryuanime.com' + result.select("a.ani-link")[0].get("href")
+                url='https://ryuanime.com' +
+                result.select("a.ani-link")[0].get("href")
             )
             for result in result_data
         ]
         return search_results

     def _scrape_episodes(self):
         soup = helpers.soupify(helpers.get(self.url))
-        episodes = ['https://ryuanime.com' + x.get("href") for x in soup.select("li.jt-di > a")]
+        episodes = ['https://ryuanime.com' +
+                    x.get("href") for x in soup.select("li.jt-di > a")]

         if len(episodes) == 0:
             logger.warning("No episodes found")
@@ -49,17 +53,16 @@ def _scrape_metadata(self):


 class RyuAnimeEpisode(AnimeEpisode, sitename='ryuanime'):
-    def getLink(self, name, _id):
-        if name == "trollvid":
-            return "https://trollvid.net/embed/" + _id
-        elif name == "mp4upload":
-            return f"https://mp4upload.com/embed-{_id}.html"
-        elif name == "xstreamcdn":
-            return f"https://xstreamcdn.com/v/" + _id
-
     def _get_sources(self):
         page = helpers.get(self.url).text

+        server_links = {
+            'trollvid': 'https://trollvid.net/embed/{}',
+            'mp4upload': 'https://mp4upload.com/embed-{}.html',
+            'xstreamcdn': 'https://xstreamcdn.com/v/{}',
+            'vidstreaming': 'https://vidstreaming.io/download?id={}'
+        }
+
         # Example:
         """
         [
@@ -69,16 +72,20 @@ def _get_sources(self):
             }
         ]
         """
-        hosts = json.loads(re.search(r"let.*?episode.*?videos.*?(\[\{.*?\}\])", page).group(1))
+        hosts = json.loads(
+            re.search(r"let.*?episode.*?videos.*?(\[\{.*?\}\])", page).group(1))

         sources_list = []

         for host in hosts:
             name = host.get("host")
             _id = host.get("id")
-            link = self.getLink(name, _id)
+            link = server_links[name].format(_id)

             if link:
+                if name == 'vidstreaming':
+                    name = 'vidstream'
+
                 sources_list.append({
                     "extractor": name,
                     "url": link,

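Note on the ryuanime change: the getLink if/elif chain is replaced by a server_links dict of URL templates, the host list is pulled out of the page's inline JavaScript with a regex, and the site's 'vidstreaming' host name is remapped to 'vidstream' so it matches the extractor key used in the config. A standalone sketch of that flow against a made-up page string:

import json
import re

# Made-up page body shaped like the inline JS the regex targets.
page = 'let episode = {"videos": [{"host": "vidstreaming", "id": "MTIzNDU="}]}'

server_links = {
    'trollvid': 'https://trollvid.net/embed/{}',
    'mp4upload': 'https://mp4upload.com/embed-{}.html',
    'xstreamcdn': 'https://xstreamcdn.com/v/{}',
    'vidstreaming': 'https://vidstreaming.io/download?id={}'
}

hosts = json.loads(
    re.search(r"let.*?episode.*?videos.*?(\[\{.*?\}\])", page).group(1))

for host in hosts:
    name = host.get("host")
    _id = host.get("id")
    link = server_links[name].format(_id)

    if name == 'vidstreaming':
        name = 'vidstream'  # align with the extractor/config key

    print({"extractor": name, "url": link})
# -> {'extractor': 'vidstream', 'url': 'https://vidstreaming.io/download?id=MTIzNDU='}
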