Merge pull request #681 from ArjixWasTaken/patch-32
Added provider wcostream
AbdullahM0hamed committed May 23, 2021
2 parents 5d63f2b + 08c51d4 commit e3889d0
Showing 6 changed files with 120 additions and 0 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -93,6 +93,7 @@ Yeah. Me too! That's why this tool exists.
- Vidstream
- Voiranime
- Vostfree
- Wcostream

Sites that require Selenium **DO NOT** and **WILL NOT** work on mobile operating systems

4 changes: 4 additions & 0 deletions anime_downloader/config.py
@@ -133,6 +133,10 @@
            'servers': ['vidstream', 'gcloud', 'yourupload', 'hydrax'],
            'version': 'subbed',
        },
        'wcostream': {
            'servers': ['vidstreampro', 'mcloud'],
            'version': 'subbed',
        },
        'animeflix': {
            'server': 'AUEngine',
            'fallback_servers': ['FastStream'],
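The new block above only sets the provider's default server preference. For reference, a minimal sketch of reading (and overriding) those defaults; it assumes the per-site settings are exposed through the package's Config mapping under 'siteconfig', as the surrounding entries in this file suggest:

# Sketch only: assumes anime_downloader.config.Config exposes per-site
# settings under 'siteconfig'; key names mirror the diff above.
from anime_downloader.config import Config

wco_cfg = Config['siteconfig']['wcostream']
print(wco_cfg['servers'])   # ['vidstreampro', 'mcloud'] by default
print(wco_cfg['version'])   # 'subbed'

# Preferring mcloud would just mean reordering the list in the user config,
# e.g. "wcostream": {"servers": ["mcloud", "vidstreampro"], "version": "subbed"}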
6 changes: 6 additions & 0 deletions anime_downloader/extractors/__init__.py
@@ -67,6 +67,12 @@
        'regex': 'yourupload',
        'class': 'Yourupload'
    },
    {
        'sitename': 'wcostream',
        'modulename': 'wcostream',
        'regex': 'wcostream',
        'class': 'WcoStream'
    },
    {
        'sitename': 'vidstream',
        'modulename': 'vidstream',
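The registry entry above is what lets get_extractor('wcostream') resolve to the new extractor class. A simplified sketch of how such a lookup can work; the real helper in anime_downloader.extractors imports the module lazily, and the function body below is illustrative rather than a copy of it:

import importlib
import re

# Simplified registry mirroring the entries in extractors/__init__.py.
ALL_EXTRACTORS = [
    {
        'sitename': 'wcostream',
        'modulename': 'wcostream',
        'regex': 'wcostream',
        'class': 'WcoStream',
    },
    # ... other extractors ...
]


def get_extractor(name):
    """Return the first extractor class whose regex matches `name`."""
    for extractor in ALL_EXTRACTORS:
        if re.match(extractor['regex'], name):
            module = importlib.import_module(
                'anime_downloader.extractors.' + extractor['modulename'])
            return getattr(module, extractor['class'])
    raise KeyError(f'No extractor found for {name!r}')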
37 changes: 37 additions & 0 deletions anime_downloader/extractors/wcostream.py
@@ -0,0 +1,37 @@
from anime_downloader.extractors.base_extractor import BaseExtractor
from anime_downloader.sites import helpers
import re


class WcoStream(BaseExtractor):
    def _get_data(self):
        try:
            if self.url.startswith('https://vidstream.pro/e'):
                base_url = 'https://vidstream.pro'
            elif self.url.startswith('https://mcloud.to/e/'):
                base_url = 'https://mcloud.to'
            else:
                return []

            html = helpers.get(self.url, referer='https://wcostream.cc/')
            id_ = re.findall(r"/e/(.*?)\?domain", self.url)[0]
            skey = re.findall(r"skey\s=\s['\"](.*?)['\"];", html.text)[0]

            apiLink = f"{base_url}/info/{id_}?domain=wcostream.cc&skey={skey}"
            referer = f"{base_url}/e/{id_}?domain=wcostream.cc"

            response = helpers.get(apiLink, referer=referer).json()

            if response['success'] is True:
                sources = [
                    {
                        'stream_url': x['file']
                    }
                    for x in response['media']['sources']
                ]
                return sources
            else:
                return []

        except Exception:
            # Return an empty list so callers can iterate the result
            # the same way as the other return paths above.
            return []
1 change: 1 addition & 0 deletions anime_downloader/sites/__init__.py
@@ -44,6 +44,7 @@
    ('vidstream', 'vidstream', 'VidStream'),
    # ('voiranime', 'voiranime', 'VoirAnime'),
    ('vostfree', 'vostfree', 'VostFree'),
    ('wcostream', 'wcostream', 'WcoStream'),
]


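The one-line registration above is what makes the provider addressable by name. A short sketch of resolving it, assuming get_anime_class accepts a provider name the way it does for the other ('sitename', 'modulename', 'ClassName') entries in this list:

# Sketch: resolve the newly registered provider by name (assumes
# get_anime_class matches on the sitename, as for the other providers).
from anime_downloader.sites import get_anime_class

WcoStream = get_anime_class('wcostream')
print(WcoStream.sitename)  # 'wcostream'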
71 changes: 71 additions & 0 deletions anime_downloader/sites/wcostream.py
@@ -0,0 +1,71 @@
from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.extractors import get_extractor
from anime_downloader.sites import helpers

import re


class WcoStream(Anime, sitename='wcostream'):

    sitename = 'wcostream'

    @classmethod
    def search(cls, query):
        soup = helpers.soupify(helpers.get(
            'https://wcostream.cc/search',
            params={'keyword': query}
        ))
        results = soup.select('.film_list-wrap > .flw-item')

        return [
            SearchResult(
                title=x.find('img')['alt'],
                url=x.find('a')['href'],
                meta={'year': x.select_one('.fd-infor > .fdi-item').text.strip()},
                meta_info={
                    'version_key_dubbed': '(Dub)'
                }
            )
            for x in results
        ]

    def _scrape_episodes(self):
        soup = helpers.soupify(helpers.get(self.url))
        episodes = soup.select_one('#content-episodes').select('ul.nav > li.nav-item')  # noqa
        return [
            x.find('a')['href']
            for x in episodes
            if 'https://wcostream.cc/watch' in x.find('a')['href']
        ]

    def _scrape_metadata(self):
        soup = helpers.soupify(helpers.get(self.url))
        self.title = soup.select_one(
            'meta[property="og:title"]'
        )['content'].split('Episode')[0].strip()


class WcoStreamEpisode(AnimeEpisode, sitename='wcostream'):
    def _get_sources(self):
        soup = helpers.soupify(helpers.get(self.url))
        servers = soup.select("#servers-list > ul > li")
        servers = [
            {
                "name": server.find('span').text.strip(),
                "link": server.find('a')['data-embed']
            }
            for server in servers
        ]

        servers = sorted(servers, key=lambda x: x['name'].lower() in self.config['servers'][0].lower())[::-1]  # noqa
        sources = []

        for server in servers:
            ext = get_extractor('wcostream')(
                server['link'],
                quality=self.quality,
                headers={}
            )
            sources.extend([('no_extractor', x['stream_url']) for x in ext._get_data()])  # noqa

        return sources
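Finally, a hedged usage sketch of the new provider class; the query is illustrative and the results depend on what the site lists at the time:

# Sketch: exercising the provider directly. Assumes the Anime base class
# drives _scrape_episodes/_scrape_metadata on construction, as it does for
# the other providers.
from anime_downloader.sites.wcostream import WcoStream

results = WcoStream.search('one piece')
for result in results[:3]:
    print(result.title, result.url)

if results:
    anime = WcoStream(results[0].url)
    print(anime.title)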
