Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
  • Loading branch information
uk365 committed Jan 30, 2024
1 parent 18a82c8 commit 87bbfa5
Showing 1 changed file with 31 additions and 32 deletions.
63 changes: 31 additions & 32 deletions FZBypass/core/bypass_scrape.py
Original file line number Diff line number Diff line change
Expand Up @@ -164,41 +164,40 @@ async def toonworld4all(url: str):
def re_findall(regex: str, text: str) -> List[str]:
    """Return all non-overlapping matches of *regex* found in *text*."""
    pattern = re.compile(regex)
    return pattern.findall(text)

# async def tamilmv(url):
# req=requests.get(url)
# soup=bs(req.content,'html.parser')
# magnets=soup.findAll('a')
# links=[]
# #for i in magnets:
# for no, i in enumerate(magnets, start=1):
# ax = re_findall(MAGNET_REGEX, i.get_text())
# try:
# if i.get_text()=="MAGNET" or i.find('img').get('alt')=="magnet.png":
# j=i.find_previous_sibling('strong')
# links.append({"name":j.get_text(),"link":i.get('href')})
# except:
# pass
# print(links)
async def tamilmv(url):
    """Scrape a TamilMV post page and return an HTML-formatted list of
    magnet/torrent share links.

    Collects every <a> anchor that is either the literal text "MAGNET" or
    wraps a "magnet.png" image, pairs it with the preceding <strong> title,
    and renders the result as numbered Telegram-style HTML markup.

    NOTE(review): network I/O via requests.get — presumably `url` is a full
    TamilMV topic URL; confirm against the caller.
    """
    req = requests.get(url)
    soup = bs(req.content, 'html.parser')
    links = []
    for anchor in soup.findAll('a'):
        try:
            img = anchor.find('img')
            if anchor.get_text() == "MAGNET" or (img is not None and img.get('alt') == "magnet.png"):
                # The post title sits in the <strong> sibling just before the link.
                title = anchor.find_previous_sibling('strong')
                links.append({"name": title.get_text(), "link": anchor.get('href')})
        except Exception:
            # Malformed anchors (no img/strong sibling) are skipped on purpose.
            pass
    # Was: parse_data was never initialized, `links` (a list) was indexed
    # with string keys, and `no` was undefined — build the markup per item.
    parse_data = ""
    for no, item in enumerate(links, start=1):
        parse_data += f'''
{no}. <code>{item['name']}</code>
┖ <b>Links :</b> <a href="https://t.me/share/url?url={item['link']}"><b>Magnet </b>🧲</a> | <a href="{item['link']}"><b>Torrent 🌐</b></a>'''
    return parse_data


async def tamilmv(url):
    """Scrape a TamilMV post page (cloudscraper variant) and return an
    HTML-formatted list of magnet/torrent links.

    Pairs each `.torrent` attachment anchor with its matching magnet anchor
    by position and renders numbered Telegram-style HTML markup, headed by
    the page title.

    NOTE(review): assumes the page lists torrents and magnets in the same
    order so zip() pairs them correctly — confirm against live pages.
    """
    cget = create_scraper().get
    # Redirects are not followed; the post page itself carries the anchors.
    resp = cget(url, allow_redirects=False)
    soup = BeautifulSoup(resp.text, 'html.parser')
    mag = soup.select('a[href^="magnet:?xt=urn:btih:"]')
    tor = soup.select('a[data-fileext="torrent"]')
    parse_data = f"<b><u>{soup.title.string}</u></b>"
    for no, (t, m) in enumerate(zip(tor, mag), start=1):
        # Strip site-tag noise and the .torrent suffix from the display name.
        filename = sub(r"www\S+|\- |\.torrent", '', t.string) if t is not None and t.string is not None else ""
        # Was: the template emitted the literal "(unknown)" while `filename`
        # was computed and never used — show the cleaned filename instead.
        parse_data += f'''
{no}. <code>{filename}</code>
┖ <b>Links :</b> <a href="https://t.me/share/url?url={m['href'].split('&')[0]}"><b>Magnet </b>🧲</a> | <a href="{t['href']}"><b>Torrent 🌐</b></a>'''
    return parse_data

0 comments on commit 87bbfa5

Please sign in to comment.