Commit
Updated
html: mediaserver, so the Windows exe build picks up the .pyc files
anitoonstv: fix
ciberpeliculashd: fix links
cine24h: fix trailers
descargacineclasico: added to global search
erotik: fix links
filesmonster_catalogue, freecambay, hentaiespanol: removed, the sites no longer exist
pelis24: fix trailers
pelisplay: fix links
serieslan: fix
tvpelis: new channel
jawcloud: updated test_video_exists
mystream: new server
rutube: new server
upvid: cosmetic changes
Intel11 committed Dec 12, 2018
1 parent 6543e55 commit 8227575
Showing 35 changed files with 895 additions and 1,025 deletions.
2 changes: 1 addition & 1 deletion mediaserver/platformcode/controllers/html.py
@@ -626,7 +626,7 @@ def show_channel_settings(self, list_controls=None, dict_values=None, caption=""
# Get the channel the call was made from and load the settings available for that channel
if not channelpath:
channelpath = inspect.currentframe().f_back.f_back.f_code.co_filename
channelname = os.path.basename(channelpath).replace(".py", "")
channelname = os.path.basename(channelpath).split(".")[0]
ch_type = os.path.basename(os.path.dirname(channelpath))

# If we don't have list_controls, they have to be pulled from the channel's json
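A quick illustration of why the basename change above matters for the compiled Windows build (a standalone sketch, not part of this diff): replace(".py", "") leaves a stray "c" behind when the controller is loaded from a .pyc file, while split(".")[0] drops the whole extension.

import os

for channelpath in ("controllers/html.py", "controllers/html.pyc"):
    old_name = os.path.basename(channelpath).replace(".py", "")   # "html" / "htmlc"
    new_name = os.path.basename(channelpath).split(".")[0]        # "html" / "html"
    print("%s -> old: %r, new: %r" % (channelpath, old_name, new_name))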
4 changes: 2 additions & 2 deletions plugin.video.alfa/addon.xml
@@ -10,8 +10,8 @@
<extension point="xbmc.addon.metadata">
<summary lang="es">Navega con Kodi por páginas web.</summary>
<assets>
<icon>logo-cumple.png</icon>
<fanart>fanart1.jpg</fanart>
<icon>logo-n.jpg</icon>
<fanart>fanart-xmas.jpg</fanart>
<screenshot>resources/media/themes/ss/1.jpg</screenshot>
<screenshot>resources/media/themes/ss/2.jpg</screenshot>
<screenshot>resources/media/themes/ss/3.jpg</screenshot>
10 changes: 5 additions & 5 deletions plugin.video.alfa/channels/anitoonstv.py
@@ -32,11 +32,11 @@ def mainlist(item):

itemlist = list()

itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=host+"/lista-de-anime.php",
itemlist.append(Item(channel=item.channel, action="lista", title="Series", contentTitle="Series", url=host+"/lista-de-anime.php",
thumbnail=thumb_series, range=[0,19]))
itemlist.append(Item(channel=item.channel, action="lista", title="Películas", url=host+"/catalogo.php?g=&t=peliculas&o=0",
itemlist.append(Item(channel=item.channel, action="lista", title="Películas", contentTitle="Películas", url=host+"/catalogo.php?g=&t=peliculas&o=0",
thumbnail=thumb_series, range=[0,19] ))
itemlist.append(Item(channel=item.channel, action="lista", title="Especiales", url=host+"/catalogo.php?g=&t=especiales&o=0",
itemlist.append(Item(channel=item.channel, action="lista", title="Especiales", contentTitle="Especiales", url=host+"/catalogo.php?g=&t=especiales&o=0",
thumbnail=thumb_series, range=[0,19]))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
thumbnail=thumb_series, range=[0,19]))
@@ -109,14 +109,14 @@ def lista(item):
context2 = autoplay.context
context.extend(context2)
scrapedurl=host+scrapedurl
if item.title!="Series":
if item.contentTitle!="Series":
itemlist.append(item.clone(title=scrapedtitle, contentTitle=show,url=scrapedurl,
thumbnail=scrapedthumbnail, action="findvideos", context=context))
else:
itemlist.append(item.clone(title=scrapedtitle, contentSerieName=show,url=scrapedurl, plot=scrapedplot,
thumbnail=scrapedthumbnail, action="episodios", context=context))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
itemlist.append(Item(channel=item.channel, url=item.url, range=next_page, title='Pagina Siguente >>>', action='lista'))
itemlist.append(Item(channel=item.channel, url=item.url, range=next_page, title='Pagina Siguente >>>', contentTitle=item.title, action='lista'))

return itemlist

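Why anitoonstv now routes on contentTitle instead of title (an illustrative, simplified sketch rather than code from the channel): the 'Pagina Siguente >>>' entry overwrites title, so a check on item.title stops matching "Series" from page two onwards, while contentTitle keeps carrying the section name.

# Items are approximated here as plain dicts; clone(title=...) behaves like a copy with
# one key overridden.
series_page1 = {"title": "Series", "contentTitle": "Series"}
series_page2 = dict(series_page1, title="Pagina Siguente >>>")

print(series_page2["title"] == "Series")         # False: the old check no longer lists episodes
print(series_page2["contentTitle"] == "Series")  # True: the new check still does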
6 changes: 3 additions & 3 deletions plugin.video.alfa/channels/ciberpeliculashd.py
@@ -232,11 +232,11 @@ def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
url = scrapertools.find_single_match(data, 'iframe-.*?src="([^"]+)')
data = httptools.downloadpage(url).data
patron = '<a href="([^"]+)'
patron = '(?i)src=&quot;([^&]+)&'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
if ".gif" in scrapedurl:
continue
title = "Ver en: %s"
itemlist.append(item.clone(action = "play",
title = title,
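For reference, the new ciberpeliculashd pattern targets player URLs that the embed page serves HTML-escaped (src=&quot;...&quot;), capturing up to the next entity. A small self-contained check against made-up markup (the real page content may differ):

import re

data = 'player.load("&lt;iframe src=&quot;https://some-videohost.example/embed/abc123&quot; frameborder=&quot;0&quot;&gt;")'
patron = '(?i)src=&quot;([^&]+)&'
print(re.findall(patron, data))   # ['https://some-videohost.example/embed/abc123']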
5 changes: 3 additions & 2 deletions plugin.video.alfa/channels/cine24h.json
@@ -3,14 +3,15 @@
"name": "Cine24H",
"active": true,
"adult": false,
"language": ["lat", "cast", "eng"],
"language": ["lat", "cast", "vose"],
"fanart": "https://i.postimg.cc/WpqD2n77/cine24bg.jpg",
"thumbnail": "https://cine24h.net/wp-content/uploads/2018/06/cine24hv2.png",
"banner": "",
"categories": [
"movie",
"tvshow",
"vose"
"vose",
"direct"
],
"settings": [
{
8 changes: 4 additions & 4 deletions plugin.video.alfa/channels/cine24h.py
@@ -138,10 +138,10 @@ def peliculas(item):
contentType = 'movie'
title = scrapedtitle

itemlist.append(item.clone(channel=__channel__, action=action, text_color=color3, show=scrapedtitle,
url=scrapedurl, infoLabels={'year': year}, contentType=contentType,
contentTitle=scrapedtitle, thumbnail='https:' + scrapedthumbnail,
title=title, context="buscar_trailer"))
itemlist.append(Item(channel=__channel__, action=action, text_color=color3, show=scrapedtitle,
url=scrapedurl, infoLabels={'year': year}, contentType=contentType,
contentTitle=scrapedtitle, thumbnail='https:' + scrapedthumbnail,
title=title, context="buscar_trailer"))

tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

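On the cine24h change above: clone() copies every attribute of the calling item, so menu-level fields leak into each movie entry, while a fresh Item() only carries what is passed explicitly, which is presumably what was confusing the trailer lookup. A minimal stand-in (this Item is a toy class, not core.item.Item):

class Item(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def clone(self, **kwargs):
        attrs = dict(self.__dict__)
        attrs.update(kwargs)
        return Item(**attrs)

menu = Item(channel="cine24h", title="Peliculas", extra="leftover-menu-field")
via_clone = menu.clone(title="Some movie")               # inherits extra="leftover-menu-field"
via_fresh = Item(channel="cine24h", title="Some movie")  # carries only explicit arguments

print(hasattr(via_clone, "extra"))  # True
print(hasattr(via_fresh, "extra"))  # False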
10 changes: 10 additions & 0 deletions plugin.video.alfa/channels/descargacineclasico.json
@@ -8,5 +8,15 @@
"thumbnail": "descargacineclasico2.png",
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}
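The new settings block appears to be how an Alfa channel opts into the global search, matching the commit note for descargacineclasico. A rough standalone check of the flag, reading the JSON directly (Alfa itself goes through its own config helpers; the path assumes you run from the repository root):

import json

with open("plugin.video.alfa/channels/descargacineclasico.json") as f:
    channel = json.load(f)

in_global_search = any(
    setting.get("id") == "include_in_global_search" and setting.get("default")
    for setting in channel.get("settings", [])
)
print(in_global_search)  # True once this commit is applied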
68 changes: 23 additions & 45 deletions plugin.video.alfa/channels/erotik.py
@@ -3,29 +3,30 @@
import re
import urlparse

from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger

host = "https://www.youfreeporntube.net"

def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="lista", title="Útimos videos",
url="http://www.ero-tik.com/newvideos.html?&page=1"))
url= host + "/new-clips.html?&page=1"))
itemlist.append(
Item(channel=item.channel, action="categorias", title="Categorias", url="http://www.ero-tik.com/browse.html"))
itemlist.append(Item(channel=item.channel, action="lista", title="Top ultima semana",
url="http://www.ero-tik.com/topvideos.html?do=recent"))
Item(channel=item.channel, action="categorias", title="Categorias", url=host + "/browse.html"))
itemlist.append(Item(channel=item.channel, action="lista", title="Populares",
url=host + "/topvideo.html?page=1"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
url="http://www.ero-tik.com/search.php?keywords="))

url=host + "/search.php?keywords="))
return itemlist


def search(item, texto):
logger.info()

texto = texto.replace(" ", "+")
item.url = "{0}{1}".format(item.url, texto)
try:
@@ -41,96 +42,73 @@ def search(item, texto):
def categorias(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}", "", data)
patron = '<div class="pm-li-category"><a href="([^"]+)">.*?.<h3>(.*?)</h3></a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, actriz in matches:
itemlist.append(Item(channel=item.channel, action="listacategoria", title=actriz, url=url))

return itemlist


def lista(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}", "", data)

# Extract the entries from the selected page
patron = '<li><div class=".*?<a href="([^"]+)".*?>.*?.img src="([^"]+)".*?alt="([^"]+)".*?>'

matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []

for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
title = scrapedtitle.strip()

# Add to the list
itemlist.append(Item(channel=item.channel, action="play", thumbnail=thumbnail, fanart=thumbnail, title=title,
fulltitle=title, url=url,
viewmode="movie", folder=True))

paginacion = scrapertools.find_single_match(data,
'<li class="active"><a href="#" onclick="return false;">\d+</a></li><li class=""><a href="([^"]+)">')

if paginacion:
itemlist.append(Item(channel=item.channel, action="lista", title=">> Página Siguiente",
url="http://ero-tik.com/" + paginacion))

url=host + "/" + paginacion))
return itemlist


def listacategoria(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}", "", data)

# Extract the entries from the selected page
patron = '<li><div class=".*?<a href="([^"]+)".*?>.*?.img src="([^"]+)".*?alt="([^"]+)".*?>'

matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []

for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
title = scrapedtitle.strip()

# Add to the list
itemlist.append(
Item(channel=item.channel, action="play", thumbnail=thumbnail, title=title, fulltitle=title, url=url,
viewmode="movie", folder=True))

paginacion = scrapertools.find_single_match(data,
'<li class="active"><a href="#" onclick="return false;">\d+</a></li><li class=""><a href="([^"]+)">')

if paginacion:
itemlist.append(
Item(channel=item.channel, action="listacategoria", title=">> Página Siguiente", url=paginacion))

return itemlist


def play(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cachePage(item.url)
data = scrapertools.unescape(data)
logger.info(data)
from core import servertools
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
videoitem.action = "play"
videoitem.folder = False
videoitem.title = item.title

data = httptools.downloadpage(item.url).data
item.url = scrapertools.find_single_match(data, 'Playerholder.*?src="([^"]+)"')
if "tubst.net" in item.url:
url = scrapertools.find_single_match(data, 'itemprop="embedURL" content="([^"]+)')
data = httptools.downloadpage(url).data
url = scrapertools.find_single_match(data, '<iframe.*?src="([^"]+)"')
data = httptools.downloadpage(url).data
url = scrapertools.find_single_match(data, '<source src="([^"]+)"')
item.url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist.append(item.clone())
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist
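The rewritten play() ends by requesting the final media URL with follow_redirects=False and only_headers=True and keeping only the Location header. A hedged, standalone sketch of that last step in plain Python 2 (httplib/urlparse, matching the file's imports; the helper name and URL are made up):

import httplib
import urlparse

def resolve_location(url):
    # Issue a HEAD request and return the redirect target without downloading the body.
    parts = urlparse.urlparse(url)
    if parts.scheme == "https":
        conn = httplib.HTTPSConnection(parts.netloc)
    else:
        conn = httplib.HTTPConnection(parts.netloc)
    path = parts.path or "/"
    if parts.query:
        path += "?" + parts.query
    conn.request("HEAD", path)
    response = conn.getresponse()
    return response.getheader("location", "")

print(resolve_location("https://example.com/some-video-page"))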
22 changes: 0 additions & 22 deletions plugin.video.alfa/channels/filesmonster_catalogue.json

This file was deleted.
