Merge pull request #84 from Alfa-beto/channels
Channel adjustments and repairs
@@ -41,7 +41,7 @@ def mainlist(item):
     logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
     itemlist.append(
         Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail,
-             plot=plot, server="directo", folder=False))
+             plot=plot,folder=False))

 # ------------------------------------------------------
 # Extract the next page
@@ -273,13 +273,13 @@ def fichas(item):
 def fichas(item):
     logger.info()
     itemlist = []

+    textoidiomas=''
+    infoLabels=dict()
     ## Load watch states
     status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)

     if item.title == "Buscar...":
         data = agrupa_datos(httptools.downloadpage(item.url, post=item.extra).data)

         s_p = scrapertools.get_match(data, '<h3 class="section-title">(.*?)<div id="footer-wrapper">').split(
             '<h3 class="section-title">')
@@ -320,10 +320,12 @@ def fichas(item):

     if scrapedlangs != ">":
         textoidiomas = extrae_idiomas(scrapedlangs)
+        # TODO: strip the language tag from the title
         title += bbcode_kodi2html(" ( [COLOR teal][B]" + textoidiomas + "[/B][/COLOR])")

     if scrapedrating != ">":
         valoracion = re.sub(r'><[^>]+>(\d+)<b class="dec">(\d+)</b>', r'\1,\2', scrapedrating)
+        infoLabels['rating']=valoracion
         title += bbcode_kodi2html(" ([COLOR orange]" + valoracion + "[/COLOR])")

     url = urlparse.urljoin(item.url, scrapedurl)
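
The rating substitution above collapses the site's split integer/decimal markup into a single comma-separated value before it is stored in infoLabels. A standalone check (the sample markup is an assumption about the scraped fragment's shape):

import re

scrapedrating = '><span>7<b class="dec">8</b>'  # assumed shape of the scraped fragment
valoracion = re.sub(r'><[^>]+>(\d+)<b class="dec">(\d+)</b>', r'\1,\2', scrapedrating)
print(valoracion)  # -> "7,8"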
@@ -349,7 +351,7 @@ def fichas(item):
             itemlist.append(
                 Item(channel=item.channel, action=action, title=title, url=url, fulltitle=title, thumbnail=thumbnail,
                      show=show, folder=True, contentType=contentType, contentTitle=contentTitle,
-                     language =textoidiomas+'x'))
+                     language =textoidiomas, infoLabels=infoLabels))

     ## Pagination
     next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)">.raquo;</a>')
@@ -794,11 +796,14 @@ def agrupa_datos(data):

 def extrae_idiomas(bloqueidiomas):
     logger.info("idiomas=" + bloqueidiomas)
+    # TODO: switch this to a list
+    #textoidiomas=[]
+    textoidiomas = ''
     patronidiomas = '([a-z0-9]+).png"'
     idiomas = re.compile(patronidiomas, re.DOTALL).findall(bloqueidiomas)
-    textoidiomas = ""
     for idioma in idiomas:
-        textoidiomas = textoidiomas + idioma.upper() + " "
+        textoidiomas = textoidiomas + idioma +" "
+        #textoidiomas.append(idioma.upper())

     return textoidiomas
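
The TODO above suggests returning a list instead of a space-joined string. A minimal sketch of that refactor (the new function name and the list return type are assumptions; callers would join for display and could pass the list to Item(language=...)):

import re

def extrae_idiomas_lista(bloqueidiomas):
    # Same flag-image pattern as above, collected into a list instead of a string
    patronidiomas = '([a-z0-9]+).png"'
    return re.compile(patronidiomas, re.DOTALL).findall(bloqueidiomas)

idiomas = extrae_idiomas_lista('<img src="es.png"/><img src="lat.png"/>')
print(" ".join(idiomas))  # -> "es lat"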
@@ -217,7 +217,7 @@ def episodios(item):
         idiomas = " ".join(["[%s]" % IDIOMAS.get(language, "OVOS") for language in
                             re.findall("banderas/([^\.]+)", flags, re.MULTILINE)])
         filter_lang = idiomas.replace("[", "").replace("]", "").split(" ")
-        display_title = "%s - %s" % (item.show, title)
+        display_title = "%s - %s %s" % (item.show, title, idiomas)
         # logger.debug("Episode found %s: %s" % (display_title, urlparse.urljoin(HOST, url)))
         itemlist.append(item.clone(title=display_title, url=urlparse.urljoin(HOST, url),
                                    action="findvideos", plot=plot, fanart=fanart, language=filter_lang))
@@ -40,7 +40,7 @@ def novedades(item):
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
     data = re.sub(r"<!--.*?-->", "", data)

     logger.debug(data)
     patron = '<a title="([^"]+)" href="([^"]+)".*?>'
     patron += "<img.*?src='([^']+)'"
     matches = re.compile(patron, re.DOTALL).findall(data)
@@ -49,11 +49,19 @@ def novedades(item):
         # patron = "^(.*?)(?:Ya Disponible|Disponible|Disponbile|disponible|\(Actualizada\))$"
         # match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
         title = scrapertools.decodeHtmlentities(scrapedtitle)
+        language=''
+        # language = scrapertools.find_multiple_matches(title,'(Vose|Español|Latino)')
+        # for lang in language:
+        #     title = title.replace(lang,'')
+        # title = title.replace ('Disponible','')
+        # title = title.replace('Ya', '')
+        # title = title.strip()

         show = scrapertools.find_single_match(title, "^(.+?) \d+[x|X]\d+")

         itemlist.append(Item(channel=item.channel, title=title, url=urlparse.urljoin(HOST, scrapedurl), show=show,
                              action="episodios", thumbnail=scrapedthumb,
-                             context=filtertools.context(item, list_idiomas, CALIDADES)))
+                             context=filtertools.context(item, list_idiomas, CALIDADES), language=language))

     return itemlist
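
The show extraction above trims the series name off ahead of the 2x05-style episode marker so the videolibrary gets a clean show title. A quick standalone check (the sample title is illustrative):

import re

title = "Mar de plastico 2x05 Ya Disponible"
show = re.search("^(.+?) \d+[x|X]\d+", title).group(1)
print(show)  # -> "Mar de plastico"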
@@ -225,7 +233,7 @@ def parse_videos(item, tipo, data):

         itemlist.append(Item(channel=item.channel, title=title, url=urlparse.urljoin(HOST, link), action="play",
                              show=item.show, language=IDIOMAS.get(language, "OVOS"), quality=quality,
-                             fulltitle=item.title))
+                             fulltitle=item.title, server=server))

     return itemlist

plugin.video.alfa/channels/serieslatino.py | 4 changes (Executable file → Normal file)
@@ -56,7 +56,7 @@ def mainlist(item):
     autoplay.init(item.channel, list_servers, list_quality)
     itemlist = []

-    itemlist.append(item.clone(title="Series", action="lista", thumbnail='https://s27.postimg.org/iahczwgrn/series.png',
+    itemlist.append(Item(channel= item.channel, title="Series", action="lista", thumbnail='https://s27.postimg.org/iahczwgrn/series.png',
                                fanart='https://s27.postimg.org/iahczwgrn/series.png', extra='peliculas/',
                                url=host + 'lista-de-series/'))
@@ -136,7 +136,7 @@ def temporadas(item):

         itemlist.append(Item(channel=item.channel, action='episodiosxtemp', url=item.url, title=title,
                              contentSerieName=item.contentSerieName, thumbnail=thumbnail, plot=plot, fanart=fanart,
-                             contentSeasonNumber=contentSeasonNumber, infoLabels=item.infoLabels))
+                             contentSeasonNumber=contentSeasonNumber, infoLabels=infoLabels))
     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

     if config.get_videolibrary_support() and len(itemlist) > 0:
@@ -11,12 +11,14 @@ from core import httptools
 from core import jsontools
 from core import scrapertools
 from core import servertools
 from core import tmdb
 from core.item import Item
 from platformcode import config, logger

 HOST = "http://www.seriespapaya.com"

-IDIOMAS = {'es': 'Español', 'lat': 'Latino', 'in': 'Inglés', 'ca': 'Catalán', 'sub': 'VOS'}
+IDIOMAS = {'es': 'Español', 'lat': 'Latino', 'in': 'Inglés', 'ca': 'Catalán', 'sub': 'VOSE', 'Español Latino':'lat',
+           'Español Castellano':'es', 'Sub Español':'VOSE'}
 list_idiomas = IDIOMAS.values()
 CALIDADES = ['360p', '480p', '720p HD', '1080p HD']
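
Note that the reworked IDIOMAS mapping now carries both directions in one dict: short codes to display names ('es' → 'Español') and site-side display strings back to codes ('Español Castellano' → 'es'), so list_idiomas = IDIOMAS.values() mixes names with codes. Splitting the two directions would keep lookups unambiguous; a sketch (the split, the helper, and normalizing 'Sub Español' to 'sub' are assumptions, not part of this commit):

CODE_TO_NAME = {'es': 'Español', 'lat': 'Latino', 'in': 'Inglés', 'ca': 'Catalán', 'sub': 'VOSE'}
NAME_TO_CODE = {'Español Latino': 'lat', 'Español Castellano': 'es', 'Sub Español': 'sub'}

def language_label(value):
    # Accept either a short code ('lat') or a site string ('Español Latino')
    code = NAME_TO_CODE.get(value, value)
    return CODE_TO_NAME.get(code, value)

list_idiomas = list(CODE_TO_NAME.values())  # display names only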
@@ -67,22 +69,31 @@ def series_por_letra_y_grupo(item):
         "letra": item.letter.lower()
     }
     data = httptools.downloadpage(url, post=urllib.urlencode(post_request)).data
+    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    patron = '<div class=list_imagen><img src=(.*?) \/>.*?<div class=list_titulo><a href=(.*?) style=.*?inherit;>(.*?)'
+    patron +='<.*?justify>(.*?)<.*?Año:<\/b>.*?(\d{4})<'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    #series = re.findall(
+    #    'list_imagen.+?src="(?P<img>[^"]+).+?<div class="list_titulo"><a[^>]+href="(?P<url>[^"]+)[^>]+>(.*?)</a>', data,
+    #    re.MULTILINE | re.DOTALL)

-    series = re.findall(
-        'list_imagen.+?src="(?P<img>[^"]+).+?<div class="list_titulo"><a[^>]+href="(?P<url>[^"]+)[^>]+>(.*?)</a>', data,
-        re.MULTILINE | re.DOTALL)

-    for img, url, name in series:
-        itemlist.append(item.clone(
+    for img, url, name, plot, year in matches:
+        new_item= Item(
+            channel = item.channel,
             action="episodios",
             title=name,
             show=name,
             url=urlparse.urljoin(HOST, url),
             thumbnail=urlparse.urljoin(HOST, img),
-            context=filtertools.context(item, list_idiomas, CALIDADES)
-        ))
+            context=filtertools.context(item, list_idiomas, CALIDADES),
+            plot = plot,
+            infoLabels={'year':year}
+        )
+        if year:
+            tmdb.set_infoLabels_item(new_item)
+        itemlist.append(new_item)

-    if len(series) == 8:
+    if len(matches) == 8:
         itemlist.append(item.clone(title="Siguiente >>", action="series_por_letra_y_grupo", extra=item.extra + 1))

     if item.extra > 0:
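
Pagination on these listings is inferred from page size rather than from a next link: the site serves eight covers per request, so a full batch of eight matches is read as "there may be another page", with item.extra carrying the page index. The pattern in isolation (a condensed sketch of the code above; PAGE_SIZE is an illustrative name):

PAGE_SIZE = 8  # covers returned per request on the letter listings

def add_next_page(item, itemlist, matches):
    # A full page suggests at least one more page exists; item.extra is the page index
    if len(matches) == PAGE_SIZE:
        itemlist.append(item.clone(title="Siguiente >>", action="series_por_letra_y_grupo",
                                   extra=item.extra + 1))
    return itemlist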
@@ -94,13 +105,16 @@ def series_por_letra_y_grupo(item):
 def novedades(item):
     logger.info()
     data = httptools.downloadpage(HOST).data
-    shows = re.findall('sidebarestdiv[^<]+<a[^<]+title="([^"]*)[^<]+href="([^"]*)[^<]+<img[^<]+src="([^"]+)', data,
-                       re.MULTILINE | re.DOTALL)
+    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    patron = 'sidebarestdiv><a title=(.*?\d+X\d+) (.*?) href=(.*?)>.*?src=(.*?)>'
+    matches = re.compile(patron, re.DOTALL).findall(data)

     itemlist = []

-    for title, url, img in shows:
-        itemlist.append(item.clone(action="findvideos", title=title, url=urlparse.urljoin(HOST, url), thumbnail=img))
+    for title, language,url, img in matches:
+        language = IDIOMAS[language]
+        itemlist.append(item.clone(action="findvideos", title=title, url=urlparse.urljoin(HOST, url), thumbnail=img,
+                                   language=language))

     return itemlist
@@ -133,7 +147,6 @@ def episodios(item):

     itemlist = []
     for url, title, langs in episodes:
-        logger.debug("langs %s" % langs)
         languages = " ".join(
             ["[%s]" % IDIOMAS.get(lang, lang) for lang in re.findall('images/s-([^\.]+)', langs)])
         filter_lang = languages.replace("[", "").replace("]", "").split(" ")
@@ -206,6 +219,5 @@ def play(item):
     logger.info("play: %s" % item.url)
     data = httptools.downloadpage(item.url).data
     video_url = scrapertools.find_single_match(data, "location.href='([^']+)")
-    logger.debug("Video URL = %s" % video_url)
     itemlist = servertools.find_video_items(data=video_url)
     return itemlist
@@ -6,6 +6,7 @@ import urlparse
 from core import httptools
 from core import scrapertools
 from core import servertools
+from core import tmdb
 from core.item import Item
 from platformcode import logger
@@ -60,18 +61,18 @@ def lista(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
     # data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>', "", data)

     listado = scrapertools.find_single_match(data,
                                              '<div id="sipeliculas" class="borde"><div class="izquierda">(.*?)<div class="derecha"><h2')
-    logger.info('vergas' + listado)
-    patron = '<li class="[^"]+"><a class="[^"]+" href="([^"]+)" title="Ver Película([^"]+)"><i></i><img.*?src="([^"]+)" alt="[^"]+"/>(.*?)</li>'
-    matches = re.compile(patron, re.DOTALL).findall(listado)
-    for scrapedurl, scrapedtitle, scrapedthumbnail, dataplot in matches:
-        dataplot = scrapertools.find_single_match(data, '<div class="ttip"><h5>[^<]+</h5><p><span>([^<]+)</span>')
-        itemlist.append(Item(channel=item.channel, action='findvideos', title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail, plot=dataplot, contentTitle=scrapedtitle, extra=item.extra))
+    patron = '<a class="i" href="(.*?)".*?src="(.*?)".*?title=.*?>(.*?)<.*?span>(.*?)<.*?<p><span>(.*?)<'

+    matches = re.compile(patron, re.DOTALL).findall(listado)

+    for scrapedurl, scrapedthumbnail, scrapedtitle, year, plot in matches:
+        itemlist.append(Item(channel=item.channel, action='findvideos', title=scrapedtitle, url=scrapedurl,
+                             thumbnail=scrapedthumbnail, plot=plot, contentTitle=scrapedtitle, extra=item.extra,
+                             infoLabels ={'year':year}))

+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
     # Pagination
     if itemlist != []:
         patron = '<li[^<]+<a href="([^"]+)" title="[^"]+">Siguiente[^<]+</a></li>'
@@ -96,22 +97,26 @@ def findvideos(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
     # data = re.sub(r"'|\n|\r|\t|&nbsp;|<br>", "", data)

     listado1 = scrapertools.find_single_match(data,
                                               '<div class="links" id="ver-mas-opciones"><h2 class="h2"><i class="[^"]+"></i>[^<]+</h2><ul class="opciones">(.*?)</ul>')
     patron1 = '<li ><a id="([^"]+)" rel="nofollow" href="([^"]+)" title="[^"]+" alt="([^"]+)"><span class="opcion"><i class="[^"]+"></i><u>[^<]+</u>[^<]+</span><span class="ico"><img src="[^"]+" alt="[^"]+"/>[^<]+</span><span>([^"]+)</span><span>([^"]+)</span></a></li>'
     matches = matches = re.compile(patron1, re.DOTALL).findall(listado1)
-    for vidId, vidUrl, vidServer, idioma, calidad in matches:
+    for vidId, vidUrl, vidServer, language, quality in matches:
+        server = servertools.get_server_name(vidServer)
+        if 'Sub' in language:
+            language='sub'
         itemlist.append(Item(channel=item.channel, action='play', url=vidUrl, extra=vidId,
-                             title='Ver en ' + vidServer + ' | ' + idioma + ' | ' + calidad, thumbnail=item.thumbnail))
+                             title='Ver en ' + vidServer + ' | ' + language + ' | ' + quality,
+                             thumbnail=item.thumbnail, server=server, language=language, quality=quality ))

     listado2 = scrapertools.find_single_match(data, '<ul class="opciones-tab">(.*?)</ul>')
     patron2 = '<li ><a id="([^"]+)" rel="nofollow" href="([^"]+)" title="[^"]+" alt="([^"]+)"><img src="[^"]+" alt="[^"]+"/>[^<]+</a></li>'
     matches = matches = re.compile(patron2, re.DOTALL).findall(listado2)
     for vidId, vidUrl, vidServer in matches:
+        server = servertools.get_server_name(vidServer)
         itemlist.append(Item(channel=item.channel, action='play', url=vidUrl, extra=vidId, title='Ver en ' + vidServer,
-                             thumbnail=item.thumbnail))
+                             thumbnail=item.thumbnail, server=server))

     for videoitem in itemlist:
         videoitem.fulltitle = item.title
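
The recurring change in this findvideos is attaching server= to each Item so the platform can resolve the link directly; servertools.get_server_name (already used above) maps the display name scraped from the page to the internal server id. A minimal usage sketch (runnable only inside the addon runtime; the sample values are illustrative):

from core import servertools
from core.item import Item

vidServer = 'Openload'  # display name as scraped from the page (illustrative)
server = servertools.get_server_name(vidServer)
item = Item(channel='serieslatino', action='play', url='http://example.com/embed/abc',
            title='Ver en ' + vidServer, server=server, thumbnail='')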
@@ -154,6 +154,9 @@ def listado(item):
             replace("Descarga Serie HD", "", 1).strip()

+        show = title
+
+        # TODO: strip the quality tag from the title
         if quality:
             title = "%s [%s]" % (title, quality)
@@ -194,8 +194,25 @@ def findvideos(item):

     patron = '<iframe class=metaframe rptss src=(.*?) frameborder=0 allowfullscreen><\/iframe>'
     matches = matches = re.compile(patron, re.DOTALL).findall(data)
-    for videoitem in matches:
-        itemlist.extend(servertools.find_video_items(data=videoitem))

+    for video_url in matches:

+        # TODO: repair "directo" links
+        # if 'stream' in video_url:
+        #     data = httptools.downloadpage('https:'+video_url).data
+        #     new_url=scrapertools.find_single_match(data, 'iframe src="(.*?)"')
+        #     new_data = httptools.downloadpage(new_url).data
+        #     logger.debug(new_data)
+        #
+        #     url, quality = scrapertools.find_single_match(new_data, "file:'(.*?)',label:'(.*?)'")
+        #     headers_string = '|Referer=%s' % url
+        #     url = url.replace('download', 'preview')+headers_string
+        #     sub = scrapertools.find_single_match(new_data, "file:.*?'(.*?srt)'")
+        #     new_item = (Item(title=item.title, url=url, quality=quality, server='directo',
+        #                      subtitle=sub))
+        #     itemlist.append(new_item)
+        # else:
+        itemlist.extend(servertools.find_video_items(data=video_url))

     for videoitem in itemlist:
         videoitem.channel = item.channel
@@ -206,6 +223,7 @@ def findvideos(item):
         if 'youtube' in videoitem.url:
             videoitem.title = '[COLOR orange]Trailer en Youtube[/COLOR]'

     if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
         itemlist.append(
             Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
@@ -4,6 +4,9 @@ import re
 import urlparse

 from core import scrapertools
+from core import httptools
+from core import servertools
+from core import tmdb
 from core.item import Item
 from platformcode import config, logger

@@ -25,8 +28,8 @@ def mainlist(item):
                          url="http://www.vepelis.com/pelicula/ultimas-peliculas/ultimas/actualizadas",
                          extra="http://www.vepelis.com/pelicula/ultimas-peliculas/ultimas/actualizadas"))
     itemlist.append(Item(channel=item.channel, title="Por Genero", action="generos", url="http://www.vepelis.com/"))
-    itemlist.append(
-        Item(channel=item.channel, title="Por Orden Alfabetico", action="alfabetico", url="http://www.vepelis.com/"))
+    itemlist.append(Item(channel=item.channel, title="Por Orden Alfabetico", action="alfabetico",
+                         url="http://www.vepelis.com/"))
     itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="http://www.vepelis.com/"))
     return itemlist
@@ -35,12 +38,16 @@ def listarpeliculas(item):
     logger.info()

     # Download the page
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     extra = item.extra

     # Extract the entries from the selected page
-    '''<td class="DarkText" align="center" valign="top" width="100px" height="160px" style="background-color:#1e1e1e;" onmouseover="this.style.backgroundColor='#000000'" onmouseout="this.style.backgroundColor='#1e1e1e'"><p style="margin-bottom: 3px;border-bottom:#ABABAB 1px solid">
-    <a href="http://www.peliculasaudiolatino.com/movies/Larry_Crowne.html"><img src="http://www.peliculasaudiolatino.com/poster/85x115/peliculas/movieimg/movie1317696842.jpg" alt="Larry Crowne" border="0" height="115" width="85"></a>'''
+    '''<td class="DarkText" align="center" valign="top" width="100px" height="160px"
+    style="background-color:#1e1e1e;" onmouseover="this.style.backgroundColor='#000000'"
+    onmouseout="this.style.backgroundColor='#1e1e1e'"><p style="margin-bottom: 3px;border-bottom:#ABABAB 1px solid">
+    <a href="http://www.peliculasaudiolatino.com/movies/Larry_Crowne.html"><img
+    src="http://www.peliculasaudiolatino.com/poster/85x115/peliculas/movieimg/movie1317696842.jpg"
+    alt="Larry Crowne" border="0" height="115" width="85"></a>'''
     patron = '<td class=.*?<a '
     patron += 'href="([^"]+)"><img src="([^"]+)" alt="([^"]+)"'
     matches = re.compile(patron, re.DOTALL).findall(data)
@@ -54,9 +61,8 @@ def listarpeliculas(item):
         logger.info(scrapedtitle)

         # Add the entry to the list
-        itemlist.append(
-            Item(channel=item.channel, action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle, url=scrapedurl,
-                 thumbnail=scrapedthumbnail, plot=scrapedplot, extra=extra, folder=True))
+        itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle,
+                             url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, extra=extra, folder=True))

     # Extract the next-page marker
     patron = 'Anterior.*? :: <a href="/../../.*?/page/([^"]+)">Siguiente '
@@ -69,8 +75,8 @@ def listarpeliculas(item):
         scrapedplot = ""

         itemlist.append(
-            Item(channel=item.channel, action="listarpeliculas", title=scrapedtitle, fulltitle=scrapedtitle,
-                 url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, extra=extra, folder=True))
+            Item(channel=item.channel, action="listarpeliculas", title=scrapedtitle, fulltitle=scrapedtitle,
+                 url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, extra=extra, folder=True))

     return itemlist
@@ -78,77 +84,26 @@ def listarpeliculas(item):
 def findvideos(item):
     logger.info()
     # Download the page
-    data = scrapertools.cachePage(item.url)
-    title = item.title
-    scrapedthumbnail = item.thumbnail
     itemlist = []
-    patron = '<li><a href="#ms.*?">([^"]+)</a></li>.*?<iframe src="(.*?)"'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-    # itemlist.append( Item(channel=item.channel, action="play", title=title , fulltitle=item.fulltitle, url=item.url , thumbnail=scrapedthumbnail , folder=False) )
+    data = httptools.downloadpage(item.url).data

-    for match in matches:
-        url = match[1]
-        title = "SERVIDOR: " + match[0]
-        title = unicode(title, "iso-8859-1", errors="replace").encode("utf-8")
-        itemlist.append(Item(channel=item.channel, action="play", title=title, fulltitle=item.fulltitle, url=url,
-                             thumbnail=scrapedthumbnail, folder=False))
+    itemlist.extend(servertools.find_video_items(data=data))

     return itemlist
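
findvideos collapses to the generic idiom: fetch the page once and let servertools.find_video_items detect every known hoster embedded in the HTML, instead of hand-parsing each server tab. The same idiom in isolation (a sketch under the addon runtime; it mirrors this hunk and the play() below):

from core import httptools
from core import servertools

def findvideos_generic(item):
    # Download the page and let servertools spot every recognizable video host
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        # Route playback back through this channel
        videoitem.channel = item.channel
        videoitem.action = 'play'
    return itemlist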

 def play(item):
     logger.info()
     itemlist = []

     from core import servertools
     itemlist = servertools.find_video_items(data=item.url)
     for videoitem in itemlist:
         videoitem.channel = item.channel
-        videoitem.action = "play"
         videoitem.folder = False
+        videoitem.quality = item.quality
+        videoitem.language = item.language
+        videoitem.action = 'play'

     return itemlist
-    # data2 = scrapertools.cache_page(item.url)
-    # data2 = data2.replace("http://www.peliculasaudiolatino.com/show/mv.php?url=","http://www.megavideo.com/?v=")
-    # data2 = data2.replace("http://www.peliculasaudiolatino.com/show/videobb.php?url=","http://www.videobb.com/watch_video.php?v=")
-    # data2 = data2.replace("http://www.peliculasaudiolatino.com/show/vidbux.php?url=","http://www.vidbux.com/")
-    # data2 = data2.replace("http://www.peliculasaudiolatino.com/show/vidxden.php?url=","http://www.vidxden.com/")
-    # data2 = data2.replace("http://www.peliculasaudiolatino.com/show/videozer.php?url=","http://www.videozer.com/video/")
-    # data2 = data2.replace("http://www.peliculasaudiolatino.com/v/pl/play.php?url=","http://www.putlocker.com/embed/")
-    # data2 = data2.replace("http://www.peliculasaudiolatino.com/v/mv/play.php?url=","http://www.modovideo.com/frame.php?v=")
-    # data2 = data2.replace("http://www.peliculasaudiolatino.com/v/ss/play.php?url=","http://www.sockshare.com/embed/")
-    # data2 = data2.replace("http://www.peliculasaudiolatino.com/v/vb/play.php?url=","http://vidbull.com/")
-    # data2 = data2.replace("http://www.peliculasaudiolatino.com/show/sockshare.php?url=","http://www.sockshare.com/embed/")
-    # data2 = data2.replace("http://www.peliculasaudiolatino.com/show/moevide.php?url=","http://moevideo.net/?page=video&uid=")
-    # data2 = data2.replace("http://www.peliculasaudiolatino.com/show/novamov.php?url=","http://www.novamov.com/video/")
-    # data2 = data2.replace("http://www.peliculasaudiolatino.com/show/movshare.php?url=","http://www.movshare.net/video/")
-    # data2 = data2.replace("http://www.peliculasaudiolatino.com/show/divxstage.php?url=","http://www.divxstage.net/video/")
-    # listavideos = servertools.findvideos(data2)

-    # for video in listavideos:
-    #     invalid = video[1]
-    #     invalid = invalid[0:8]
-    #     if invalid!= "FN3WE43K" and invalid!="9CC3F8&e":
-    #         scrapedtitle = item.title+video[0]
-    #         videourl = item.url
-    #         server = video[2]
-    #         if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+videourl+"]")
-    # logger.info("url=" + item.url)

-    # Add the entry to the XBMC listing
-    # itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , fulltitle=item.fulltitle, url=videourl , server=server , folder=False) )
-    # itemlist.append( Item(channel=item.channel, action="play" , title=item.title , url=item.url, thumbnail="", plot="", server=item.url))

-    # return itemlist

 def generos(item):
     logger.info()
     itemlist = []

     # Download the page
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data

     patron = '>.*?<li><a title="(.*?)" href="(.*?)"'
     matches = re.compile(patron, re.DOTALL).findall(data)
@@ -179,12 +134,12 @@ def generos(item):

 def alfabetico(item):
     logger.info()

     # TODO: do this properly
     extra = item.url
     itemlist = []
     itemlist.append(
-        Item(channel=item.channel, action="listado2", title="0-9", url="http://www.vepelis.com/letra/09.html",
-             extra="http://www.vepelis.com/letra/09.html"))
+        Item(channel=item.channel, action="listado2", title="0-9", url="http://www.vepelis.com/letra/09.html",
+             extra="http://www.vepelis.com/letra/09.html"))
     itemlist.append(Item(channel=item.channel, action="listado2", title="A", url="http://www.vepelis.com/letra/a.html",
                          extra="http://www.vepelis.com/letra/a.html"))
     itemlist.append(Item(channel=item.channel, action="listado2", title="B", url="http://www.vepelis.com/letra/b.html",
@@ -247,38 +202,29 @@ def listado2(item):
     itemlist = []

     # Download the page
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)

-    patron = '<h2 class="titpeli.*?<a href="([^"]+)" title="([^"]+)".*?peli_img_img">.*?<img src="([^"]+)".*?<strong>Idioma</strong>:.*?/>([^"]+)</div>.*?<strong>Calidad</strong>: ([^"]+)</div>'
+    patron = '<h2 class=titpeli.*?<a href=(.*?) title=(.*?)>.*?peli_img_img>.*?<img src=(.*?) alt.*?'
+    patron += '<p>(.*?)<.*?Genero.*?:.*?(\d{4})<.*?png\/>(.*?)<.*?: (.*?)<'

     matches = re.compile(patron, re.DOTALL).findall(data)
-    for match in matches:
-        scrapedurl = match[0]  # urlparse.urljoin("",match[0])
-        scrapedtitle = match[1] + ' - ' + match[4]
-        scrapedtitle = unicode(scrapedtitle, "iso-8859-1", errors="replace").encode("utf-8")
-        scrapedthumbnail = match[2]
-        # scrapedplot = match[0]
-        # itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , fulltitle=scrapedtitle, url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
-        itemlist.append(
-            Item(channel=item.channel, action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle, url=scrapedurl,
-                 thumbnail=scrapedthumbnail, folder=True))
+    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedplot, year, language, quality in matches:
+        language = language.strip()
+        itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle,
+                             url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, language=language,
+                             quality=quality, infoLabels={'year': year}))

     # if extra<>"":
     # Extract the next-page marker
     # patron = 'page=(.*?)"><span><b>'
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
     patron = '<span><b>(.*?)</b></span>'
     matches = re.compile(patron, re.DOTALL).findall(data)
     # if DEBUG: scrapertools.printMatches(matches)
     for match in matches:
         # if len(matches)>0:
         nu = int(match[0]) + 1
         scrapedurl = extra + "?page=" + str(nu)
         scrapedtitle = "!Pagina Siguiente ->"
         scrapedthumbnail = ""
         scrapedplot = ""
-        itemlist.append(
-            Item(channel=item.channel, action="listado2", title=scrapedtitle, fulltitle=scrapedtitle, url=scrapedurl,
-                 thumbnail=scrapedthumbnail, extra=extra, folder=True))
+        itemlist.append(Item(channel=item.channel, action="listado2", title=scrapedtitle, fulltitle=scrapedtitle,
+                             url=scrapedurl, thumbnail=scrapedthumbnail, extra=extra, folder=True))

     return itemlist
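
The next-page URL is rebuilt by reading the current page number out of <span><b>N</b></span> and incrementing it. One subtlety: with a single capture group, findall already yields plain strings, so match[0] above is only the first character of the number and under-counts past page 9. A standalone sketch of the increment that also handles multi-digit pages (the sample markup is illustrative):

import re

data = '<span><b>12</b></span>'
extra = 'http://www.vepelis.com/letra/a.html'
for match in re.compile('<span><b>(.*?)</b></span>', re.DOTALL).findall(data):
    nu = int(match) + 1  # match is the captured string itself, e.g. "12"
    scrapedurl = extra + "?page=" + str(nu)
    print(scrapedurl)  # -> http://www.vepelis.com/letra/a.html?page=13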
@@ -172,13 +172,15 @@ def findvideos(item):
     duplicated = []

     data = get_source(item.url)
     logger.debug(data)
     video_info = scrapertools.find_single_match(data, "load_player\('(.*?)','(.*?)'\);")
-    movie_info = scrapertools.find_single_match(item.url, 'http:\/\/ver-peliculas\.org\/peliculas\/(\d+)-(.*?)-\d{'
-                                                          '4}-online\.')
-    movie_id = movie_info[0]
-    movie_name = movie_info[1]
+    movie_info = scrapertools.find_single_match(item.url,
+                                                'http:\/\/ver-peliculas\.(io|org)\/peliculas\/(\d+)-(.*?)-\d{4}-online\.')
+    movie_host = movie_info[0]
+    movie_id = movie_info[1]
+    movie_name = movie_info[2]
     sub = video_info[1]
-    url_base = 'http://ver-peliculas.org/core/api.php?id=%s&slug=%s' % (movie_id, movie_name)
+    url_base = 'http://ver-peliculas.%s/core/api.php?id=%s&slug=%s' % (movie_host, movie_id, movie_name)
     data = httptools.downloadpage(url_base).data
     json_data = jsontools.load(data)
     video_list = json_data['lista']
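
The reworked match adds (io|org) as a leading capture group so the channel follows the site across both TLDs; that shifts every index in the result tuple by one, which is why movie_id moves from movie_info[0] to movie_info[1]. A standalone check with plain re (the URL is illustrative):

import re

url = 'http://ver-peliculas.io/peliculas/1234-some-movie-2017-online.html'
movie_info = re.search('http:\/\/ver-peliculas\.(io|org)\/peliculas\/(\d+)-(.*?)-\d{4}-online\.', url)
movie_host, movie_id, movie_name = movie_info.groups()
print(movie_host + ' / ' + movie_id + ' / ' + movie_name)  # -> io / 1234 / some-movie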
@@ -611,7 +611,7 @@ def findvideos(item):
     patron = '<td><a href="([^"]+)".*?<img src="([^"]+)" title="([^<]+)" .*?<td>([^<]+)</td>.*?<td>([^<]+)</td>'
     matches = re.compile(patron, re.DOTALL).findall(data)
     print matches
-    for scrapedurl, scrapedthumbnail, scrapedserver, scrapedidioma, scrapedcalidad in matches:
+    for scrapedurl, scrapedthumbnail, scrapedserver, language, quality in matches:

         server = scrapertools.get_match(scrapedserver, '(.*?)[.]')
         icon_server = os.path.join(config.get_runtime_path(), "resources", "images", "servers",
@@ -623,16 +623,18 @@ def findvideos(item):
         if not os.path.exists(icon_server):
             icon_server = scrapedthumbnail

+        # TODO: remove this section
         scrapedserver = scrapedserver.replace(scrapedserver,
                                               "[COLOR darkorange][B]" + "[" + scrapedserver + "]" + "[/B][/COLOR]")
-        scrapedidioma = scrapedidioma.replace(scrapedidioma,
-                                              "[COLOR lawngreen][B]" + "--" + scrapedidioma + "--" + "[/B][/COLOR]")
-        scrapedcalidad = scrapedcalidad.replace(scrapedcalidad,
-                                                "[COLOR floralwhite][B]" + scrapedcalidad + "[/B][/COLOR]")
+        language = language.replace(language,
+                                    "[COLOR lawngreen][B]" + "--" + language + "--" + "[/B][/COLOR]")
+        quality = quality.replace(quality,
+                                  "[COLOR floralwhite][B]" + quality + "[/B][/COLOR]")

-        title = scrapedserver + scrapedidioma + scrapedcalidad
+        title = scrapedserver + language + quality
         itemlist.append(Item(channel=item.channel, title=title, action="play", url=scrapedurl, thumbnail=icon_server,
-                             fanart=item.show.split("|")[6], extra=item.thumbnail, folder=True))
+                             fanart=item.show.split("|")[6], extra=item.thumbnail, language= language,
+                             quality=quality))

     return itemlist
@@ -298,7 +298,7 @@ def findvideos(item):
     if filtro_enlaces != 1:
         list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "Descarga Directa", item)
         if list_enlaces:
-            itemlist.append(item.clone(action="", title="Enlaces Descarga", text_color=color1,
+            itemlist.append(item.clone(action="", title="Enlaces Descargas", text_color=color1,
                                        text_bold=True))
             itemlist.extend(list_enlaces)
@@ -344,12 +344,13 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, tipo, item):
         title = " Mirror en " + server + " (" + language + ") (Calidad " + calidad.strip() + ")"
         if filtro_idioma == 3 or item.filtro:
             lista_enlaces.append(item.clone(title=title, action="play", server=server, text_color=color2,
-                                            url=scrapedurl, idioma=language, orden=orden))
+                                            url=scrapedurl, idioma=language, orden=orden, language=language))
         else:
             idioma = dict_idiomas[language]
             if idioma == filtro_idioma:
                 lista_enlaces.append(item.clone(title=title, text_color=color2, action="play",
-                                                url=scrapedurl, server=server, idioma=language, orden=orden))
+                                                url=scrapedurl, server=server, idioma=language, orden=orden,
+                                                language=language))
             else:
                 if language not in filtrados:
                     filtrados.append(language)
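
The common thread through all of these hunks: every generated Item now carries an explicit language attribute (and, where the page exposes them, quality and server), so filtering happens on metadata instead of on text baked into titles. A condensed sketch of what that enables (the attribute values are illustrative):

# With language set as a real attribute, a filter no longer parses titles:
castellano = [it for it in lista_enlaces if it.language == 'Castellano']

# Previously the same filter would have had to scrape its own UI text, e.g.
# [it for it in lista_enlaces if '(Castellano)' in it.title]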