alfa-addon
2017-08-25 20:37:16 -04:00
parent 44153b1016
commit 563a0852c2
3 changed files with 65 additions and 61 deletions

plugin.video.alfa/channels/newpct1.py

@@ -11,16 +11,16 @@ from platformcode import config, logger
def mainlist(item):
logger.info()
itemlist = []
thumb_pelis=get_thumb("channels_movie.png")
thumb_series=get_thumb("channels_tvshow.png")
itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url="http://www.newpct1.com/",
extra="peliculas", thumbnail=thumb_pelis ))
itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url="http://www.newpct1.com/", extra="series",
itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url="http://www.newpct1.com/", extra="series",
thumbnail=thumb_series))
# itemlist.append(Item(channel=item.channel, action="search", title="Buscar"))
@@ -136,22 +136,22 @@ def listado(item):
1).strip()
# logger.info("[newpct1.py] titulo="+title)
'''
-if len(title)>3:
-url_i = 'http://www.newpct1.com/index.php?page=buscar&url=&letter=&q=%22' + title.replace(" ","%20") + '%22'
+if len(title)>3:
+url_i = 'http://www.newpct1.com/index.php?page=buscar&url=&letter=&q=%22' + title.replace(" ","%20") + '%22'
else:
-url_i = 'http://www.newpct1.com/index.php?page=buscar&url=&letter=&q=' + title
+url_i = 'http://www.newpct1.com/index.php?page=buscar&url=&letter=&q=' + title
if "1.com/series-hd" in url:
extra="serie-hd"
url = url_i + '&categoryID=&categoryIDR=1469&calidad=' + calidad.replace(" ","+") #DTV+720p+AC3+5.1
elif "1.com/series-vo" in url:
elif "1.com/series-vo" in url:
extra="serie-vo"
-url = url_i + '&categoryID=&categoryIDR=775&calidad=' + calidad.replace(" ","+") #HDTV+720p+AC3+5.1
-elif "1.com/series/" in url:
+url = url_i + '&categoryID=&categoryIDR=775&calidad=' + calidad.replace(" ","+") #HDTV+720p+AC3+5.1
+elif "1.com/series/" in url:
extra="serie-tv"
-url = url_i + '&categoryID=&categoryIDR=767&calidad=' + calidad.replace(" ","+")
+url = url_i + '&categoryID=&categoryIDR=767&calidad=' + calidad.replace(" ","+")
-url += '&idioma=&ordenar=Nombre&inon=Descendente'
+url += '&idioma=&ordenar=Nombre&inon=Descendente'
'''
else:
title = title.replace("Descargar", "", 1).strip()
@@ -185,7 +185,7 @@ def listado(item):
paginacion = scrapertools.get_match(data, patron)
if "Next" in paginacion:
-url_next_page = scrapertools.get_match(paginacion, '<a href="([^>]+)>Next</a>')[:-1].replace(" ", "%20")
+url_next_page = scrapertools.get_match(paginacion, '<a href="(http[^>]+)>Next</a>')[:-1].replace(" ", "%20")
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente", url=url_next_page,
extra=item.extra))
# logger.info("[newpct1.py] listado items:" + str(len(itemlist)))
@@ -208,7 +208,7 @@ def completo(item):
if item.extra != "serie_add":
'''
-# Afinar mas la busqueda
+# Afinar mas la busqueda
if item_extra=="serie-hd":
categoryID=buscar_en_subcategoria(item.show,'1469')
elif item_extra=="serie-vo":
@@ -374,7 +374,7 @@ def get_episodios(item):
paginacion = scrapertools.get_match(data, patron)
# logger.info("[newpct1.py] get_episodios: paginacion= " + paginacion)
if "Next" in paginacion:
-url_next_page = scrapertools.get_match(paginacion, '<a href="([^>]+)>Next</a>')[:-1]
+url_next_page = scrapertools.get_match(paginacion, '<a href="(http[^>]+)>Next</a>')[:-1]
url_next_page = url_next_page.replace(" ", "%20")
# logger.info("[newpct1.py] get_episodios: url_next_page= " + url_next_page)
itemlist.append(
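Both pagination hunks above make the same change: the "Next" link pattern now requires the captured href to start with http, presumably so a relative or junk href no longer becomes a next-page item. A minimal standalone sketch with plain re and invented markup (the channel itself goes through scrapertools.get_match):

import re

old = r'<a href="([^>]+)>Next</a>'
new = r'<a href="(http[^>]+)>Next</a>'

relative = '<a href="/buscar/pg/2">Next</a>'
absolute = '<a href="http://www.newpct1.com/buscar/pg/2">Next</a>'

print(re.findall(old, relative))   # ['/buscar/pg/2"']  -> the old pattern accepts this
print(re.findall(new, relative))   # []                 -> the new pattern only accepts absolute http links
print(re.findall(new, absolute))   # ['http://www.newpct1.com/buscar/pg/2"']
# The closing quote rides along inside the capture (neither pattern closes the href attribute),
# which is what the [:-1] slice strips before spaces are encoded as %20.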

plugin.video.alfa/channels/pelisplus.json Executable file → Normal file

@@ -44,7 +44,9 @@
"latino",
"movie",
"tvshow",
"documentary"
"documentary",
"direct"
],
"settings": [
{

plugin.video.alfa/channels/pelisplus.py Executable file → Normal file

@@ -9,6 +9,7 @@ from core import scrapertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
+from core import servertools
host = "http://www.pelisplus.tv/"
@@ -25,10 +26,11 @@ list_quality = ['1080p',
'720p',
'480p',
'360p',
-'240p'
+'240p',
+'default'
]
list_servers = [
'directo',
'gvideo',
'openload',
'thevideos'
]
@@ -419,65 +421,65 @@ def findvideos(item):
logger.info()
itemlist = []
duplicados = []
-datas = httptools.downloadpage(item.url).data
-patron = "<iframe.*?src='([^']+)' frameborder='0' allowfullscreen.*?"
-matches = re.compile(patron, re.DOTALL).findall(datas)
+data = httptools.downloadpage(item.url).data
+logger.debug('data: %s'%data)
+video_page = scrapertools.find_single_match(data, "<iframe width='100%' height='500' src='(.*?)' frameborder='0'")
+data = httptools.downloadpage(video_page).data
+patron = '<li data-id=".*?">\s+<a href="(.*?)" >'
+matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl in matches:
-if 'elreyxhd' or 'pelisplus.biz' in scrapedurl:
-patronr = ''
-data = httptools.downloadpage(scrapedurl, headers=headers).data
+if 'tipo' in scrapedurl:
+server = 'gvideo'
+gvideo_data = httptools.downloadpage(scrapedurl).data
+video_url = scrapertools.find_single_match(gvideo_data,'<div id="player">.*?border: none" src="\/\/(.*?)" ')
+video_url= 'http://%s'%video_url
+gvideo_url = httptools.downloadpage(video_url).data
+videourl = servertools.findvideosbyserver(gvideo_url, server)
-quote = scrapertools.find_single_match(data, 'sources.*?file.*?http')
-if quote and "'" in quote:
-patronr = "file:'([^']+)',label:'([^.*?]+)',type:.*?'.*?}"
-elif '"' in quote:
-patronr = '{file:"(.*?)",label:"(.*?)"}'
-if patronr != '':
-matchesr = re.compile(patronr, re.DOTALL).findall(data)
+logger.debug('videourl: %s'%videourl)
+language = 'latino'
+quality = 'default'
+url = videourl[0][1]
+title = '%s (%s)'%(item.contentTitle, server)
+thumbnail = item.thumbnail
+fanart = item.fanart
+if video_url not in duplicados:
+itemlist.append(item.clone(action="play",
+title=title,
+url=url,
+thumbnail=thumbnail,
+fanart=fanart,
+show=title,
+extra='gvideo',
+language=language,
+quality=quality,
+server=server
+))
+duplicados.append(video_url)
-for scrapedurl, scrapedcalidad in matchesr:
-url = scrapedurl
-language = 'latino'
-quality = scrapedcalidad.decode('cp1252').encode('utf8')
-title = item.contentTitle + ' (' + str(scrapedcalidad) + ')'
-thumbnail = item.thumbnail
-fanart = item.fanart
-if url not in duplicados:
-itemlist.append(item.clone(action="play",
-title=title,
-url=url,
-thumbnail=thumbnail,
-fanart=fanart,
-show=title,
-extra='directo',
-language=language,
-quality=quality,
-server='directo',
-))
-duplicados.append(url)
-url = scrapedurl
-from core import servertools
-itemlist.extend(servertools.find_video_items(data=datas))
+itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
# videoitem.infoLabels = item.infoLabels
videoitem.channel = item.channel
if videoitem.quality == '' or videoitem.language == '':
-videoitem.quality = 'default'
-videoitem.language = 'Latino'
+videoitem.quality = 'default'
+videoitem.language = 'Latino'
if videoitem.server != '':
-videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server)
+videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server)
else:
-videoitem.thumbnail = item.thumbnail
-videoitem.server = 'directo'
+videoitem.thumbnail = item.thumbnail
+videoitem.server = 'directo'
videoitem.action = 'play'
videoitem.fulltitle = item.title
if videoitem.extra != 'directo' and 'youtube' not in videoitem.url:
-videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
+videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
n = 0
for videoitem in itemlist:
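The rewritten findvideos follows the film page's embedded iframe to the player page, resolves the Google Video ("tipo") entries through servertools.findvideosbyserver, and leaves the remaining hosted copies to the generic detector, instead of regex-scraping sources/file blocks as before. A condensed sketch of that flow, using the same core helpers the diff uses (duplicate filtering, thumbnails, language/quality tagging and the final videoitem post-processing are omitted):

import re
from core import httptools, scrapertools, servertools

def findvideos_sketch(item):
    itemlist = []
    # Follow the film page's embedded iframe to the real player page.
    data = httptools.downloadpage(item.url).data
    video_page = scrapertools.find_single_match(
        data, "<iframe width='100%' height='500' src='(.*?)' frameborder='0'")
    data = httptools.downloadpage(video_page).data
    # Each <li data-id=...> entry on the player page links one hosted copy.
    for scrapedurl in re.findall(r'<li data-id=".*?">\s+<a href="(.*?)" >', data, re.DOTALL):
        if 'tipo' in scrapedurl:
            # Google Video embeds are resolved by hand: fetch the embed page, pull the
            # protocol-relative player URL, and let servertools extract the playable link.
            gvideo_data = httptools.downloadpage(scrapedurl).data
            video_url = 'http://' + scrapertools.find_single_match(
                gvideo_data, r'<div id="player">.*?border: none" src="\/\/(.*?)" ')
            videourl = servertools.findvideosbyserver(
                httptools.downloadpage(video_url).data, 'gvideo')
            itemlist.append(item.clone(action='play', server='gvideo', extra='gvideo',
                                       url=videourl[0][1],
                                       title='%s (gvideo)' % item.contentTitle))
    # Everything else on the player page is left to the generic server detector.
    itemlist.extend(servertools.find_video_items(data=data))
    return itemlist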