Merge pull request #396 from Alfa-beto/fixes

Fixes and info for the channels
Authored by Alfa on 2018-08-08 14:31:53 -05:00, committed by GitHub.
9 changed files with 761 additions and 247 deletions.

View File

@@ -8,6 +8,7 @@
"banner": "https://imgur.com/B1IOAu4.png",
"categories": [
"movie",
"tvshow"
"tvshow",
"vos"
]
}

View File

@@ -6,6 +6,7 @@ import urlparse
 from core import httptools
 from core import scrapertools
 from core import servertools
+from core import tmdb
 from core.item import Item
 from platformcode import config, logger
@@ -17,15 +18,21 @@ def mainlist(item):
     itemlist = list()
     itemlist.append(Item(channel=item.channel, action="estrenos", title="Estrenos", url=host))
     itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas",
-                         url=urlparse.urljoin(host, "p/peliculas.html")))
+                         url=urlparse.urljoin(host, "p/peliculas.html"), type='pl', first=0))
     itemlist.append(Item(channel=item.channel, action="lista", title="Series",
-                         url=urlparse.urljoin(host, "p/series.html")))
-    itemlist.append(Item(channel=item.channel, action="category", title="Orden Alfabético", url=host))
-    itemlist.append(Item(channel=item.channel, action="category", title="Géneros", url=host))
-    itemlist.append(Item(channel=item.channel, action="category", title="Año de Estreno", url=host))
-    #itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=urlparse.urljoin(host, "/search?q=")))
+                         url=urlparse.urljoin(host, "p/series.html"), type='sr', first=0))
+    itemlist.append(Item(channel=item.channel, action="category", title="Géneros", url=host, cat='genre'))
+    itemlist.append(Item(channel=item.channel, action="category", title="Calidad", url=host, cat='quality'))
+    itemlist.append(Item(channel=item.channel, action="category", title="Orden Alfabético", url=host, cat='abc'))
+    itemlist.append(Item(channel=item.channel, action="category", title="Año de Estreno", url=host, cat='year'))
+    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+"/search?q="))
     return itemlist
@@ -34,155 +41,175 @@ def category(item):
     itemlist = list()
     data = httptools.downloadpage(host).data
     data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
-    patron_generos = "<h2 class='title'>"+item.title+"<\/h2><div class='.+?'><ul class='.+?'><(.+?)><\/ul><\/div>"
-    data_generos = scrapertools.find_single_match(data, patron_generos)
-    patron = "<a href='(.+?)'>(.+?)<\/a>"
-    matches = scrapertools.find_multiple_matches(data_generos, patron)
-    for scrapedurl, scrapedtitle in matches:
-        itemlist.append(item.clone(action='lista', title=scrapedtitle, url=host+scrapedurl))
+    if item.cat == 'abc':
+        data = scrapertools.find_single_match(data, '<span>Orden Alfabético</span>.*?</ul>')
+    elif item.cat == 'genre':
+        data = scrapertools.find_single_match(data, '<span>Géneros</span>.*?</ul>')
+    elif item.cat == 'year':
+        data = scrapertools.find_single_match(data, '<span>Año</span>.*?</ul>')
+    elif item.cat == 'quality':
+        data = scrapertools.find_single_match(data, '<span>Calidad</span>.*?</ul>')
+    patron = "<li>([^<]+)<a href='([^']+)'>"
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for scrapedtitle, scrapedurl in matches:
+        if scrapedtitle != 'Próximas Películas':
+            itemlist.append(item.clone(action='lista', title=scrapedtitle, url=host+scrapedurl, type='cat', first=0))
     return itemlist
+def search_results(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+    patron = '<span class=.post-labels.>([^<]+)</span>.*?class="poster-bg" src="([^"]+)"/>.*?<h4>.*?'
+    patron += ">(\d{4})</a>.*?<h6>([^<]+)<a href='([^']+)"
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedtype, scrapedthumbnail, scrapedyear, scrapedtitle, scrapedurl in matches:
+        title = "%s [%s]" % (scrapedtitle, scrapedyear)
+        new_item = Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail)
+        if scrapedtype.strip() == 'Serie':
+            new_item.contentSerieName = scrapedtitle
+            new_item.action = 'episodios'
+            new_item.type = 'sr'
+        else:
+            new_item.contentTitle = scrapedtitle
+            new_item.action = 'findvideos'
+            new_item.type = 'pl'
+        itemlist.append(new_item)
+    return itemlist

 def search(item, texto):
     logger.info()
     texto = texto.replace(" ", "+")
     item.url = item.url + texto
     if texto != '':
-        return lista(item)
+        return search_results(item)
 def estrenos(item):
     logger.info()
     itemlist = list()
     data = httptools.downloadpage(host).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
     patron_estre = "<div class='widget HTML' data-version='1' id='HTML9'><h2 class='title'>(.+?)<\/a><\/li><\/ul>"
     data_estre = scrapertools.find_single_match(data, patron_estre)
     patron = '<i class="([^"]+)"><\/i><div class="calidad">.+?'  # series or movie
     patron += '<img src="([^"]+)"\/>'  # scrapedthumbnail
     patron += '<h4>([^"]+)<\/h4>.+?'  # scrapedtitle
     patron += '<a href="([^"]+)">'  # scrapedurl
     matches = scrapertools.find_multiple_matches(data_estre, patron)
     for scrapedtype, scrapedthumbnail, scrapedtitle, scrapedurl in matches:
         title = "%s [%s]" % (scrapedtitle, scrapedtype)
         if scrapedtype == "pelicula":
             itemlist.append(item.clone(title=title, url=host+scrapedurl, action="findvideos", extra=scrapedtype,
                                        show=scrapedtitle, thumbnail=scrapedthumbnail, contentType="movie",
                                        context=["buscar_trailer"]))
         else:
             itemlist.append(item.clone(title=title, url=host+scrapedurl, show=scrapedtitle,
                                        thumbnail=scrapedthumbnail, action="capitulos"))
     return itemlist
-def capitulos(item):
+def episodios(item):
     logger.info()
     itemlist = list()
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-    patron_datos = '<div class="output">(.+?)><\/section>'
-    data_caps = scrapertools.find_single_match(data, patron_datos)
-    patron_caps = '<img alt=".+?" src="(.+?)"\/><a href="http:\/\/bit.ly\/(.+?)"'
-    matches = scrapertools.find_multiple_matches(data_caps, patron_caps)
-    cap = 0
-    for scrapedthumbnail, scrapedurl in matches:
-        link = scrapedurl
-        cap = cap+1
-        link = "http://www.trueurl.net/?q=http%3A%2F%2Fbit.ly%2F"+link+"&lucky=on&Uncloak=Find+True+URL"
-        data_other = httptools.downloadpage(link).data
-        data_other = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data_other)
-        patron = '<A title="http:\/\/privatelink.de\/\?(.+?)"'
-        url = scrapertools.find_single_match(data_other, patron)
-        title = "%s%s - %s" % (title, str(cap).zfill(2), item.show)
-        itemlist.append(item.clone(action='findvideos', title=title,
-                                   url=url, show=item.show, thumbnail=scrapedthumbnail))
+    patron = '<div id="ep(\d+)" class="eps"> <section class="section-post online"><div class="player">.*?'
+    patron += 'src="([^"]+)"/><a href="([^"]+)" target='
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for scrapedepi, scrapedthumbnail, scrapedurl in matches:
+        url = scrapedurl
+        title = "1x%s - %s" % (scrapedepi, item.contentSerieName)
+        itemlist.append(item.clone(action='findvideos', title=title, url=url, thumbnail=scrapedthumbnail, type=item.type,
+                                   infoLabels=item.infoLabels))
     if config.get_videolibrary_support() and len(itemlist) > 0:
-        itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
-                             action="add_serie_to_library", extra="episodios", show=item.show))
+        itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]",
+                             url=item.url, action="add_serie_to_library", extra="episodios",
+                             contentSerieName=item.contentSerieName))
     return itemlist
-def bitly(item):
-    logger.info()
-    itemlist = list()
-    data = httptools.downloadpage(item.url).data
-    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-    patron = '<a href="http:\/\/bit.ly\/(.+?)"'
-    link = scrapertools.find_single_match(data, patron)
-    link = "http://www.trueurl.net/?q=http%3A%2F%2Fbit.ly%2F"+link+"&lucky=on&Uncloak=Find+True+URL"
-    data_other = httptools.downloadpage(link).data
-    data_other = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data_other)
-    patron = '<A title="http:\/\/privatelink.de\/\?(.+?)"'
-    url = scrapertools.find_single_match(data_other, patron)
-    if item.contentType == "movie":
-        contentType = "movie"
-    else:
-        contentType = "serie"
-    item = (item.clone(action='findvideos', url=url, show=item.show, thumbnail=item.thumbnail, contentType=contentType))
-    return item
 def lista(item):
     logger.info()
+    next = True
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-    patron = '<i class="(.+?)"><\/i>'  # scrapedtype
-    patron += '<div class="calidad">(.+?)<\/div>'  # scrapedquality
-    patron += '<img src="(.+?)"\/>'  # scrapedthumbnail
-    patron += '<h4>(.+?)<\/h4>'  # scrapedtitle
-    patron += "<h5>(.+?)<\/h5>"  # scrapedyear
-    patron += '<a href="(.+?)"'  # scrapedurl
-    #patron += "<\/a>.+?<div class='item-snippet'>(.+?)<"  # scrapedplot
-    if item.title != "Prueba":
-        pat = '<div id="tab-1"><ul class="post-gallery">(.+?)<\/ul><\/div>'
-        data = scrapertools.find_single_match(data, pat)
+    data = scrapertools.find_single_match(data, "itemprop='headline'>.*?</h2>.*?</ul>")
+    patron = '<span class="([^"]+)">.*?<figure class="poster-bg"><header><span>(\d{4})</span></header><img src="([^"]+)" />'
+    patron += '<footer>(.*?)</footer></figure><h6>([^<]+)</h6><a href="([^"]+)"></a>'
     matches = scrapertools.find_multiple_matches(data, patron)
-    for scrapedtype, scrapedquality, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedurl in matches:
+    first = int(item.first)
+    last = first + 19
+    if last > len(matches):
+        last = len(matches)
+        next = False
+    for scrapedtype, scrapedyear, scrapedthumbnail, scrapedquality, scrapedtitle, scrapedurl in matches[first:last]:
         patron_quality = "<span>(.+?)</span>"
         quality = scrapertools.find_multiple_matches(scrapedquality, patron_quality)
         qual = ""
         for calidad in quality:
             qual = qual+"["+calidad+"] "
         title = "%s [%s] %s" % (scrapedtitle, scrapedyear, qual)
-        if item.title == "Series":
-            itemlist.append(item.clone(title=title, url=host+scrapedurl, extra=scrapedtitle, plot=scrapedtitle,
-                                       show=scrapedtitle, thumbnail=scrapedthumbnail, contentType="serie", action="capitulos"))
-        elif scrapedtype != 'serie':
-            itemlist.append(
-                item.clone(title=title, url=host+scrapedurl, action="findvideos", extra=scrapedtype, plot=scrapedtitle,
-                           show=scrapedtitle, thumbnail=scrapedthumbnail, contentType="movie", context=["buscar_trailer"]))
+        new_item = Item(channel=item.channel, title=title, url=host+scrapedurl, thumbnail=scrapedthumbnail,
+                        type=scrapedtype, infoLabels={'year': scrapedyear})
+        if scrapedtype.strip() == 'sr':
+            new_item.contentSerieName = scrapedtitle
+            new_item.action = 'episodios'
+        else:
+            new_item.contentTitle = scrapedtitle
+            new_item.action = 'findvideos'
+        if scrapedtype == item.type or item.type == 'cat':
+            itemlist.append(new_item)
-    # Paginacion
-    patron_genero = '<h1>([^"]+)<\/h1>'
-    genero = scrapertools.find_single_match(data, patron_genero)
-    if genero == "Romance" or genero == "Drama":
-        patron = "<a rel='nofollow' class=previouspostslink' href='([^']+)'>Siguiente "
-    else:
-        patron = "<span class='current'>.+?href='(.+?)'>"
-    next_page_url = scrapertools.find_single_match(data, patron)
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+    # pagination
+    url_next_page = item.url
+    first = last
+    if next:
+        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista', first=first))
-    if next_page_url != "":
-        item.url = next_page_url
-        itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente", url=next_page_url,
-                             thumbnail='https://s32.postimg.cc/4zppxf5j9/siguiente.png'))
     return itemlist
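# (lista() above pages locally: the site returns the whole catalogue in one page,
# so each call renders a window of 19 matches and "Siguiente >>" re-enters lista
# with first=last.)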
 def findvideos(item):
     logger.info()
     itemlist = []
-    if item.extra == 'pelicula':
-        item = bitly(item)
+    dl_links = []
     data = httptools.downloadpage(item.url).data
-    itemlist.extend(servertools.find_video_items(data=data))
-    show = item.show
-    for videoitem in itemlist:
-        videoitem.channel = item.channel
-    if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType == "movie" and item.contentChannel != 'videolibrary':
-        itemlist.append(
-            Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
-                 action="add_pelicula_to_library", extra="findvideos", contentTitle=show))
-    return itemlist
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+    ### fetch the gvideo links
+    patron = 'class="Button Sm fa fa-download mg"></a><a target="_blank" rel="nofollow" href="([^"]+)"'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for dl_url in matches:
+        g_data = httptools.downloadpage(dl_url).data
+        video_id = scrapertools.find_single_match(g_data, 'jfk-button jfk-button-action" href="([^"]+)">')
+        g_url = '%s%s' % ('https://drive.google.com', video_id)
+        g_url = g_url.replace('&amp;', '&')
+        g_data = httptools.downloadpage(g_url, follow_redirects=False, only_headers=True).headers
+        url = g_data['location']
+        dl_links.append(Item(channel=item.channel, title='%s', url=url, action='play', infoLabels=item.infoLabels))
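    # (the loop above follows each "download" button to Google Drive, scrapes the
    # confirm link, then reads the final stream URL from the Location header of a
    # request made with redirects disabled)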
+    if item.type == 'pl':
+        new_url = scrapertools.find_single_match(data, '<div class="player">.*?<a href="([^"]+)" target')
+        data = httptools.downloadpage(new_url).data
+        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+        patron = '<li class="btn.*?" data-video="([^"]+)">'
+        matches = re.compile(patron, re.DOTALL).findall(data)
+        for video_id in matches:
+            url_data = httptools.downloadpage('https://tinyurl.com/%s' % video_id, follow_redirects=False)
+            url = url_data.headers['location']
+            itemlist.append(Item(channel=item.channel, title='%s', url=url, action='play', infoLabels=item.infoLabels))
+    itemlist.extend(dl_links)
+    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
     return itemlist

View File

@@ -252,10 +252,8 @@ def ultimas(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
-    data = data.decode('cp1252')
     realplot = ''
-    patron = '<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)" alt=.*? style="width:105px; height:160px; ' \
-             'border:1px solid #999"\/><\/a>'
+    patron = '<a href="([^"]+)" title="([^"]+)"> <img src="([^"]+)".*?solid'
     matches = re.compile(patron, re.DOTALL).findall(data)
@@ -312,10 +310,7 @@ def letras(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
-    data = data.decode('cp1252')
     data = scrapertools.find_single_match(data, '<\/form><\/table><\/div>.*?<\/ul>')
-    patron = '<li><a href="(.*?)" title="Letra.*?">(.*?)<\/a><\/li>'
+    patron = '<li><a href="([^"]+)" title="Letra.*?">([^<]+)<\/a><\/li>'
     matches = re.compile(patron, re.DOTALL).findall(data)
     for scrapedurl, scrapedtitle in matches:
@@ -356,36 +351,40 @@ def findvideos(item):
     logger.info()
     itemlist = []
-    new_url = get_link(get_source(item.url))
-    new_url = get_link(get_source(new_url))
-    video_id = scrapertools.find_single_match(new_url, 'http.*?h=(\w+)')
-    new_url = '%s%s' % (host, 'playeropstream/api.php')
-    post = {'h': video_id}
-    post = urllib.urlencode(post)
-    data = httptools.downloadpage(new_url, post=post).data
-    json_data = jsontools.load(data)
-    url = json_data['url']
-    server = servertools.get_server_from_url(url)
-    title = '%s [%s]' % (server, item.language)
-    itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language=item.language,
-                         server=server, infoLabels=item.infoLabels))
+    try:
+        new_url = get_link(get_source(item.url))
+        new_url = get_link(get_source(new_url))
+        video_id = scrapertools.find_single_match(new_url, 'http.*?h=(\w+)')
+        new_url = '%s%s' % (host, 'playeropstream/api.php')
+        post = {'h': video_id}
+        post = urllib.urlencode(post)
+        data = httptools.downloadpage(new_url, post=post).data
+        json_data = jsontools.load(data)
+        url = json_data['url']
+        server = servertools.get_server_from_url(url)
+        title = '%s [%s]' % (server, item.language)
+        itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language=item.language,
+                             server=server, infoLabels=item.infoLabels))
-    # Requerido para FilterTools
-    itemlist = filtertools.get_links(itemlist, item, list_language)
-    # Requerido para AutoPlay
-    autoplay.start(itemlist, item)
+        # Required for FilterTools
+        itemlist = filtertools.get_links(itemlist, item, list_language)
+        # Required for AutoPlay
+        autoplay.start(itemlist, item)
-    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
-        itemlist.append(Item(channel=item.channel,
-                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
-                             url=item.url,
-                             action="add_pelicula_to_library",
-                             extra="findvideos",
-                             contentTitle=item.contentTitle
-                             ))
+    except:
+        pass
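    # note: the bare except above makes the whole scrape fail soft — any site or
    # parsing change just yields an empty itemlist instead of a traceback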
+    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
+        itemlist.append(Item(channel=item.channel,
+                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
+                             url=item.url,
+                             action="add_pelicula_to_library",
+                             extra="findvideos",
+                             contentTitle=item.contentTitle
+                             ))
     return itemlist

View File

@@ -0,0 +1,73 @@
{
  "id": "peliculasyseries",
  "name": "PeliculasySeries",
  "active": true,
  "adult": false,
  "language": ["lat", "cast"],
  "thumbnail": "https://s22.postimg.cc/xy1burkep/peliculasyseries.png",
  "banner": "",
  "categories": [
    "movie",
    "tvshow",
    "vos"
  ],
  "settings": [
    {
      "id": "include_in_global_search",
      "type": "bool",
      "label": "Incluir en busqueda global",
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "filter_languages",
      "type": "list",
      "label": "Mostrar enlaces en idioma...",
      "default": 0,
      "enabled": true,
      "visible": true,
      "lvalues": [
        "No filtrar",
        "Latino",
        "Castellano",
        "VOSE",
        "VOS",
        "VO"
      ]
    },
    {
      "id": "include_in_newest_peliculas",
      "type": "bool",
      "label": "Incluir en Novedades - Peliculas",
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_infantiles",
      "type": "bool",
      "label": "Incluir en Novedades - Infantiles",
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "comprueba_enlaces",
      "type": "bool",
      "label": "Verificar si los enlaces existen",
      "default": false,
      "enabled": true,
      "visible": true
    },
    {
      "id": "comprueba_enlaces_num",
      "type": "list",
      "label": "Número de enlaces a verificar",
      "default": 1,
      "enabled": true,
      "visible": "eq(-1,true)",
      "lvalues": [ "5", "10", "15", "20" ]
    }
  ]
}
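Note: the "visible" value "eq(-1,true)" appears to be a positional condition in these channel manifests — the option is shown only when the setting one slot above it (here "comprueba_enlaces") is true.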

View File

@@ -0,0 +1,345 @@
# -*- coding: utf-8 -*-
# -*- Channel PeliculasySeries -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import base64
from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from lib import jsunpack
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger
IDIOMAS = {'la': 'Latino', 'lat': 'Latino', 'cas': 'Castellano', 'es': 'Castellano', 'vs': 'VOSE', 'vos': 'VOSE',
           'vo': 'VO', 'ori': 'VO', 'so': 'VOS', 'sor': 'VOS'}
list_language = IDIOMAS.values()
list_quality = ['TS', 'Screener', 'DVDRip', 'HDRip', 'HDTV', 'micro720', 'micro1080']
list_servers = ['openload', 'rapidvideo', 'powvideo', 'gamovideo', 'streamplay', 'flashx', 'clipwatching', 'vidoza',
                'thevideome']
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'peliculasyseries')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'peliculasyseries')
host = 'https://peliculasyseries.org/'
def mainlist(item):
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = []
    itemlist.append(Item(channel=item.channel, title='Peliculas', action='menu_movies',
                         thumbnail=get_thumb('movies', auto=True)))
    itemlist.append(Item(channel=item.channel, title='Series', url=host+'series', action='list_all', type='tvshows',
                         thumbnail=get_thumb('tvshows', auto=True)))
    itemlist.append(
        item.clone(title="Buscar", action="search", url=host + 'buscar/q/', thumbnail=get_thumb("search", auto=True),
                   extra='movie'))
    itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality)
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def menu_movies(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'movies', action='list_all',
                         thumbnail=get_thumb('all', auto=True), type='movies'))
    itemlist.append(Item(channel=item.channel, title='Genero', action='section',
                         thumbnail=get_thumb('genres', auto=True), type='movies'))
    return itemlist
def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data
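# get_language() maps the flag CSS classes the site uses (e.g. "la", "es") to the
# labels in IDIOMAS; anything it does not recognise is treated as VOS.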
def get_language(lang_data):
    logger.info()
    language = []
    lang_data = lang_data.replace('language-ES', '').replace('medium', '').replace('serie', '').replace('-', '')
    if 'class' in lang_data:
        lang_list = scrapertools.find_multiple_matches(lang_data, 'class=" ([^"]+)"')
    else:
        return lang_data.strip()
    for lang in lang_list:
        if lang not in IDIOMAS:
            lang = 'so'  # unknown flags fall back to the 'VOS' label (the original 'VOS' literal was not a valid IDIOMAS key)
        if IDIOMAS[lang] not in language:
            language.append(IDIOMAS[lang])
    return language
def section(item):
    logger.info()
    itemlist = []
    duplicados = []
    data = get_source(host)
    data = scrapertools.find_single_match(data, 'data-toggle="dropdown">Géneros.*?multi-column-dropdown">.*?"clearfix"')
    if 'Genero' in item.title:
        patron = '<li><a href="([^"]+)">([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle
        if title not in duplicados:
            itemlist.append(Item(channel=item.channel, url=scrapedurl, title=title, action='list_all',
                                 type=item.type))
            duplicados.append(title)
    return itemlist
def list_all(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    if item.type == 'movies':
        patron = '<div class="col-md-2 w3l-movie-gride-agile"><a href="([^"]+)" class=".*?">'
        patron += '<img src="([^"]+)" title="([^"]+)" class="img-responsive".*?'
        patron += '<div class="calidad" >([^<]+)</div> <div class="audio-info">'
        patron += '(.*?)<div class="w3l-action-icon">.*?<p>([^<]+)</p>'
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedurl, scrapedthumbnail, scrapedtitle, quality, lang_data, year in matches:
            title = '%s [%s] [%s]' % (scrapedtitle, year, quality)
            if 'screener' in quality.lower():
                quality = 'Screener'
            contentTitle = scrapedtitle
            thumbnail = scrapedthumbnail
            url = scrapedurl
            language = get_language(lang_data)
            itemlist.append(item.clone(action='findvideos',
                                       title=title,
                                       url=url,
                                       thumbnail=thumbnail,
                                       contentTitle=contentTitle,
                                       language=language,
                                       quality=quality,
                                       infoLabels={'year': year}))
    elif item.type == 'tvshows':
        patron = '<div class="col-md-2 w3l-movie-gride-agile"><a href="([^"]+)" class=".*?">'
        patron += '<img src="([^"]+)" title="([^"]+)" class="img-responsive".*?<p>([^<]+)</p>'
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
            title = scrapedtitle
            contentSerieName = scrapedtitle
            thumbnail = scrapedthumbnail
            url = scrapedurl
            itemlist.append(item.clone(action='seasons',
                                       title=title,
                                       url=url,
                                       thumbnail=thumbnail,
                                       contentSerieName=contentSerieName,
                                       context=filtertools.context(item, list_language, list_quality),
                                       infoLabels={'year': year}))
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    # Pagination
    url_next_page = scrapertools.find_single_match(data, "<a class='last' href='([^']+)'>»</a>")
    if url_next_page:
        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
    return itemlist
def seasons(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = '<a href="([^"]+)"><img class="thumb-item" src="([^"]+)" alt="[^"]+" >'
    patron += '<div class="season-item">Temporada (\d+)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    for scrapedurl, scrapedthumbnail, season in matches:
        infoLabels['season'] = season
        title = 'Temporada %s' % season
        itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, action='episodesxseasons',
                             thumbnail=scrapedthumbnail, infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
    return itemlist
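# episodios() flattens the whole show: it walks every season returned by seasons()
# and concatenates the per-season episode lists; the videolibrary calls it through
# extra="episodios" when a series is added.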
def episodios(item):
    logger.info()
    itemlist = []
    templist = seasons(item)
    for tempitem in templist:
        itemlist += episodesxseasons(tempitem)
    return itemlist
def episodesxseasons(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = 'class="row-serie-item"><a href="([^"]+)">.*?<img class="episode-thumb-item" src="([^"]+)" alt="([^"]+)" >'
    patron += '<divclass="audio-info-series">(.*?)<div class="episode-item">%s+x(\d+)</div>' % item.infoLabels['season']
    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    for scrapedurl, scrapedthumbnail, scrapedtitle, lang_data, scrapedepisode in matches:
        infoLabels['episode'] = scrapedepisode
        url = scrapedurl
        language = get_language(lang_data)
        title = '%sx%s - %s %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle, language)
        itemlist.append(Item(channel=item.channel, title=title, url=url, action='findvideos',
                             thumbnail=scrapedthumbnail, language=language, infoLabels=infoLabels))
    itemlist = filtertools.get_links(itemlist, item, list_language)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
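# findvideos(): each playable source carries its target base64-encoded in the
# data-data attribute; the first character is skipped before decoding (apparently
# a junk/marker byte) and the decoded value is the final hoster URL.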
def findvideos(item):
    logger.info()
    from lib import generictools
    itemlist = []
    data = get_source(item.url)
    patron = '<div class="available-source" ><div class="([^"]+)">.*?'
    patron += 'data-data="([^"]+)".*?<span class="quality-text">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for lang_data, scrapedurl, quality in matches:
        lang = get_language(lang_data)
        if 'screener' in quality.lower():
            quality = 'Screener'
        title = '%s [%s] [%s]'
        url = base64.b64decode(scrapedurl[1:])
        itemlist.append(
            Item(channel=item.channel, url=url, title=title, action='play', quality=quality, language=IDIOMAS[lang],
                 infoLabels=item.infoLabels))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.quality, x.language))
    # Required for link checking
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language, list_quality)
    # Required for AutoPlay
    autoplay.start(itemlist, item)
    itemlist = sorted(itemlist, key=lambda it: it.language)
    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
    return itemlist
def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    if texto != '':
        return search_results(item)
    else:
        return []
def search_results(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = '<li class="search-results-item media-item" .*?<a href="([^"]+)" title="([^"]+)">.*?'
    patron += '<img class="content" src="([^"]+)" .*?>(Pelicula|Serie) del año([^<]+)</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumb, content_type, year in matches:
        title = scrapedtitle
        if len(year) == 0:
            year = '-'
        url = scrapedurl
        thumbnail = scrapedthumb
        if not '/serie' in url:
            action = 'findvideos'
        else:
            action = 'seasons'
        new_item = Item(channel=item.channel, title=title, url=url, thumbnail=thumbnail, action=action,
                        infoLabels={'year': year})
        if new_item.action == 'findvideos':
            new_item.contentTitle = new_item.title
        else:
            new_item.contentSerieName = new_item.title
        itemlist.append(new_item)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas']:
            item.url = host + 'movies'
        elif categoria == 'infantiles':
            item.url = host + 'genero/animation/'
        item.type = 'movies'
        itemlist = list_all(item)
        if itemlist[-1].title == 'Siguiente >>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist

View File

@@ -72,7 +72,7 @@ def menu_movies(item):
def get_source(url):
     logger.info()
     data = httptools.downloadpage(url).data
-    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
     return data
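(The old get_source() stripped every double quote from the page; removing '"' from that re.sub is what drives all the quoted-attribute pattern fixes in the hunks below.)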
@@ -93,9 +93,9 @@ def section(item):
     duplicados = []
     data = get_source(host+'/'+item.type)
     if 'Genero' in item.title:
-        patron = '<li class=cat-item cat-item-\d+><a href=(.*?) >(.*?)/i>'
+        patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >(.*?)</i'
     elif 'Año' in item.title:
-        patron = '<li><a href=(.*?release.*?)>(.*?)</a>'
+        patron = '<li><a href="(.*?release.*?)">([^<]+)<'
     elif 'Calidad' in item.title:
         patron = 'menu-item-object-dtquality menu-item-\d+><a href=(.*?)>(.*?)</a>'
@@ -105,8 +105,8 @@ def section(item):
         title = scrapedtitle
         plot = ''
         if 'Genero' in item.title:
-            quantity = scrapertools.find_single_match(scrapedtitle,'</a> <i>(.*?)<')
-            title = scrapertools.find_single_match(scrapedtitle,'(.*?)</')
+            quantity = scrapertools.find_single_match(scrapedtitle,'</a> <i>([^<]+)<')
+            title = scrapertools.find_single_match(scrapedtitle,'([^<]+)</')
             title = title
             plot = '%s elementos' % quantity.replace('.','')
         else:
@@ -124,33 +124,31 @@ def list_all(item):
     itemlist = []
     data = get_source(item.url)
     if item.type == 'movies':
-        patron = '<article id=post-\d+ class=item movies><div class=poster><img src=(.*?) alt=(.*?)>.*?quality>(.*?)'
-        patron += '</span><\/div><a href=(.*?)>.*?<\/h3><span>(.*?)<\/span><\/div>.*?flags(.*?)metadata'
+        patron = '<article id="post-\d+" class="item movies"><div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?'
+        patron += '"quality">([^<]+)</span><\/div><a href="([^"]+)">.*?</h3>.*?<span>([^<]+)</'
         matches = re.compile(patron, re.DOTALL).findall(data)
-        for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year, lang_data in matches:
+        for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year in matches:
             title = '%s [%s] [%s]' % (scrapedtitle, year, quality)
             contentTitle = scrapedtitle
             thumbnail = scrapedthumbnail
             url = scrapedurl
-            language = get_language(lang_data)
+            #language = get_language(lang_data)
             itemlist.append(item.clone(action='findvideos',
                                        title=title,
                                        url=url,
                                        thumbnail=thumbnail,
                                        contentTitle=contentTitle,
-                                       language=language,
                                        quality=quality,
                                        infoLabels={'year':year}))
     elif item.type == 'tvshows':
-        patron = '<article id=post-\d+ class=item tvshows><div class=poster><img src=(.*?) alt=(.*?)>.*?'
-        patron += '<a href=(.*?)>.*?<\/h3><span>(.*?)<\/span><\/div>'
+        patron = '<article id="post-\d+" class="item tvshows"><div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?'
+        patron += '<a href="([^"]+)">.*?<span>(\d{4})<'
         matches = re.compile(patron, re.DOTALL).findall(data)
         for scrapedthumbnail, scrapedtitle, scrapedurl, year in matches:
@@ -170,7 +168,7 @@ def list_all(item):
     # Pagination
     #url_next_page = scrapertools.find_single_match(data,"<a class='arrow_pag' href=([^>]+)><i id='nextpagination'")
-    url_next_page = scrapertools.find_single_match(data,"<link rel=next href=([^ ]+) />")
+    url_next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^ ]+)" />')
     if url_next_page:
         itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
@@ -216,7 +214,7 @@ def episodesxseasons(item):
     itemlist = []
     data = get_source(item.url)
-    patron = 'class=numerando>%s - (\d+)</div><div class=episodiotitle><a href=(.*?)>(.*?)<' % item.infoLabels['season']
+    patron = 'class="numerando">%s - (\d+)</div><div class="episodiotitle"><a href="([^"]+)">([^<]+)<' % item.infoLabels['season']
     matches = re.compile(patron, re.DOTALL).findall(data)
     infoLabels = item.infoLabels
@@ -239,10 +237,10 @@ def findvideos(item):
     from lib import generictools
     itemlist = []
     data = get_source(item.url)
-    patron = 'id=option-(\d+).*?rptss src=(.*?) frameborder'
+    patron = 'id="option-(\d+).*?rptss" src="([^"]+)" frameborder'
     matches = re.compile(patron, re.DOTALL).findall(data)
     for option, scrapedurl in matches:
-        lang = scrapertools.find_single_match(data, 'href=#option-%s>.*?/flags/(.*?).png' % option)
+        lang = scrapertools.find_single_match(data, 'href="#option-%s">.*?/flags/(.*?).png' % option)
         quality = ''
         if lang not in IDIOMAS:
             lang = 'en'
@@ -306,7 +304,8 @@ def search_results(item):
     itemlist = []
     data = get_source(item.url)
-    patron = '<article>.*?<a href=(.*?)><img src=(.*?) alt=(.*?) />.*?meta.*?year>(.*?)<(.*?)<p>(.*?)</p>'
+    patron = '<article>.*?<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" />.*?"meta".*?'
+    patron += '"year">([^<]+)<(.*?)<p>([^<]+)</p>'
     matches = re.compile(patron, re.DOTALL).findall(data)
     for scrapedurl, scrapedthumb, scrapedtitle, year, lang_data, scrapedplot in matches:

View File

@@ -10,6 +10,7 @@ from core import servertools
 from core import tmdb
 from core.item import Item
 from platformcode import config, logger
+from channels import filtertools
 from channels import autoplay

 IDIOMAS = {'latino': 'Latino'}
@@ -97,7 +98,6 @@ def episodios(item):
     patron_caps = '<li><span>Capitulo (\d+).*?</span><a href="(.*?)">(.*?)</a></li>'
     matches = scrapertools.find_multiple_matches(data, patron_caps)
-    # data_info = scrapertools.find_single_match(data, '<div class="info">.+?<\/div><\/div>')
     patron_info = '<img src="([^"]+)">.+?</span>(.*?)</p>.*?<h2>Reseña:</h2><p>(.*?)</p>'
     scrapedthumbnail, show, scrapedplot = scrapertools.find_single_match(data, patron_info)
     scrapedthumbnail = host + scrapedthumbnail
@@ -142,66 +142,73 @@ def episodios(item):
return itemlist
def findvideos(item):
-    logger.info()
+    import base64
+    logger.info()
     itemlist = []
-    url_server = "https://openload.co/embed/%s/"
-    url_api_get_key = "https://serieslan.com/idx.php?i=%s&k=%s"
-    def txc(key, _str):
-        s = range(256)
-        j = 0
-        res = ''
-        for i in range(256):
-            j = (j + s[i] + ord(key[i % len(key)])) % 256
-            x = s[i]
-            s[i] = s[j]
-            s[j] = x
-        i = 0
-        j = 0
-        for y in range(len(_str)):
-            i = (i + 1) % 256
-            j = (j + s[i]) % 256
-            x = s[i]
-            s[i] = s[j]
-            s[j] = x
-            res += chr(ord(_str[y]) ^ s[(s[i] + s[j]) % 256])
-        return res
     data = httptools.downloadpage(item.url).data
-    pattern = "<script type=.+?>.+?\['(.+?)','(.+?)','.+?'\]"
-    idv, ide = scrapertools.find_single_match(data, pattern)
-    thumbnail = scrapertools.find_single_match(data,
-                                               '<div id="tab-1" class="tab-content current">.+?<img src="([^"]*)">')
-    show = scrapertools.find_single_match(data, '<span>Episodio: <\/span>([^"]*)<\/p><p><span>Idioma')
-    thumbnail = host + thumbnail
-    data = httptools.downloadpage(url_api_get_key % (idv, ide), headers={'Referer': item.url}).data
-    data = eval(data)
-    if type(data) == list:
-        video_url = url_server % (txc(ide, base64.decodestring(data[2])))
-        server = "openload"
-        if " SUB" in item.title:
-            lang = "VOS"
-        elif " Sub" in item:
-            lang = "VOS"
-        else:
-            lang = "Latino"
-        title = "Enlace encontrado en " + server + " [" + lang + "]"
-        if item.contentChannel == 'videolibrary':
-            itemlist.append(item.clone(channel=item.channel, action="play", url=video_url,
-                                       thumbnail=thumbnail, server=server, folder=False))
-        else:
-            itemlist.append(Item(channel=item.channel, action="play", title=title, show=show, url=video_url, plot=item.plot,
-                                 thumbnail=thumbnail, server=server, folder=False))
+    _sa = scrapertools.find_single_match(data, 'var _sa = (true|false);')
+    _sl = scrapertools.find_single_match(data, 'var _sl = ([^;]+);')
+    sl = eval(_sl)
-        autoplay.start(itemlist, item)
-        return itemlist
-    else:
-        return []
+    buttons = scrapertools.find_multiple_matches(data, '<button href="" class="selop" sl="([^"]+)">([^<]+)</button>')
+    for id, title in buttons:
+        new_url = golink(int(id), _sa, sl)
+        data = httptools.downloadpage(new_url).data
+        _x0x = scrapertools.find_single_match(data, 'var x0x = ([^;]+);')
+        x0x = eval(_x0x)
+        url = resolve(x0x[4], base64.b64decode(x0x[1]))
+        if 'download' in url:
+            url = url.replace('download', 'preview')
+        title = '%s'
+        itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language='latino',
+                             infoLabels=item.infoLabels))
+    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
+    # Required for FilterTools
+    itemlist = filtertools.get_links(itemlist, item, list_language)
+    # Required for AutoPlay
+    autoplay.start(itemlist, item)
+    return itemlist
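# golink() rebuilds the hidden player URL: it takes five characters of the sl[2]
# token at fixed offsets (shifted by the option number) and appends them as a
# checksum, picking the host from the _sa flag.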
+def golink(num, sa, sl):
+    import urllib
+    b = [3, 10, 5, 22, 31]
+    d = ''
+    for i in range(len(b)):
+        d += sl[2][b[i]+num:b[i]+num+1]
+    SVR = "https://viteca.stream" if sa == 'true' else "http://serieslan.com"
+    TT = "/" + urllib.quote_plus(sl[3].replace("/", "><")) if num == 0 else ""
+    return SVR + "/el/" + sl[0] + "/" + sl[1] + "/" + str(num) + "/" + sl[2] + d + TT
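# resolve() is plain RC4: the first loop is the key schedule (KSA) keyed with
# value1, the second is the PRGA that XOR-decrypts value2 — the same cipher the
# removed txc() implemented above.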
+def resolve(value1, value2):
+    reto = ''
+    lista = range(256)
+    j = 0
+    for i in range(256):
+        j = (j + lista[i] + ord(value1[i % len(value1)])) % 256
+        k = lista[i]
+        lista[i] = lista[j]
+        lista[j] = k
+    m = 0
+    j = 0
+    for i in range(len(value2)):
+        m = (m + 1) % 256
+        j = (j + lista[m]) % 256
+        k = lista[m]
+        lista[m] = lista[j]
+        lista[j] = k
+        reto += chr(ord(value2[i]) ^ lista[(lista[m] + lista[j]) % 256])
+    return reto

plugin.video.alfa/channels/vepelis.py Executable file → Normal file
View File

@@ -2,10 +2,12 @@
 import re
 import urlparse
+import urllib
 from core import scrapertools
 from core import httptools
 from core import servertools
+from core import jsontools
 from core import tmdb
 from core.item import Item
 from platformcode import config, logger
@@ -81,24 +83,6 @@ def listarpeliculas(item):
return itemlist
-def findvideos(item):
-    logger.info()
-    # Descarga la página
-    itemlist = []
-    data = httptools.downloadpage(item.url).data
-    itemlist.extend(servertools.find_video_items(data=data))
-    for videoitem in itemlist:
-        videoitem.channel = item.channel
-        videoitem.quality = item.quality
-        videoitem.language = item.language
-        videoitem.action = 'play'
-    return itemlist
def generos(item):
logger.info()
itemlist = []
@@ -230,6 +214,46 @@ def listado2(item):
return itemlist
+def get_source(url):
+    logger.info()
+    data = httptools.downloadpage(url).data
+    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    return data
+def get_link(data):
+    new_url = scrapertools.find_single_match(data, '(?:IFRAME|iframe) src="([^"]+)" scrolling')
+    return new_url
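# The replacement findvideos() unwraps two nested iframes via get_link(), pulls the
# h=<id> token from the final player URL and POSTs it to the playeropstream API,
# which returns JSON containing the real video URL.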
+def findvideos(item):
+    logger.info()
+    host = 'https://www.locopelis.tv/'
+    itemlist = []
+    new_url = get_link(get_source(item.url))
+    new_url = get_link(get_source(new_url))
+    video_id = scrapertools.find_single_match(new_url, 'http.*?h=(\w+)')
+    new_url = '%s%s' % (host, 'playeropstream/api.php')
+    post = {'h': video_id}
+    post = urllib.urlencode(post)
+    data = httptools.downloadpage(new_url, post=post).data
+    json_data = jsontools.load(data)
+    url = json_data['url']
+    server = servertools.get_server_from_url(url)
+    title = '%s' % server
+    itemlist.append(Item(channel=item.channel, title=title, url=url, action='play',
+                         server=server, infoLabels=item.infoLabels))
+    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
+        itemlist.append(Item(channel=item.channel,
+                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
+                             url=item.url,
+                             action="add_pelicula_to_library",
+                             extra="findvideos",
+                             contentTitle=item.fulltitle
+                             ))
+    return itemlist
def search(item, texto):
logger.info()
itemlist = []
@@ -277,3 +301,6 @@ def newest(categoria):
return []
return itemlist

View File

@@ -63,6 +63,11 @@ def getchanneltypes(view="thumb_"):
     # Category list
     channel_types = ["movie", "tvshow", "anime", "documentary", "vos", "direct", "torrent"]
+    dict_types_lang = {'movie': config.get_localized_string(30122), 'tvshow': config.get_localized_string(30123),
+                       'anime': config.get_localized_string(30124), 'documentary': config.get_localized_string(30125),
+                       'vos': config.get_localized_string(30136), 'adult': config.get_localized_string(30126),
+                       'direct': config.get_localized_string(30137)}
     if config.get_setting("adult_mode") != 0:
         channel_types.append("adult")
@@ -77,7 +82,6 @@ def getchanneltypes(view="thumb_"):
                          viewmode="thumbnails"))
     for channel_type in channel_types:
         logger.info("channel_type=%s" % channel_type)
         title = config.get_localized_category(channel_type)
         itemlist.append(Item(title=title, channel="channelselector", action="filterchannels", category=title,
                              channel_type=channel_type, viewmode="thumbnails",
@@ -169,10 +173,11 @@ def filterchannels(category, view="thumb_"):
             context.append({"title": "Configurar canal", "channel": "setting", "action": "channel_config",
                             "config": channel_parameters["channel"]})
+            channel_info = set_channel_info(channel_parameters)
             # If it got this far, add it
             channelslist.append(Item(title=channel_parameters["title"], channel=channel_parameters["channel"],
                                      action="mainlist", thumbnail=channel_parameters["thumbnail"],
-                                     fanart=channel_parameters["fanart"], category=channel_parameters["title"],
+                                     fanart=channel_parameters["fanart"], plot=channel_info, category=channel_parameters["title"],
                                      language=channel_parameters["language"], viewmode="list", context=context))
except:
@@ -232,3 +237,34 @@ def get_thumb(thumb_name, view="thumb_", auto=False):
         media_path = os.path.join(resource_path, icon_pack_name)
     return os.path.join(media_path, view + thumb_name)
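# set_channel_info() composes the plot text shown for each channel in the selector:
# its localized content categories plus its languages; a channel tagged 'vos' is
# shown with the full wildcard set (Latino, Castellano, VOSE, VO).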
+def set_channel_info(parameters):
+    logger.info()
+    info = ''
+    language = ''
+    content = ''
+    langs = parameters['language']
+    lang_dict = {'lat': 'Latino', 'cast': 'Castellano', '*': 'Latino, Castellano, VOSE, VO'}
+    for lang in langs:
+        if 'vos' in parameters['categories']:
+            lang = '*'
+        if lang in lang_dict:
+            if language != '' and language != '*' and not parameters['adult']:
+                language = '%s, %s' % (language, lang_dict[lang])
+            elif not parameters['adult']:
+                language = lang_dict[lang]
+        if lang == '*':
+            break
+    categories = parameters['categories']
+    for cat in categories:
+        if content != '':
+            content = '%s, %s' % (content, config.get_localized_category(cat))
+        else:
+            content = config.get_localized_category(cat)
+    info = '[COLOR yellow]Tipo de contenido:[/COLOR] %s\n\n[COLOR yellow]Idiomas:[/COLOR] %s' % (content, language)
+    return info