Merge pull request #196 from Intel11/actualizados

Updated
Alfa
2018-01-24 18:11:15 -05:00
committed by GitHub
5 changed files with 430 additions and 378 deletions


@@ -8,8 +8,7 @@
"banner": "allpeliculas.png",
"categories": [
"movie",
"vos",
"tvshow"
"vos"
],
"settings": [
{


@@ -27,7 +27,6 @@ def mainlist(item):
logger.info()
itemlist = []
item.viewmode = viewmode
data = httptools.downloadpage(CHANNEL_HOST + "pelicula").data
total = scrapertools.find_single_match(data, "Películas</h1><span>(.*?)</span>")
titulo = "Peliculas (%s)" %total
@@ -56,7 +55,6 @@ def mainlist(item):
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(action="search", title="Buscar...", text_color=color3))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
return itemlist
@@ -121,7 +119,6 @@ def newest(categoria):
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
@@ -129,7 +126,6 @@ def peliculas(item):
logger.info()
itemlist = []
item.text_color = color2
data = httptools.downloadpage(item.url).data
patron = '(?s)class="(?:result-item|item movies)">.*?<img src="([^"]+)'
patron += '.*?alt="([^"]+)"'
@@ -162,7 +158,6 @@ def peliculas(item):
if next_page_link:
itemlist.append(item.clone(action="peliculas", title=">> Página siguiente", url=next_page_link,
text_color=color3))
return itemlist
@@ -170,15 +165,11 @@ def destacadas(item):
logger.info()
itemlist = []
item.text_color = color2
# Download the page
data = httptools.downloadpage(item.url).data
# Extract the entries (folders)
bloque = scrapertools.find_single_match(data, 'peliculas_destacadas.*?class="single-page')
patron = '(?s)title="([^"]+)"'
patron += '.href="([^"]+)"'
patron += '.*?src="([^"]+)'
bloque = scrapertools.find_single_match(data, 'peliculas_destacadas.*?class="letter_home"')
patron = '(?s)title="([^"]+)".*?'
patron += 'href="([^"]+)".*?'
patron += 'src="([^"]+)'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
scrapedurl = CHANNEL_HOST + scrapedurl
@@ -186,7 +177,6 @@ def destacadas(item):
url=scrapedurl, thumbnail=scrapedthumbnail,
contentType="movie"
))
# Extract the pager
next_page_link = scrapertools.find_single_match(data, '<a href="([^"]+)"\s+><span [^>]+>&raquo;</span>')
if next_page_link:
itemlist.append(
@@ -197,11 +187,8 @@ def destacadas(item):
def generos(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '(?s)dos_columnas">(.*?)</ul>')
# Extract the entries
patron = '<li><a.*?href="/([^"]+)">(.*?)</li>'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedtitle in matches:
@@ -211,25 +198,21 @@ def generos(item):
if scrapedtitle == "Erotico" and config.get_setting("adult_mode") == 0:
continue
itemlist.append(item.clone(action="peliculas", title=scrapedtitle, url=scrapedurl))
return itemlist
def idioma(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="peliculas", title="Español", url= CHANNEL_HOST + "idioma/espanol/"))
itemlist.append(item.clone(action="peliculas", title="Latino", url= CHANNEL_HOST + "idioma/latino/"))
itemlist.append(item.clone(action="peliculas", title="VOSE", url= CHANNEL_HOST + "idioma/subtitulado/"))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
try:
filtro_idioma = config.get_setting("filterlanguages", item.channel)
filtro_enlaces = config.get_setting("filterlinks", item.channel)
@@ -237,13 +220,9 @@ def findvideos(item):
filtro_idioma = 3
filtro_enlaces = 2
dict_idiomas = {'Español': 2, 'Latino': 1, 'Subtitulado': 0}
# Look up the argument
data = httptools.downloadpage(item.url).data
if item.infoLabels["year"]:
tmdb.set_infoLabels(item, __modo_grafico__)
if filtro_enlaces != 0:
list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "online", item)
if list_enlaces:
@@ -256,7 +235,6 @@ def findvideos(item):
itemlist.append(item.clone(action="", title="Enlaces Descarga", text_color=color1,
text_bold=True))
itemlist.extend(list_enlaces)
if itemlist:
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta"))
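The net effect of the destacadas rework above is a two-step scrape: cut the featured block out of the page, then pull (title, url, thumbnail) triples with one non-greedy capture per field. A minimal standalone sketch of that flow, using plain re instead of the scrapertools wrappers and a made-up HTML fragment (the real markup comes from CHANNEL_HOST):

import re

page = ('peliculas_destacadas <a title="Pelicula 1" href="/pelicula-1">'
        '<img src="/img/p1.jpg"></a> class="letter_home"')  # hypothetical fragment
bloque = re.search('peliculas_destacadas.*?class="letter_home"', page, re.S).group(0)
patron = '(?s)title="([^"]+)".*?href="([^"]+)".*?src="([^"]+)'
print(re.findall(patron, bloque))
# -> [('Pelicula 1', '/pelicula-1', '/img/p1.jpg')]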


@@ -0,0 +1,39 @@
{
"id": "novelashdgratis",
"name": "Novelas HD Gratis",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "https://s18.postimg.org/okqzs7zy1/logo.gif",
"banner": "",
"version": 1,
"categories": [
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
}
]
}
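The new manifest above carries the channel's identity plus a settings list of three boolean toggles. A quick local sanity check, assuming a copy saved as novelashdgratis.json (hypothetical path; the required-key list only covers what is visible above):

import json

REQUIRED = ("id", "name", "active", "language", "categories", "settings")
with open("novelashdgratis.json") as f:  # hypothetical local path
    channel = json.load(f)
missing = [key for key in REQUIRED if key not in channel]
assert not missing, "missing keys: %s" % ", ".join(missing)
print(channel["id"], channel["categories"])  # -> novelashdgratis ['tvshow']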


@@ -0,0 +1,164 @@
# -*- coding: utf-8 -*-
# -*- Channel Novelas HD Gratis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from channels import filtertools
from channels import autoplay
host = 'http://www.novelashdgratis.io'
IDIOMAS = {'la':'Latino'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['powvideo',
'netu',
'playedto',
'allmyvideos',
'gamovideo',
'openload',
'dailymotion',
'streamplay',
'streaminto',
'youtube',
'vidoza',
'flashx']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(item.clone(title="En Emision", action="list_all", url=host, type='emision'))
itemlist.append(item.clone(title="Ultimas Agregadas", action="list_all", url=host, type='ultimas'))
itemlist.append(item.clone(title="Todas", action="list_all", url=host, type='todas'))
itemlist.append(item.clone(title="Alfabetico", action="alpha", url=host, type='alfabetico'))
if autoplay.context:
autoplay.show_option(item.channel, itemlist)
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
itemlist =[]
data = get_source(item.url)
no_thumbs= ['emision', 'todas']
if item.type not in no_thumbs:
patron = '<div class=picture><a href=(.*?) title=(.*?)><img src=(.*?) width='
else:
if item.type == 'emision':
data = scrapertools.find_single_match(data, 'class=dt>Telenovelas que se Transmiten<\/div>.*?</ul>')
if item.type == 'todas':
data = scrapertools.find_single_match(data, 'class=dt>Lista de Novelas<\/div>.*?</ul>')
patron = '<li><a href=(.*?) title=(.*?)>.*?</a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
if item.type in no_thumbs:
for scrapedurl, scrapedtitle in matches:
url = host+scrapedurl
contentSerieName = scrapedtitle
title = contentSerieName
new_item = Item(channel=item.channel, title=title, url= url, action='episodes',
contentSerieName= contentSerieName)
itemlist.append(new_item)
else:
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
url = host + '/'+scrapedurl
contentSerieName = scrapedtitle
title = contentSerieName
thumbnail = scrapedthumbnail
new_item = Item(channel=item.channel, title=title, url=url, action='episodes', thumbnail=thumbnail,
contentSerieName=contentSerieName)
itemlist.append(new_item)
return itemlist
def alpha(item):
logger.info()
itemlist= []
data = get_source(item.url)
patron = '<li class=menu-gen><a href=(.*?)>(.*?)</a> </li>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(title=scrapedtitle, url=host+scrapedurl, action='list_all'))
return itemlist
def episodes(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron='<li class=lc><a href=(.*?) title=.*?class=lcc>(.*?)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle
url = host+scrapedurl
new_item = Item(channel=item.channel, title=title, url=url, action='findvideos')
itemlist.append(new_item)
return itemlist[::-1]
def findvideos(item):
logger.info()
servers = {'powvideo':'http://powvideo.net/embed-',
'netu':'http://netu.tv/watch_video.php?v=',
'played':'http://played.to/embed-',
'allmy':'http://allmyvideos.net/embed-',
'gamo':'http://gamovideo.com/embed-',
'openload':'https://openload.co/embed/',
'daily':'http://www.dailymotion.com/embed/video/',
'play':'http://streamplay.to/embed-',
'streamin':'http://streamin.to/embed-',
'youtube':'https://www.youtube.com/embed/',
'vidoza':'https://vidoza.net/embed-',
'flashx':'https://www.flashx.tv/embed-'}
itemlist = []
data = get_source(item.url)
patron = 'id=tab\d+><script>(.*?)\((.*?)\)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for server, id in matches:
if server in servers:
url= '%s%s'%(servers[server], id)
itemlist.append(item.clone(url=url, title='%s', action='play', language=IDIOMAS['la']))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
return itemlist
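findvideos above relies on get_source having already stripped the quotes, so each player tab reduces to id=tabN><script>server(id)<, and the captured pair is looked up in a hand-kept table of embed URL prefixes. A self-contained sketch of that mapping (invented sample input; table truncated to two of the entries above):

import re

SERVERS = {'openload': 'https://openload.co/embed/',
           'powvideo': 'http://powvideo.net/embed-'}
page = '<div id=tab1><script>openload(AbC123xYz)</script></div>'  # hypothetical, already unquoted
for server, video_id in re.findall(r'id=tab\d+><script>(.*?)\((.*?)\)<', page):
    if server in SERVERS:
        print('%s%s' % (SERVERS[server], video_id))
# -> https://openload.co/embed/AbC123xYz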


@@ -1,4 +1,7 @@
# -*- coding: utf-8 -*-
# -*- Channel PepeCine -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urlparse
@@ -11,383 +14,252 @@ from core import tmdb
from core.item import Item, InfoLabels
from platformcode import config, logger
__url_base__ = "http://pepecine.net"
__chanel__ = "pepecine"
fanart_host = "https://d12.usercdn.com/i/02278/u875vjx9c0xs.png"
host = "https://pepecine.tv"
perpage = 20
def mainlist1(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Películas", action='movies_menu'))
#itemlist.append(item.clone(title="Series", action='tvshows_menu'))
return itemlist
def mainlist(item):
logger.info()
itemlist = []
url_peliculas = urlparse.urljoin(__url_base__, "plugins/ultimas-peliculas-updated.php")
itemlist.append(
Item(channel=__chanel__, title="Películas", text_color="0xFFEB7600", text_bold=True, fanart=fanart_host,
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/movies.png"))
itemlist.append(Item(channel=__chanel__, action="listado", title=" Novedades", page=0, viewcontent="movies",
text_color="0xFFEB7600", extra="movie", fanart=fanart_host, url=url_peliculas,
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/movies.png"))
itemlist.append(Item(channel=__chanel__, action="sub_filtrar", title=" Filtrar películas por género",
text_color="0xFFEB7600", extra="movie", fanart=fanart_host, url=url_peliculas,
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/movies_filtrar.png"))
itemlist.append(Item(channel=__chanel__, action="search", title=" Buscar películas por título",
text_color="0xFFEB7600", extra="movie", fanart=fanart_host, url=url_peliculas,
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/movies_buscar.png"))
url_series = urlparse.urljoin(__url_base__, "plugins/series-episodios-updated.php")
itemlist.append(
Item(channel=__chanel__, title="Series", text_color="0xFFEB7600", text_bold=True, fanart=fanart_host,
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/tv.png"))
itemlist.append(Item(channel=__chanel__, action="listado", title=" Novedades", page=0, viewcontent="tvshows",
text_color="0xFFEB7600", extra="series", fanart=fanart_host, url=url_series,
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/tv.png"))
itemlist.append(Item(channel=__chanel__, action="sub_filtrar", title=" Filtrar series por género",
text_color="0xFFEB7600", extra="series", fanart=fanart_host, url=url_series,
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/tv_filtrar.png"))
itemlist.append(Item(channel=__chanel__, action="search", title=" Buscar series por título",
text_color="0xFFEB7600", extra="series", fanart=fanart_host, url=url_series,
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/tv_buscar.png"))
itemlist.append(Item(channel=__chanel__, action="listado", title=" Ultimos episodios actualizados",
text_color="0xFFEB7600", extra="series_novedades", fanart=fanart_host,
url=urlparse.urljoin(__url_base__, "plugins/ultimos-capitulos-updated.php"),
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/tv.png"))
itemlist.append(Item(channel=item.channel,
title="Ultimas",
url=host+'/tv-peliculas-online',
action='list_latest',
indexp=1,
type='movie'))
itemlist.append(Item(channel=item.channel,
title="Todas",
url= host+'/ver-online',
action='list_all',
page='1',
type='movie'))
itemlist.append(Item(channel=item.channel,
title="Género",
url= host,
action='genero',
page='1',
type='movie'))
itemlist.append(Item(channel=item.channel, title = "", action =""))
itemlist.append(Item(channel=item.channel,
title="Buscar",
url= host+'/esta-online?q=',
action='search',
page='1',
type='movie'))
return itemlist
def sub_filtrar(item):
def genero(item):
logger.info()
itemlist = []
generos = ("acción", "animación", "aventura", "ciencia ficción", "comedia", "crimen",
"documental", "drama", "familia", "fantasía", "guerra", "historia", "misterio",
"música", "musical", "romance", "terror", "thriller", "western")
thumbnail = ('https://d12.usercdn.com/i/02278/spvnq8hghtok.jpg',
'https://d12.usercdn.com/i/02278/olhbpe7phjas.jpg',
'https://d12.usercdn.com/i/02278/8xm23q2vewtt.jpg',
'https://d12.usercdn.com/i/02278/o4vuvd7q4bau.jpg',
'https://d12.usercdn.com/i/02278/v7xq7k9bj3dh.jpg',
'https://d12.usercdn.com/i/02278/yo5uj9ff7jmg.jpg',
'https://d12.usercdn.com/i/02278/ipeodwh6vw6t.jpg',
'https://d12.usercdn.com/i/02278/0c0ra1wb11ro.jpg',
'https://d12.usercdn.com/i/02278/zn85t6f2oxdv.jpg',
'https://d12.usercdn.com/i/02278/ipk94gsdqzwa.jpg',
'https://d12.usercdn.com/i/02278/z5hsi6fr4yri.jpg',
'https://d12.usercdn.com/i/02278/nq0jvyp7vlb9.jpg',
'https://d12.usercdn.com/i/02278/tkbe7p3rjmps.jpg',
'https://d12.usercdn.com/i/02278/is60ge4zv1ve.jpg',
'https://d12.usercdn.com/i/02278/86ubk310hgn8.jpg',
'https://d12.usercdn.com/i/02278/ph1gfpgtljf7.jpg',
'https://d12.usercdn.com/i/02278/bzp3t2edgorg.jpg',
'https://d12.usercdn.com/i/02278/31i1xkd8m30b.jpg',
'https://d12.usercdn.com/i/02278/af05ulgs20uf.jpg')
itemlist=[]
data = httptools.downloadpage(item.url).data
data = data.replace("\n","")
bloque = scrapertools.find_single_match(data, 'Peliculas</h2><div id="SlideMenu1" class="s2">.*?SlideMenu1_Folder">.*?</ul></li>')
patron = '<a href="([^"]+).*?'
patron += '<li>([^<]+)'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedtitle in matches:
itemlist.append(Item(action = "list_all",
channel = item.channel,
page='1',
title = scrapedtitle,
type= item.type,
url = host + scrapedurl
))
return itemlist
if item.extra == "movie":
viewcontent = "movies"
else:
viewcontent = "tvshows"
for g, t in zip(generos, thumbnail):
itemlist.append(item.clone(action="listado", title=g.capitalize(), filtro=("genero", g), thumbnail=t,
viewcontent=viewcontent))
def tvshows_menu(item):
logger.info()
itemlist=[]
itemlist.append(Item(channel=item.channel,
title="Ultimas",
url=host+'/ver-tv-serie-online',
action='list_latest',
type='serie'))
itemlist.append(item.clone(title="Todas",
url=host + '/serie-tv',
action='list_all',
page='1',
type='series'))
itemlist.append(item.clone(title="Buscar",
url= host+'/esta-online?q=',
action='search',
page='1',
type='series'))
return itemlist
def search(item, texto):
logger.info("search:" + texto)
# texto = texto.replace(" ", "+")
item.filtro = ("search", texto.lower())
try:
return listado(item)
# Catch the exception so the global search is not interrupted when a channel fails
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = urlparse.urljoin(__url_base__, "plugins/ultimas-peliculas-updated.php")
item.extra = "movie"
elif categoria == 'infantiles':
item.url = urlparse.urljoin(__url_base__, "plugins/ultimas-peliculas-updated.php")
item.filtro = ("genero", "animación")
item.extra = "movie"
elif categoria == 'series':
item.url = urlparse.urljoin(__url_base__, "plugins/ultimos-capitulos-updated.php")
item.extra = "series_novedades"
else:
return []
item.action = "listado"
itemlist = listado(item)
if itemlist[-1].action == "listado":
itemlist.pop()
# Catch the exception so the 'novedades' channel is not interrupted when a channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def listado(item):
logger.info()
itemlist = []
try:
data_dict = jsontools.load(httptools.downloadpage(item.url).data)
except:
return itemlist  # Return an empty list
# Filtering and search
if item.filtro:
for i in data_dict["result"][:]:
if (item.filtro[0] == "genero" and item.filtro[1] not in i['genre'].lower()) or \
(item.filtro[0] == "search" and item.filtro[1] not in i['title'].lower()):
data_dict["result"].remove(i)
if not item.page:
item.page = 0
offset = int(item.page) * 30
limit = offset + 30
for i in data_dict["result"][offset:limit]:
infoLabels = InfoLabels()
idioma = ''
if item.extra == "movie":
action = "findvideos"
# viewcontent = 'movies'
infoLabels["title"] = i["title"]
title = '%s (%s)' % (i["title"], i['year'])
url = urlparse.urljoin(__url_base__, "ver-pelicula-online/" + str(i["id"]))
elif item.extra == "series":
action = "get_temporadas"
# viewcontent = 'seasons'
title = i["title"]
infoLabels['tvshowtitle'] = i["title"]
url = urlparse.urljoin(__url_base__, "episodio-online/" + str(i["id"]))
else: # item.extra=="series_novedades":
action = "findvideos"
# viewcontent = 'episodes'
infoLabels['tvshowtitle'] = i["title"]
infoLabels['season'] = i['season']
infoLabels['episode'] = i['episode'].zfill(2)
flag = scrapertools.find_single_match(i["label"], '(\s*\<img src=.*\>)')
idioma = i["label"].replace(flag, "")
title = '%s %sx%s (%s)' % (i["title"], infoLabels["season"], infoLabels["episode"], idioma)
url = urlparse.urljoin(__url_base__, "episodio-online/" + str(i["id"]))
if i.has_key("poster") and i["poster"]:
thumbnail = re.compile("/w\d{3}/").sub("/w500/", i["poster"])
else:
thumbnail = item.thumbnail
if i.has_key("background") and i["background"]:
fanart = i["background"]
else:
fanart = item.fanart
# Fill in the infoLabels dictionary
infoLabels['title_id'] = i['id']  # title_id: identifier of the movie/series on pepecine.com
if i['genre']: infoLabels['genre'] = i['genre']
if i['year']: infoLabels['year'] = i['year']
# if i['tagline']: infoLabels['plotoutline']=i['tagline']
if i['plot']:
infoLabels['plot'] = i['plot']
else:
infoLabels['plot'] = ""
if i['runtime']: infoLabels['duration'] = int(i['runtime']) * 60
if i['imdb_rating']:
infoLabels['rating'] = i['imdb_rating']
elif i['tmdb_rating']:
infoLabels['rating'] = i['tmdb_rating']
if i['tmdb_id']: infoLabels['tmdb_id'] = i['tmdb_id']
if i['imdb_id']: infoLabels['imdb_id'] = i['imdb_id']
newItem = Item(channel=item.channel, action=action, title=title, url=url, extra=item.extra,
fanart=fanart, thumbnail=thumbnail, viewmode="movie_with_plot", # viewcontent=viewcontent,
language=idioma, text_color="0xFFFFCE9C", infoLabels=infoLabels)
newItem.year = i['year']
newItem.contentTitle = i['title']
if 'season' in infoLabels and infoLabels['season']:
newItem.contentSeason = infoLabels['season']
if 'episode' in infoLabels and infoLabels['episode']:
newItem.contentEpisodeNumber = infoLabels['episode']
itemlist.append(newItem)
# Fetch the basic data using multiple threads
tmdb.set_infoLabels(itemlist)
# Pagination
if len(data_dict["result"]) > limit:
itemlist.append(item.clone(text_color="0xFF994D00", title=">> Pagina siguiente >>", page=item.page + 1))
return itemlist
def get_temporadas(item):
logger.info()
itemlist = []
infoLabels = {}
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
patron = 'vars.title =(.*?)};'
try:
data_dict = jsontools.load(scrapertools.get_match(data, patron) + '}')
except:
return itemlist  # Return an empty list
if item.extra == "serie_add":
itemlist = get_episodios(item)
texto = texto.replace(" ", "+")
item.url = item.url + texto
item.extra = "busca"
if texto != '':
return sub_search(item)
else:
if len(data_dict["season"]) == 1:
# If there is only one season ...
item.infoLabels['season'] = data_dict["season"][0]["number"]
itemlist = get_episodios(item)
else:  # ... or if there is more than one season
item.viewcontent = "seasons"
data_dict["season"].sort(key=lambda x: (x['number']))  # sort by season number
for season in data_dict["season"]:
# filter the links by season
enlaces = filter(lambda l: l["season"] == season['number'], data_dict["link"])
if enlaces:
item.infoLabels['season'] = season['number']
title = '%s Temporada %s' % (item.title, season['number'])
itemlist.append(item.clone(action="get_episodios", title=title,
text_color="0xFFFFCE9C", viewmode="movie_with_plot"))
# Fetch the data for all seasons using multiple threads
tmdb.set_infoLabels(itemlist)
if config.get_videolibrary_support() and itemlist:
infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], 'tvdb_id': item.infoLabels['tvdb_id'],
'imdb_id': item.infoLabels['imdb_id']}
itemlist.append(
Item(channel=item.channel, title="Añadir esta serie a la videoteca", text_color="0xFFe5ffcc",
action="add_serie_to_library", extra='get_episodios###serie_add', url=item.url,
contentSerieName=data_dict["title"], infoLabels=infoLabels,
thumbnail='https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/tv.png'))
return itemlist
return []
def get_episodios(item):
def sub_search(item):
logger.info()
itemlist = []
# infoLabels = item.infoLabels
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
patron = 'vars.title =(.*?)};'
try:
data_dict = jsontools.load(scrapertools.get_match(data, patron) + '}')
except:
return itemlist  # Return an empty list
# Group the links by temXcap episode key (seasonxepisode)
temXcap_dict = {}
for link in data_dict['link']:
try:
season = str(int(link['season']))
episode = str(int(link['episode'])).zfill(2)
except:
continue
if int(season) != item.infoLabels["season"] and item.extra != "serie_add":
# Discard episodes from other seasons, unless we want all of them
continue
title_id = link['title_id']
id = season + "x" + episode
if id in temXcap_dict:
l = temXcap_dict[id]
l.append(link)
temXcap_dict[id] = l
else:
temXcap_dict[id] = [link]
# Sort the link list by season and episode
temXcap_list = temXcap_dict.items()
temXcap_list.sort(key=lambda x: (int(x[0].split("x")[0]), int(x[0].split("x")[1])))
for episodio in temXcap_list:
title = '%s (%s)' % (item.contentSerieName, episodio[0])
item.infoLabels['season'], item.infoLabels['episode'] = episodio[0].split('x')
itemlist.append(item.clone(action="findvideos", title=title,
viewmode="movie_with_plot", text_color="0xFFFFCE9C"))
if item.extra != "serie_add":
# Fetch the data for every episode of the season using multiple threads
tmdb.set_infoLabels(itemlist)
for i in itemlist:
# If the episode has a name of its own, append it to the item title
title = "%s: %s" % (i.title, i.infoLabels['title'])
i.title = title
data = httptools.downloadpage(item.url).data
f1 = "Peliculas"
action = "findvideos"
if item.type == "series":
action = "list_all"
f1 = "Series"
patron = 'Ver %s .*?id="%s' %(f1, item.type)
bloque = scrapertools.find_single_match(data, patron)
patron = 'col-sm-4 pretty-figure">\s*<a href="([^"]+).*?'
patron += 'src="([^"]+).*?'
patron += 'title="([^"]+).*?'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
path = scrapertools.find_single_match(scrapedthumbnail, "w\w+(/\w+.....)")
filtro_list = {"poster_path": path}
filtro_list = filtro_list.items()
itemlist.append(item.clone(action = "findvideos",
extra = "one",
infoLabels={'filtro': filtro_list},
thumbnail = scrapedthumbnail,
title = scrapedtitle,
fulltitle = scrapedtitle,
url = scrapedurl
))
tmdb.set_infoLabels(itemlist)
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_latest(item):
logger.info()
itemlist = []
data = get_source(item.url)
data_url= scrapertools.find_single_match(data,'<iframe.*?src=(.*?) style')
data = get_source(data_url)
patron = "<div class='online'>.*?<img src=(.*?) class=.*?alt=(.*?) title=.*?"
patron += "<b><a href=(.*?) target=.*?align=right><div class=s7>(.*?) <"
matches = re.compile(patron,re.DOTALL).findall(data)
count = 0
for thumbnail, title, url, language in matches:
count +=1
if count >= item.indexp and count < item.indexp + perpage:
path = scrapertools.find_single_match(thumbnail, "w\w+(/\w+.....)")
filtro_list = {"poster_path": path}
filtro_list = filtro_list.items()
itemlist.append(Item(channel=item.channel,
title=title,
fulltitle=title,
contentTitle=title,
url=host+url,
thumbnail=thumbnail,
language=language,
infoLabels={'filtro': filtro_list},
extra="one",
action='findvideos'))
tmdb.set_infoLabels(itemlist)
item.indexp += perpage
itemlist.append(Item(channel=item.channel,
title="Siguiente >>",
url=item.url,
extra="one",
indexp=item.indexp,
action='list_latest'))
return itemlist
def list_all(item):
logger.info()
itemlist=[]
genero = scrapertools.find_single_match(item.url, "genre=(\w+)")
data= get_source(item.url)
token = scrapertools.find_single_match(data, "token:.*?'(.*?)'")
url = host+'/titles/paginate?_token=%s&perPage=24&page=%s&order=mc_num_of_votesDesc&type=%s&minRating=&maxRating=&availToStream=1&genres[]=%s' % (token, item.page, item.type, genero)
data = httptools.downloadpage(url).data
dict_data = jsontools.load(data)
items = dict_data['items']
for dict in items:
new_item = Item(channel=item.channel,
title=dict['title']+' [%s]' % dict['year'],
plot = dict['plot'],
thumbnail=dict['poster'],
url=dict['link'],
infoLabels={'year':dict['year']})
if item.type == 'movie':
new_item.contentTitle=dict['title']
new_item.fulltitle=dict['title']
new_item.action = 'findvideos'
elif item.type == 'series':
new_item.contentSerieName = dict['title']
new_item.action = ''
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist)
itemlist.append(item.clone(title='Siguiente>>>',
url=item.url,
action='list_all',
type= item.type,
page=str(int(item.page) + 1)))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
patron = 'vars.title =(.*?)};'
try:
data_dict = jsontools.load(scrapertools.get_match(data, patron) + '}')
except:
return itemlist  # Return an empty list
for link in data_dict["link"]:
if item.contentType == 'episode' \
and (item.contentSeason != link['season'] or item.contentEpisodeNumber != link['episode']):
# When looking for one episode's links, discard those that belong to other episodes
continue
url = link["url"]
flag = scrapertools.find_single_match(link["label"], '(\s*\<img src=.*\>)')
idioma = link["label"].replace(flag, "")
if link["quality"] != "?":
calidad = (link["quality"])
else:
calidad = ""
itemlist.extend(servertools.find_video_items(data=url))
itemlist=[]
if item.extra == "one":
data = httptools.downloadpage(item.url).data
patron = "renderTab.bind.*?'([^']+).*?"
patron += "app.utils.getFavicon.*?<b>(.*?) .*?"
patron += 'color:#B1FFC5;">([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedlanguage, scrapedquality in matches:
title = "Ver enlace en %s " + "[" + scrapedlanguage + "]" + "[" + scrapedquality + "]"
if scrapedlanguage != 'zc':
itemlist.append(item.clone(action='play',
title=title,
url=scrapedurl,
language=scrapedlanguage
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
else:
for link in item.url:
language = scrapertools.find_single_match(link['label'], '(.*?) <img')
if language != 'zc':
itemlist.append(item.clone(action='play',
title=item.title,
url= link['url'],
language=language,
quality=link['quality']))
itemlist=servertools.get_servers_itemlist(itemlist)
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.quality = calidad
videoitem.language = idioma
videoitem.contentTitle = item.title
itemlist = servertools.get_servers_itemlist(itemlist)
if config.get_videolibrary_support() and itemlist and item.contentType == "movie":
infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'],
'title': item.infoLabels['title']}
itemlist.append(Item(channel=item.channel, title="Añadir esta película a la videoteca",
action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels,
text_color="0xFFe5ffcc",
thumbnail='https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/tv.png'))
videoitem.title = '%s [%s]' % (videoitem.server.capitalize(), videoitem.language.capitalize())
tmdb.set_infoLabels(itemlist)
if itemlist:
itemlist.append(Item(channel = item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta"))
# "Add this movie to the KODI video library" option
if item.extra != "library":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
fulltitle = item.fulltitle
))
return itemlist
def episodios(item):
# Needed for automatic updates
return get_temporadas(Item(channel=__chanel__, url=item.url, show=item.show, extra="serie_add"))
def play(item):
item.thumbnail = item.contentThumbnail
return [item]
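One detail of get_episodios above worth spelling out: links are bucketed under a seasonxepisode key with the episode zero-padded to two digits, and the buckets are then sorted numerically on both halves, so 10x01 lands after 2x01 even though plain string order would put it first. A minimal sketch with invented link dicts:

links = [{'season': '2', 'episode': '1'},
         {'season': '1', 'episode': '10'},
         {'season': '1', 'episode': '2'}]
temXcap_dict = {}
for link in links:
    key = str(int(link['season'])) + "x" + str(int(link['episode'])).zfill(2)
    temXcap_dict.setdefault(key, []).append(link)
ordered = sorted(temXcap_dict.items(),
                 key=lambda x: (int(x[0].split("x")[0]), int(x[0].split("x")[1])))
print([key for key, _ in ordered])
# -> ['1x02', '1x10', '2x01']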