@@ -124,12 +124,12 @@ def peliculas(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = '(?s)short_overlay.*?<a href="([^"]+)'
|
||||
patron += '.*?img.*?src="([^"]+)'
|
||||
patron += '.*?title="([^"]+).*?'
|
||||
patron += 'data-postid="([^"]+)'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for url, thumbnail, titulo, datapostid in matches:
|
||||
matches = scrapertools.find_multiple_matches(data, '(?s)shortstory cf(.*?)rate_post')
|
||||
for datos in matches:
|
||||
url = scrapertools.find_single_match(datos, 'href="([^"]+)')
|
||||
titulo = scrapertools.find_single_match(datos, 'short_header">([^<]+)').strip()
|
||||
datapostid = scrapertools.find_single_match(datos, 'data-postid="([^"]+)')
|
||||
thumbnail = scrapertools.find_single_match(datos, 'img w.*?src="([^"]+)')
|
||||
post = 'action=get_movie_details&postID=%s' %datapostid
|
||||
data1 = httptools.downloadpage(host + "wp-admin/admin-ajax.php", post=post).data
|
||||
idioma = "Latino"
|
||||
|
||||
63
plugin.video.alfa/channels/cine24h.json
Normal file
63
plugin.video.alfa/channels/cine24h.json
Normal file
@@ -0,0 +1,63 @@
|
||||
{
|
||||
"id": "cine24h",
|
||||
"name": "Cine24H",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["lat", "cast", "eng"],
|
||||
"fanart": "https://i.postimg.cc/WpqD2n77/cine24bg.jpg",
|
||||
"thumbnail": "https://cine24h.net/wp-content/uploads/2018/06/cine24hv2.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"movie",
|
||||
"tvshow",
|
||||
"vose"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostrar enlaces en idioma...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"No filtrar",
|
||||
"Latino",
|
||||
"Castellano",
|
||||
"English"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "modo_grafico",
|
||||
"type": "bool",
|
||||
"label": "Buscar información extra",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "perfil",
|
||||
"type": "list",
|
||||
"label": "Perfil de color",
|
||||
"default": 3,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Sin color",
|
||||
"Perfil 5",
|
||||
"Perfil 4",
|
||||
"Perfil 3",
|
||||
"Perfil 2",
|
||||
"Perfil 1"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "orden_episodios",
|
||||
"type": "bool",
|
||||
"label": "Mostrar los episodios de las series en orden descendente",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
382
plugin.video.alfa/channels/cine24h.py
Normal file
382
plugin.video.alfa/channels/cine24h.py
Normal file
@@ -0,0 +1,382 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# -*- Channel CanalPelis -*-
|
||||
# -*- Created for Alfa-addon -*-
|
||||
# -*- By the Alfa Develop Group -*-
|
||||
|
||||
import re
|
||||
import sys
|
||||
import urlparse
|
||||
|
||||
from channels import autoplay
|
||||
from channels import filtertools
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from core import channeltools
|
||||
from core import tmdb
|
||||
from platformcode import config, logger
|
||||
from channelselector import get_thumb
|
||||
|
||||
__channel__ = "cine24h"
|
||||
|
||||
host = "https://cine24h.net/"
|
||||
|
||||
try:
|
||||
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
|
||||
__perfil__ = int(config.get_setting('perfil', __channel__))
|
||||
except:
|
||||
__modo_grafico__ = True
|
||||
__perfil__ = 0
|
||||
|
||||
# Fijar perfil de color
|
||||
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
|
||||
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
|
||||
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
|
||||
if __perfil__ < 3:
|
||||
color1, color2, color3, color4, color5 = perfil[__perfil__]
|
||||
else:
|
||||
color1 = color2 = color3 = color4 = color5 = ""
|
||||
|
||||
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
|
||||
['Referer', host]]
|
||||
|
||||
|
||||
parameters = channeltools.get_channel_parameters(__channel__)
|
||||
fanart_host = parameters['fanart']
|
||||
thumbnail_host = parameters['thumbnail']
|
||||
|
||||
IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_quality = []
|
||||
list_servers = ['rapidvideo', 'streamango', 'openload', 'streamcherry']
|
||||
|
||||
|
||||
def mainlist(item):
    """Build the channel root menu: movies submenu, series listing and search."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = []
    # Movies submenu
    itemlist.append(item.clone(title="Peliculas", action="menumovies", text_blod=True,
                               viewcontent='movies', viewmode="movie_with_plot",
                               thumbnail=get_thumb('movies', auto=True)))
    # Series listing
    itemlist.append(item.clone(title="Series", action="series", extra='serie',
                               url=host + 'series/', viewmode="movie_with_plot",
                               text_blod=True, viewcontent='movies',
                               thumbnail=get_thumb('tvshows', auto=True), page=0))
    # Global search
    itemlist.append(item.clone(title="Buscar", action="search",
                               thumbnail=get_thumb('search', auto=True),
                               text_blod=True, url=host, page=0))

    autoplay.show_option(item.channel, itemlist)
    return itemlist
|
||||
|
||||
|
||||
def menumovies(item):
    """Movie section submenu: listings, filters by genre/year, and search."""
    logger.info()

    itemlist = []
    itemlist.append(item.clone(title="Novedades", action="peliculas", thumbnail=get_thumb('newest', auto=True),
                               text_blod=True, page=0, viewcontent='movies',
                               url=host + 'peliculas/', viewmode="movie_with_plot"))
    itemlist.append(item.clone(title="Estrenos", action="peliculas", thumbnail=get_thumb('estrenos', auto=True),
                               text_blod=True, page=0, viewcontent='movies',
                               url=host + '?s=trfilter&trfilter=1&years%5B%5D=2018', viewmode="movie_with_plot"))
    itemlist.append(item.clone(title="Más Vistas", action="peliculas", thumbnail=get_thumb('more watched', auto=True),
                               text_blod=True, page=0, viewcontent='movies',
                               url=host + 'peliculas-mas-vistas/', viewmode="movie_with_plot"))
    itemlist.append(item.clone(title="Géneros", action="genresYears", thumbnail=get_thumb('genres', auto=True),
                               text_blod=True, page=0, viewcontent='movies',
                               url=host, viewmode="movie_with_plot"))
    itemlist.append(item.clone(title="Estrenos por Año", action="genresYears", thumbnail=get_thumb('year', auto=True),
                               text_blod=True, page=0, viewcontent='movies', url=host,
                               viewmode="movie_with_plot"))
    # extra='buscarP' distinguishes this search from the root-menu one.
    itemlist.append(item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
                               text_blod=True, url=host, page=0, extra='buscarP'))

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Run a site search and list the results.

    Args:
        item: menu Item whose url is the search base.
        texto: user-entered query string.

    Returns:
        list of result Items, or [] if anything fails.

    Fixes: bare ``except:`` narrowed to ``except Exception`` (so Ctrl-C /
    SystemExit are not swallowed) and the redundant local ``import sys``
    removed — ``sys`` is already imported at module level.
    """
    logger.info()

    texto = texto.replace(" ", "+")
    item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))

    try:
        return peliculas(item)
    # Catch everything so a failing channel does not break the global searcher.
    except Exception:
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
|
||||
|
||||
|
||||
def peliculas(item):
    """Paginated movie listing (30 items per local page).

    Scrapes one site page, slices it locally with item.page, decorates the
    items with TMDB data, and appends a pagination entry.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # NOTE(review): the final ' ' alternative looks like a stripped '&nbsp;'
    # entity — confirm the literal survived re-encoding.
    data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data)
    data = scrapertools.decodeHtmlentities(data)
    patron = '(?s)short_overlay.*?<a href="([^"]+)' if False else '<article id="[^"]+" class="TPost[^<]+<a href="([^"]+)">.*?'  # url
    patron += '<img src="([^"]+)".*?'  # img (protocol-relative)
    patron += '</figure>(.*?)'  # tipo (unused marker between figure and title)
    patron += '<h3 class="Title">([^<]+)</h3>.*?'  # title
    patron += '<span class="Year">([^<]+)</span>.*?'  # year

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedthumbnail, tipo, scrapedtitle, year in matches[item.page:item.page + 30]:
        # Global search mixes movies and series: route series urls to temporadas.
        if item.title == 'Buscar' and 'serie' in scrapedurl:
            action = 'temporadas'
            contentType = 'tvshow'
            title = scrapedtitle + '[COLOR blue] (Serie)[/COLOR]'
        else:
            action = 'findvideos'
            contentType = 'movie'
            title = scrapedtitle

        itemlist.append(item.clone(channel=__channel__, action=action, text_color=color3, show=scrapedtitle,
                                   url=scrapedurl, infoLabels={'year': year}, contentType=contentType,
                                   contentTitle=scrapedtitle, thumbnail='https:' + scrapedthumbnail,
                                   title=title, context="buscar_trailer"))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    # More matches left on this site page: paginate locally; otherwise follow
    # the site's own "next" link.
    if item.page + 30 < len(matches):
        itemlist.append(item.clone(page=item.page + 30,
                                   title="» Siguiente »", text_color=color3))
    else:
        next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
        if next_page:
            itemlist.append(item.clone(url=next_page, page=0, title="» Siguiente »", text_color=color3))

    return itemlist
|
||||
|
||||
|
||||
def genresYears(item):
    """List genre or release-year links, depending on which menu entry called us."""
    logger.info()

    raw = httptools.downloadpage(item.url).data
    raw = re.sub(r"\n|\r|\t|\(.*?\)| |<br>", "", raw)
    raw = scrapertools.decodeHtmlentities(raw)

    # Pick the section of the page to scan based on the calling menu entry.
    if item.title == "Estrenos por Año":
        section_patron = 'ESTRENOS</a>(.*?)</i> Géneros'
    else:
        section_patron = 'Géneros</a>(.*?)</li></ul></li>'

    section = scrapertools.find_single_match(raw, section_patron)
    links = scrapertools.find_multiple_matches(section, '<a href="([^"]+)">([^<]+)</a>')

    return [item.clone(title=link_text, url=link_url, action="peliculas")
            for link_url, link_text in links]
|
||||
|
||||
|
||||
def year_release(item):
    """List year links as 'peliculas' entries.

    Consistency fix: download via httptools.downloadpage (as every other
    function in this channel does) instead of the legacy
    scrapertools.cache_page, and use the channel's standard
    find_multiple_matches helper instead of an ad-hoc re.compile.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    patron = '<li><a href="([^"]+)">([^<]+)</a></li>'  # url, title

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0,
                                   url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))

    return itemlist
|
||||
|
||||
|
||||
def series(item):
    """Paginated listing of TV shows (30 per local page), TMDB-decorated."""
    logger.info()

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)| |<br>", "", data)

    patron = ('<article class="TPost C TPostd">\s*<a href="([^"]+)">.*?'  # url
              '<img src="([^"]+)".*?'                                     # img
              '<h3 class="Title">([^<]+)</h3>')                           # title
    matches = scrapertools.find_multiple_matches(data, patron)

    page_slice = matches[item.page:item.page + 30]
    itemlist = [item.clone(title=show_title, url=show_url, action="temporadas",
                           contentSerieName=show_title, show=show_title,
                           thumbnail='https:' + show_thumb, contentType='tvshow')
                for show_url, show_thumb, show_title in page_slice]

    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    if item.page + 30 < len(matches):
        # Matches remain on this page: paginate locally.
        itemlist.append(item.clone(page=item.page + 30,
                                   title="» Siguiente »", text_color=color3))
    else:
        # Otherwise follow the site's own "next" link, if any.
        next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
        if next_page:
            itemlist.append(item.clone(url=next_page, page=0,
                                       title="» Siguiente »", text_color=color3))

    return itemlist
|
||||
|
||||
|
||||
def temporadas(item):
    """List the seasons of a series.

    If the page exposes more than one season, return one Item per season
    (TMDB-decorated, sorted by season number, plus a videolibrary entry);
    otherwise fall straight through to episodios().

    Fix: ``dict.has_key()`` is deprecated and removed in Python 3 —
    replaced with the ``in`` operator.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<div class="[^>]+>[^<]+<span>(.*?)</span> <i'  # season numbers

    matches = scrapertools.find_multiple_matches(data, patron)
    if len(matches) > 1:
        for scrapedseason in matches:
            new_item = item.clone(action="episodios", season=scrapedseason, extra='temporadas')
            new_item.infoLabels['season'] = scrapedseason
            new_item.extra = ""
            itemlist.append(new_item)

        tmdb.set_infoLabels(itemlist, __modo_grafico__)

        for i in itemlist:
            i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
            if i.infoLabels['title']:
                # If the season has its own name, append it to the item title.
                i.title += " - %s" % (i.infoLabels['title'])
            if 'poster_path' in i.infoLabels:
                # If the season has its own poster, use it instead of the show's.
                i.thumbnail = i.infoLabels['poster_path']

        itemlist.sort(key=lambda it: int(it.infoLabels['season']))

        if config.get_videolibrary_support() and len(itemlist) > 0:
            itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
                                 action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
                                 text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))

        return itemlist
    else:
        # Single-season show: list its episodes directly.
        return episodios(item)
|
||||
|
||||
|
||||
def episodios(item):
    """List the episodes of a series (optionally filtered to item's season).

    Scrapes the episode table, builds one Item per episode and, unless the
    call comes from the videolibrary (item.extra set), decorates the items
    with TMDB data. Always appends the "add to videolibrary" entry when
    supported.

    Fixes: ``dict.has_key()`` (removed in Python 3) replaced with ``in``;
    the loop-invariant "SxE" regex is compiled once instead of per episode.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<td class="MvTbImg B"><a href="([^"]+)".*?'  # url
    patron += '<td class="MvTbTtl"><a href="https://cine24h.net/episode/(.*?)/">([^<]+)</a>'  # slug, name

    matches = scrapertools.find_multiple_matches(data, patron)

    # Loop-invariant pattern: compile once.
    season_episode_re = re.compile(r'(\d+)x(\d+)', re.DOTALL)

    for scrapedurl, scrapedtitle, scrapedname in matches:
        # Slugs use '--' where a number is missing; treat it as 0.
        scrapedtitle = scrapedtitle.replace('--', '0')
        season, episode = season_episode_re.findall(scrapedtitle)[0]

        # When listing a single season, skip episodes of other seasons.
        if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
            continue

        title = "%sx%s: %s" % (season, episode.zfill(2), scrapedname)
        new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
                              contentType="episode")
        if 'infoLabels' not in new_item:
            new_item.infoLabels = {}

        new_item.infoLabels['season'] = season
        new_item.infoLabels['episode'] = episode.zfill(2)

        itemlist.append(new_item)

    # TODO: skip this when adding to the videolibrary
    if not item.extra:
        # Fetch the whole season's metadata in one (multithreaded) pass.
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
        for i in itemlist:
            if i.infoLabels['title']:
                # If the episode has its own name, append it to the item title.
                i.title = "%sx%s %s" % (i.infoLabels['season'],
                                        i.infoLabels['episode'], i.infoLabels['title'])
            if 'poster_path' in i.infoLabels:
                # If the episode has its own image, use it as the thumbnail.
                i.thumbnail = i.infoLabels['poster_path']

    itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
                  reverse=config.get_setting('orden_episodios', __channel__))
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    # "Add this series to the videolibrary" option
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
                             text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Resolve the playable links of a movie/episode page.

    Each player option embeds an intermediate iframe; the real stream URL is
    scraped from that second page. The '|referer' suffix trick passes the
    Referer header on to the resolver (skipped for rapidvideo).
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    # NOTE(review): the final ' ' alternative looks like a stripped '&nbsp;'
    # entity — confirm the literal survived re-encoding.
    data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}| ", "", data)
    data = scrapertools.decodeHtmlentities(data)
    patron = 'data-tplayernv="Opt(.*?)"><span>(.*?)</span>(.*?)</li>'  # option, server, "lang - quality"
    matches = re.compile(patron, re.DOTALL).findall(data)

    for option, servername, quote in matches:
        # Split the "<span>lang - quality</span>" fragment.
        patron = '<span>(.*?) -([^<]+)</span'
        match = re.compile(patron, re.DOTALL).findall(quote)
        lang, quality = match[0]
        quality = quality.strip()
        headers = {'Referer': item.url}
        # First hop: the option's iframe on the movie page.
        url_1 = scrapertools.find_single_match(data, 'id="Opt%s"><iframe width="560" height="315" src="([^"]+)"' % option)
        new_data = httptools.downloadpage(url_1, headers=headers).data
        new_data = re.sub(r"\n|\r|\t|amp;|\(.*?\)|\s{2}| ", "", new_data)
        new_data = scrapertools.decodeHtmlentities(new_data)
        # Second hop: the real stream url inside the intermediate page.
        url2 = scrapertools.find_single_match(new_data, '<iframe width="560" height="315" src="([^"]+)"')
        url = url2 + '|%s' % url_1
        if 'rapidvideo' in url2:
            url = url2

        # Map the scraped language label to a colored tag.
        lang = lang.lower().strip()
        languages = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
                     'español': '[COLOR green](CAST)[/COLOR]',
                     'subespañol': '[COLOR red](VOS)[/COLOR]',
                     'sub': '[COLOR red](VOS)[/COLOR]'}
        if lang in languages:
            lang = languages[lang]

        servername = servertools.get_server_from_url(url)

        title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
            servername.title(), quality, lang)

        itemlist.append(item.clone(action='play', url=url, title=title, language=lang, quality=quality,
                                   text_color=color3))

    itemlist = servertools.get_servers_itemlist(itemlist)

    itemlist.sort(key=lambda it: it.language, reverse=False)

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
        itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             thumbnail=thumbnail_host, contentTitle=item.contentTitle))

    return itemlist
|
||||
@@ -11,27 +11,28 @@ from lib import unshortenit
|
||||
|
||||
host = "http://www.descargacineclasico.net"
|
||||
|
||||
|
||||
def agrupa_datos(data):
    """Flatten scraped HTML into one normalized line.

    Strips newlines/tabs/<br>/HTML comments, collapses runs of whitespace to
    a single space, and removes whitespace between adjacent tags.

    NOTE(review): the ' ' alternative in the first pattern looks like a
    stripped '&nbsp;' entity — confirm the literal survived re-encoding.
    """
    # Group the data
    data = re.sub(r'\n|\r|\t| |<br>|<!--.*?-->', '', data)
    data = re.sub(r'\s+', ' ', data)
    data = re.sub(r'>\s<', '><', data)
    return data
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append(Item(channel=item.channel, title="Últimas agregadas", action="agregadas",
|
||||
url=host, viewmode="movie_with_plot",
|
||||
thumbnail=get_thumb('last', auto=True)))
|
||||
url=host, viewmode="movie_with_plot", thumbnail=get_thumb('last', auto=True)))
|
||||
itemlist.append(Item(channel=item.channel, title="Listado por género", action="porGenero",
|
||||
url=host,
|
||||
thumbnail=get_thumb('genres', auto=True)))
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, title="Buscar", action="search", url=host,
|
||||
thumbnail=get_thumb('search', auto=True)))
|
||||
url=host, thumbnail=get_thumb('genres', auto=True)))
|
||||
itemlist.append(Item(channel=item.channel, title="Listado alfabetico", action="porLetra",
|
||||
url=host + "/cine-online/", thumbnail=get_thumb('alphabet', auto=True)))
|
||||
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host,
|
||||
thumbnail=get_thumb('search', auto=True)))
|
||||
return itemlist
|
||||
|
||||
|
||||
def porLetra(item):
    """Alphabetical index: one 'agregadas' entry per letter link."""
    logger.info()

    data = httptools.downloadpage(item.url).data
    letter_links = scrapertools.find_multiple_matches(data, 'noindex,nofollow" href="([^"]+)">(\w+)<')

    return [Item(channel=item.channel, action="agregadas", title=letter, url=letter_url,
                 viewmode="movie_with_plot")
            for letter_url, letter in letter_links]
|
||||
|
||||
|
||||
@@ -43,7 +44,9 @@ def porGenero(item):
|
||||
data = re.compile(patron,re.DOTALL).findall(data)
|
||||
patron = '<li.*?>.*?href="([^"]+).*?>([^<]+)'
|
||||
matches = re.compile(patron,re.DOTALL).findall(data[0])
|
||||
for url,genero in matches:
|
||||
for url, genero in matches:
|
||||
if genero == "Erótico" and config.get_setting("adult_mode") == 0:
|
||||
continue
|
||||
itemlist.append( Item(channel=item.channel , action="agregadas" , title=genero,url=url, viewmode="movie_with_plot"))
|
||||
return itemlist
|
||||
|
||||
@@ -129,7 +132,6 @@ def findvideos(item):
|
||||
contentTitle = item.contentTitle
|
||||
))
|
||||
return itemlist
|
||||
return itemlist
|
||||
|
||||
|
||||
def play(item):
|
||||
|
||||
48
plugin.video.alfa/channels/hdfilmologia.json
Normal file
48
plugin.video.alfa/channels/hdfilmologia.json
Normal file
@@ -0,0 +1,48 @@
|
||||
{
|
||||
"id": "hdfilmologia",
|
||||
"name": "HDFilmologia",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["cast", "lat"],
|
||||
"fanart": "https://i.postimg.cc/qvFCZNKT/Alpha-652355392-large.jpg",
|
||||
"thumbnail": "https://hdfilmologia.com/templates/gorstyle/images/logo.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"movie",
|
||||
"vos"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "modo_grafico",
|
||||
"type": "bool",
|
||||
"label": "Buscar información extra",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "perfil",
|
||||
"type": "list",
|
||||
"label": "Perfil de color",
|
||||
"default": 3,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Sin color",
|
||||
"Perfil 5",
|
||||
"Perfil 4",
|
||||
"Perfil 3",
|
||||
"Perfil 2",
|
||||
"Perfil 1"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "orden_episodios",
|
||||
"type": "bool",
|
||||
"label": "Mostrar los episodios de las series en orden descendente",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
262
plugin.video.alfa/channels/hdfilmologia.py
Normal file
262
plugin.video.alfa/channels/hdfilmologia.py
Normal file
@@ -0,0 +1,262 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# -*- Channel HDFilmologia -*-
|
||||
# -*- Created for Alfa-addon -*-
|
||||
# -*- By the Alfa Develop Group -*-
|
||||
|
||||
import re
|
||||
import sys
|
||||
import urllib
|
||||
import urlparse
|
||||
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from core import channeltools
|
||||
from core import tmdb
|
||||
from platformcode import config, logger
|
||||
from channelselector import get_thumb
|
||||
|
||||
__channel__ = "hdfilmologia"
|
||||
|
||||
host = "https://hdfilmologia.com/"
|
||||
|
||||
try:
|
||||
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
|
||||
__perfil__ = int(config.get_setting('perfil', __channel__))
|
||||
except:
|
||||
__modo_grafico__ = True
|
||||
__perfil__ = 0
|
||||
|
||||
# Fijar perfil de color
|
||||
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
|
||||
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
|
||||
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
|
||||
if __perfil__ < 3:
|
||||
color1, color2, color3, color4, color5 = perfil[__perfil__]
|
||||
else:
|
||||
color1 = color2 = color3 = color4 = color5 = ""
|
||||
|
||||
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
|
||||
['Referer', host]]
|
||||
|
||||
|
||||
parameters = channeltools.get_channel_parameters(__channel__)
|
||||
fanart_host = parameters['fanart']
|
||||
thumbnail_host = parameters['thumbnail']
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu of the HDFilmologia channel."""
    logger.info()

    itemlist = [
        item.clone(title="Últimas Agregadas", action="movies", thumbnail=get_thumb('last', auto=True),
                   text_blod=True, page=0, viewcontent='movies',
                   url=host + 'index.php?do=lastnews', viewmode="movie_with_plot"),
        item.clone(title="Estrenos", action="movies", thumbnail=get_thumb('premieres', auto=True),
                   text_blod=True, page=0, viewcontent='movies', url=host + 'estrenos',
                   viewmode="movie_with_plot"),
        item.clone(title="Más Vistas", action="movies", thumbnail=get_thumb('more watched', auto=True),
                   text_blod=True, page=0, viewcontent='movies',
                   url=host + 'mas-vistas/', viewmode="movie_with_plot"),
        item.clone(title="Películas Por País", action="countriesYears", thumbnail=get_thumb('country', auto=True),
                   text_blod=True, page=0, viewcontent='movies',
                   url=host, viewmode="movie_with_plot"),
        item.clone(title="Películas Por Año", action="countriesYears", thumbnail=get_thumb('year', auto=True),
                   text_blod=True, page=0, viewcontent='movies',
                   url=host, viewmode="movie_with_plot"),
        item.clone(title="Géneros", action="genres", thumbnail=get_thumb('genres', auto=True),
                   text_blod=True, page=0, viewcontent='movies',
                   url=host, viewmode="movie_with_plot"),
        item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
                   text_blod=True, url=host, page=0),
    ]

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Site search entry point.

    Builds the advanced-search URL and delegates to sub_search(); any
    failure is logged and swallowed so the global searcher keeps working.

    Fixes: bare ``except:`` narrowed to ``except Exception`` and the
    redundant local ``import sys`` removed — ``sys`` is already imported at
    module level.
    """
    logger.info()

    texto = texto.replace(" ", "+")
    item.url = urlparse.urljoin(item.url, "?do=search&mode=advanced&subaction=search&story={0}".format(texto))
    # e.g. 'https://hdfilmologia.com/?do=search&mode=advanced&subaction=search&story=la+sombra'

    try:
        return sub_search(item)
    # Catch everything so a failing channel does not break the global searcher.
    except Exception:
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
|
||||
|
||||
|
||||
def sub_search(item):
    """Parse one page of search results into playable Items."""
    logger.info()

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)

    patron = ('<a class="sres-wrap clearfix" href="([^"]+)">'                   # url
              '<div class="sres-img"><img src="/([^"]+)" alt="([^"]+)" />.*?'   # img, title
              '<div class="sres-desc">(.*?)</div>')                             # plot
    results = re.compile(patron, re.DOTALL).findall(data)

    itemlist = [item.clone(title=found_title, url=found_url, contentTitle=found_title,
                           action="findvideos", text_color=color3, page=0, plot=found_plot,
                           thumbnail=host + found_thumb)
                for found_url, found_thumb, found_title, found_plot in results]

    # Follow the site's pagination link, if present.
    pagination = scrapertools.find_single_match(data, 'class="pnext"><a href="([^"]+)">')
    if pagination:
        itemlist.append(Item(channel=__channel__, action="sub_search",
                             title="» Siguiente »", url=pagination))

    tmdb.set_infoLabels(itemlist)

    return itemlist
|
||||
|
||||
|
||||
def movies(item):
    """Paginated movie listing (25 items per local page), TMDB-decorated."""
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    # NOTE(review): the final ' ' alternative looks like a stripped '&nbsp;'
    # entity — confirm the literal survived re-encoding.
    data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data)
    patron = '<div class="kino-item ignore-select">.*?<a href="([^"]+)" class="kino-h"><h2>([^<]+)</h2>.*?'  # url, title
    patron += '<img src="([^"]+)".*?'  # img (site-relative)
    patron += '<div class="k-meta qual-mark">([^<]+)</div>.*?'  # quality
    patron += '<strong>Año:</strong></div>([^<]+)</li>'  # year

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedtitle, scrapedthumbnail, quality, year in matches[item.page:item.page + 25]:
        # Thumbnails are relative; resolve against the page url.
        scrapedthumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        title = "%s [COLOR yellow][%s][/COLOR]" % (scrapedtitle, quality)

        itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3,
                                   url=scrapedurl, infoLabels={'year': year.strip()},
                                   contentTitle=scrapedtitle, thumbnail=scrapedthumbnail,
                                   title=title, context="buscar_trailer"))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    # More matches on this site page: paginate locally; otherwise follow the
    # site's own "next" link.
    if item.page + 25 < len(matches):
        itemlist.append(item.clone(page=item.page + 25,
                                   title="» Siguiente »", text_color=color3))
    else:
        next_page = scrapertools.find_single_match(
            data, 'class="pnext"><a href="([^"]+)">')

        if next_page:
            itemlist.append(item.clone(url=next_page, page=0,
                                       title="» Siguiente »", text_color=color3))

    return itemlist
|
||||
|
||||
|
||||
def genres(item):
    """List genre links from the site sidebar.

    Consistency fix: download via httptools.downloadpage (as every other
    function in this channel does) instead of the legacy
    scrapertools.cache_page, and use the channel's standard
    find_multiple_matches helper instead of an ad-hoc re.compile.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)

    # Each sidebar entry: site-relative url (leading slash stripped) + label.
    patron = '<li class="myli"><a href="/([^"]+)">([^<]+)</a>'
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(channel=__channel__, action="movies", title=scrapedtitle,
                                   url=host + scrapedurl, text_color=color3, viewmode="movie_with_plot"))

    return itemlist
|
||||
|
||||
|
||||
def countriesYears(item):
    """List country or year filter links from the site's dropdown menus."""
    logger.info()

    raw = httptools.downloadpage(item.url).data
    raw = re.sub(r"\n|\r|\t|\(.*?\)| |<br>", "", raw)

    # Select the right <select> block depending on the calling menu entry.
    if item.title == "Películas Por País":
        section_patron = 'Por País</option>(.*?)</option></select>'
    else:
        section_patron = 'Por Año</option>(.*?)<option value="/">Peliculas'

    section = scrapertools.find_single_match(raw, section_patron)
    options = scrapertools.find_multiple_matches(section, '<option value="/([^"]+)">([^<]+)</option>')

    return [item.clone(title=option_label, url=host + option_path, action="movies")
            for option_path, option_label in options]
|
||||
|
||||
|
||||
def findvideos(item):
    """Extract playable video links for a movie page.

    Each match is a one-letter language code plus a numbered ``src`` url;
    Google Drive and ultrapeliculashd links need a second download to find
    the real stream.

    Fixes: the loop body reused the names ``data``, ``patron`` and
    ``matches`` — the last being the very list it iterates — a latent
    shadowing bug; inner variables renamed. The loop-invariant ``languages``
    dict is also hoisted out of the loop.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}| ", "", data)

    patron = r'(\w+)src\d+="([^"]+)"'  # language letter, stream url
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Language code -> colored label (loop-invariant).
    languages = {'l': '[COLOR cornflowerblue](LAT)[/COLOR]',
                 'e': '[COLOR green](CAST)[/COLOR]',
                 's': '[COLOR red](VOS)[/COLOR]'}

    for lang, url in matches:

        server = servertools.get_server_from_url(url)
        if 'dropbox' in url:
            server = 'dropbox'
        if '/drive/' in url:
            # Google Drive pages embed the real stream inside an iframe.
            drive_data = httptools.downloadpage(url).data
            url = scrapertools.find_single_match(drive_data, '<iframe src="([^"]+)"')
            server = 'gdrive'

        if 'ultrapeliculashd' in url:
            hd_data = httptools.downloadpage(url).data
            # The dropbox key is packed inside an obfuscated |s|<key>| token.
            keys = re.compile(r"\|s\|(\w+)\|", re.DOTALL).findall(hd_data)
            for key in keys:
                url = 'https://www.dropbox.com/s/%s?dl=1' % (key)
                server = 'dropbox'

        if lang in languages:
            lang = languages[lang]

        title = "Ver en: [COLOR yellow](%s)[/COLOR] [COLOR yellowgreen]%s[/COLOR]" % (server.title(), lang)
        if 'youtube' not in server:

            itemlist.append(item.clone(action='play', url=url, title=title, language=lang,
                                       text_color=color3))

    itemlist = servertools.get_servers_itemlist(itemlist)
    itemlist.sort(key=lambda it: it.language, reverse=False)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
        itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             thumbnail=thumbnail_host, contentTitle=item.contentTitle))

    return itemlist
|
||||
@@ -136,7 +136,7 @@ def peliculas(item):
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
|
||||
|
||||
pagination = scrapertools.find_single_match(data, "<span class=\"current\">\d+</span><a href='([^']+)'")
|
||||
pagination = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
|
||||
|
||||
if pagination:
|
||||
itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »",
|
||||
@@ -239,7 +239,7 @@ def series(item):
|
||||
action="temporadas", contentType='tvshow'))
|
||||
tmdb.set_infoLabels(itemlist, __modo_grafico__)
|
||||
|
||||
pagination = scrapertools.find_single_match(data, "<link rel='next' href='([^']+)' />")
|
||||
pagination = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
|
||||
|
||||
if pagination:
|
||||
itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=pagination,
|
||||
|
||||
49
plugin.video.alfa/channels/pelis24.json
Normal file
49
plugin.video.alfa/channels/pelis24.json
Normal file
@@ -0,0 +1,49 @@
|
||||
{
|
||||
"id": "pelis24",
|
||||
"name": "Pelis24",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["lat"],
|
||||
"fanart": "https://i.postimg.cc/WpqD2n77/cine24bg.jpg",
|
||||
"thumbnail": "https://www.pelis24.in/wp-content/uploads/2018/05/44.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"movie",
|
||||
"tvshow",
|
||||
"vos"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "modo_grafico",
|
||||
"type": "bool",
|
||||
"label": "Buscar información extra",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "perfil",
|
||||
"type": "list",
|
||||
"label": "Perfil de color",
|
||||
"default": 3,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Sin color",
|
||||
"Perfil 5",
|
||||
"Perfil 4",
|
||||
"Perfil 3",
|
||||
"Perfil 2",
|
||||
"Perfil 1"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "orden_episodios",
|
||||
"type": "bool",
|
||||
"label": "Mostrar los episodios de las series en orden descendente",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
397
plugin.video.alfa/channels/pelis24.py
Normal file
397
plugin.video.alfa/channels/pelis24.py
Normal file
@@ -0,0 +1,397 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# -*- Channel CanalPelis -*-
|
||||
# -*- Created for Alfa-addon -*-
|
||||
# -*- By the Alfa Develop Group -*-
|
||||
|
||||
import re
|
||||
import sys
|
||||
import urllib
|
||||
import urlparse
|
||||
|
||||
from channels import autoplay
|
||||
from channels import filtertools
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from core import channeltools
|
||||
from core import tmdb
|
||||
from platformcode import config, logger
|
||||
from channelselector import get_thumb
|
||||
|
||||
# Channel identifier used for per-channel settings and parameter lookups.
__channel__ = "pelis24"

host = "https://www.pelis24.in/"

# Read user settings; fall back to safe defaults if the config is unavailable.
try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
    __perfil__ = int(config.get_setting('perfil', __channel__))
except:
    __modo_grafico__ = True
    __perfil__ = 0

# Set the color profile (profiles 0-2 are defined; anything else = no color).
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
    color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
    color1 = color2 = color3 = color4 = color5 = ""

# Default HTTP headers for requests to the site.
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
           ['Referer', host]]


# Channel artwork taken from the channel's JSON definition.
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']

# Language / quality / server lists used by filtertools and autoplay.
IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'openload', 'streamcherry']
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def mainlist(item):
    """Build the channel's top-level menu (same entries, same order)."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = []
    itemlist.append(item.clone(title="Novedades", action="peliculas", thumbnail=get_thumb('newest', auto=True),
                               text_blod=True, page=0, viewcontent='movies',
                               url=host + 'movies/', viewmode="movie_with_plot"))
    itemlist.append(item.clone(title="Tendencias", action="peliculas", thumbnail=get_thumb('newest', auto=True),
                               text_blod=True, page=0, viewcontent='movies',
                               url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"))
    itemlist.append(item.clone(title="Estrenos", action="peliculas", thumbnail=get_thumb('estrenos', auto=True),
                               text_blod=True, page=0, viewcontent='movies',
                               url=host + 'genre/estrenos/', viewmode="movie_with_plot"))
    itemlist.append(item.clone(title="Géneros", action="genresYears", thumbnail=get_thumb('genres', auto=True),
                               text_blod=True, page=0, viewcontent='movies',
                               url=host, viewmode="movie_with_plot"))
    itemlist.append(item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
                               text_blod=True, url=host, page=0))

    autoplay.show_option(item.channel, itemlist)
    return itemlist
|
||||
|
||||
def search(item, texto):
    """Entry point for the global search.

    Builds the site's `?s=` query URL from *texto* and delegates to
    sub_search(). Any exception is swallowed (logged) so a broken channel
    cannot interrupt the addon-wide search.
    """
    logger.info()

    texto = texto.replace(" ", "+")
    item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))

    try:
        return sub_search(item)

    # Deliberately broad: the global search must survive a failing channel.
    except:
        # `sys` is already imported at module level; the previous local
        # re-import was redundant.
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
|
||||
|
||||
def sub_search(item):
    """List the movie results of a site search (TV-show hits are skipped)."""
    logger.info()

    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Flatten the HTML so the regexes below can match across lines.
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    # Keep only the results section of the page.
    data = scrapertools.find_single_match(data, '<header><h1>Resultados encontrados(.*?)resppages')
    patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" />.*?'  # url, img, title
    patron += '<span class="year">([^<]+)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
        # Series results ('tvshows' URLs) are excluded from movie search.
        if 'tvshows' not in scrapedurl:

            itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, contentTitle=scrapedtitle,
                                       action="findvideos", infoLabels={"year": year},
                                       thumbnail=scrapedthumbnail, text_color=color3))

    # Link to the next page of results, if the paginator shows one.
    paginacion = scrapertools.find_single_match(data, "<span class=\"current\">\d+</span><a href='([^']+)'")

    if paginacion:
        itemlist.append(Item(channel=item.channel, action="sub_search",
                             title="» Siguiente »", url=paginacion,
                             thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/next.png'))

    tmdb.set_infoLabels(itemlist)

    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List movies from a catalogue page, paginating 30 items at a time.

    Pagination is client-side first (slices of the already-scraped matches,
    tracked in item.page) and falls back to the site's <link rel="next">
    when the current page is exhausted.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data)
    data = scrapertools.decodeHtmlentities(data)

    patron = '<article id="post-\w+" class="item movies"><div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?'  # img, title
    patron += '<span class="quality">([^<]+)</span> </div>\s*<a href="([^"]+)">.*?'  # quality, url
    patron += '</h3><span>([^<]+)</span>'  # year

    matches = scrapertools.find_multiple_matches(data, patron)

    # Show only the current 30-item window of the match list.
    for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year in matches[item.page:item.page + 30]:
        title = '%s [COLOR yellowgreen](%s)[/COLOR]' % (scrapedtitle, quality)

        itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3,
                                   url=scrapedurl, infoLabels={'year': year}, quality=quality,
                                   contentTitle=scrapedtitle, thumbnail=scrapedthumbnail,
                                   title=title, context="buscar_trailer"))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    # More matches on this page -> advance the local window; otherwise
    # follow the site's "next page" link (resetting the window to 0).
    if item.page + 30 < len(matches):
        itemlist.append(item.clone(page=item.page + 30,
                                   title="» Siguiente »", text_color=color3))
    else:
        next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
        if next_page:
            itemlist.append(item.clone(url=next_page, page=0, title="» Siguiente »", text_color=color3))

    return itemlist
|
||||
|
||||
|
||||
def genresYears(item):
    """List the genre (or release-year) submenu entries of the site."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)| |<br>", "", data)
    data = scrapertools.decodeHtmlentities(data)

    # Pick the page section to scan depending on which menu entry called us.
    if item.title == "Estrenos por Año":
        patron_todas = 'ESTRENOS</a>(.*?)</i> Géneros'
    else:
        patron_todas = '<h2>Generos</h2>(.*?)</div><aside'

    data = scrapertools.find_single_match(data, patron_todas)
    patron = '<a href="([^"]+)">([^<]+)</a> <i>([^<]+)</i>'  # url, title, video count
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedtitle, videos_num in matches:
        # Append the number of videos to the title; the site uses '.' as a
        # thousands separator, shown here as ','.
        title = '%s (%s)' % (scrapedtitle, videos_num.replace('.', ','))

        itemlist.append(item.clone(title=title, url=scrapedurl, action="peliculas"))

    return itemlist
|
||||
|
||||
|
||||
def year_release(item):
    """List release-year links, each opening the movie list for that year."""
    logger.info()
    itemlist = []

    # Consistency fix: use httptools.downloadpage like every other function
    # in this channel instead of the deprecated scrapertools.cache_page.
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    patron = '<li><a href="([^"]+)">([^<]+)</a></li>'  # url, title
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:

        itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0,
                                   url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))

    return itemlist
|
||||
|
||||
|
||||
def series(item):
    """List TV series from the site, 30 at a time (see peliculas())."""
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)| |<br>", "", data)

    patron = '<article class="TPost C TPostd">\s*<a href="([^"]+)">.*?'  # url
    patron += '<img src="([^"]+)".*?'  # img
    patron += '<h3 class="Title">([^<]+)</h3>'  # title

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches[item.page:item.page + 30]:

        # Thumbnails are protocol-relative ('//...') on this site, hence the
        # 'https:' prefix — assumption based on usage; TODO confirm.
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas",
                                   contentSerieName=scrapedtitle, show=scrapedtitle,
                                   thumbnail='https:'+scrapedthumbnail, contentType='tvshow'))

    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    # Local 30-item pagination first, then the site's next-page link.
    if item.page + 30 < len(matches):
        itemlist.append(item.clone(page=item.page + 30,
                                   title="» Siguiente »", text_color=color3))
    else:
        next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')

        if next_page:
            itemlist.append(item.clone(url=next_page, page=0,
                                       title="» Siguiente »", text_color=color3))

    return itemlist
|
||||
|
||||
|
||||
def temporadas(item):
    """List the seasons of a series.

    If the page exposes more than one season, one item per season is
    returned (plus an "add to library" entry); otherwise the episode list
    is returned directly via episodios().
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<div class="[^>]+>[^<]+<span>(.*?)</span> <i'  # season numbers

    matches = scrapertools.find_multiple_matches(data, patron)
    if len(matches) > 1:
        for scrapedseason in matches:
            new_item = item.clone(action="episodios", season=scrapedseason, extra='temporadas')
            new_item.infoLabels['season'] = scrapedseason
            new_item.extra = ""
            itemlist.append(new_item)

        tmdb.set_infoLabels(itemlist, __modo_grafico__)

        for i in itemlist:
            i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
            if i.infoLabels['title']:
                # If the season has its own name, append it to the item title.
                i.title += " - %s" % (i.infoLabels['title'])
            # 'in' replaces dict.has_key(), which only exists on Python 2.
            if 'poster_path' in i.infoLabels:
                # If the season has its own poster, replace the series one.
                i.thumbnail = i.infoLabels['poster_path']

        itemlist.sort(key=lambda it: int(it.infoLabels['season']))

        if config.get_videolibrary_support() and len(itemlist) > 0:
            itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
                                 action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
                                 text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))

        return itemlist
    else:
        return episodios(item)
|
||||
|
||||
|
||||
def episodios(item):
    """List the episodes of a series (optionally filtered to item's season).

    Fixes:
    - The episode-link regex hard-coded 'https://cine24h.net/episode/',
      a leftover from the channel this file was copied from; it now uses
      this channel's `host` (pelis24.in), matching the sibling pelishd24
      implementation.
    - dict.has_key() (Python-2 only) replaced with the `in` operator.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<td class="MvTbImg B"><a href="([^"]+)".*?'  # url
    patron += '<td class="MvTbTtl"><a href="' + host + 'episode/(.*?)/">([^<]+)</a>'  # episode slug, name

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedtitle, scrapedname in matches:
        # '--' marks a missing number in the SxE slug; treat it as 0.
        scrapedtitle = scrapedtitle.replace('--', '0')
        patron = '(\d+)x(\d+)'
        match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
        season, episode = match[0]

        # When listing a specific season, skip episodes of other seasons.
        if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
            continue

        title = "%sx%s: %s" % (season, episode.zfill(2), scrapedname)
        new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
                              contentType="episode")
        if 'infoLabels' not in new_item:
            new_item.infoLabels = {}

        new_item.infoLabels['season'] = season
        new_item.infoLabels['episode'] = episode.zfill(2)

        itemlist.append(new_item)

    # TODO: skip this when adding the series to the video library.
    if not item.extra:
        # Fetch per-episode metadata for the whole season (multithreaded).
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
        for i in itemlist:
            if i.infoLabels['title']:
                # If the episode has its own name, append it to the title.
                i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[
                    'episode'], i.infoLabels['title'])
            if 'poster_path' in i.infoLabels:
                # If the episode has its own image, replace the poster.
                i.thumbnail = i.infoLabels['poster_path']

    itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
                  reverse=config.get_setting('orden_episodios', __channel__))
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    # "Add this series to the video library" option.
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
                             text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Scrape the playable links of a pelis24 movie/episode page.

    Each player tab ('#option-N') carries a language label; the matching
    iframe URL is looked up per option, tagged with a colored language
    marker and emitted as a playable Item.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}| ", "", data)
    data = scrapertools.decodeHtmlentities(data)

    # One match per player tab: option id and language label.
    patron = 'href="#option-(.*?)"><span class="dt_flag"><img src="[^"]+"></span>([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for option, lang in matches:
        # Fetch the iframe URL belonging to this option's panel.
        url = scrapertools.find_single_match(data, '<div id="option-%s" class="[^"]+"><iframe class="metaframe rptss" src="([^"]+)"' % option)
        lang = lang.lower().strip()
        # Map the site's language labels to colored display tags.
        languages = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
                     'castellano': '[COLOR green](CAST)[/COLOR]',
                     'español': '[COLOR green](CAST)[/COLOR]',
                     'subespañol': '[COLOR red](VOS)[/COLOR]',
                     'sub': '[COLOR red](VOS)[/COLOR]',
                     'ingles': '[COLOR red](VOS)[/COLOR]'}
        if lang in languages:
            lang = languages[lang]

        server = servertools.get_server_from_url(url)
        title = "»» [COLOR yellow](%s)[/COLOR] [COLOR goldenrod](%s)[/COLOR] %s ««" % (server.title(), item.quality, lang)

        itemlist.append(item.clone(action='play', url=url, title=title, language=lang, text_color=color3))

    itemlist = servertools.get_servers_itemlist(itemlist)
    itemlist.sort(key=lambda it: it.language, reverse=False)

    # Required by FilterTools (language filtering).
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay.
    autoplay.start(itemlist, item)

    # Offer "add to library" only for movies (episodes use extra='episodios').
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
        itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             thumbnail=thumbnail_host, contentTitle=item.contentTitle))

    return itemlist
|
||||
64
plugin.video.alfa/channels/pelishd24.json
Normal file
64
plugin.video.alfa/channels/pelishd24.json
Normal file
@@ -0,0 +1,64 @@
|
||||
{
|
||||
"id": "pelishd24",
|
||||
"name": "PelisHD24",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["lat", "cast", "eng"],
|
||||
"fanart": "https://pelishd24.com/wp-content/uploads/2018/11/background.png",
|
||||
"thumbnail": "https://pelishd24.com/wp-content/uploads/2018/07/pelishd24.2.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"movie",
|
||||
"tvshow",
|
||||
"vos",
|
||||
"direct"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostrar enlaces en idioma...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"No filtrar",
|
||||
"Latino",
|
||||
"Castellano",
|
||||
"English"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "modo_grafico",
|
||||
"type": "bool",
|
||||
"label": "Buscar información extra",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "perfil",
|
||||
"type": "list",
|
||||
"label": "Perfil de color",
|
||||
"default": 3,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Sin color",
|
||||
"Perfil 5",
|
||||
"Perfil 4",
|
||||
"Perfil 3",
|
||||
"Perfil 2",
|
||||
"Perfil 1"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "orden_episodios",
|
||||
"type": "bool",
|
||||
"label": "Mostrar los episodios de las series en orden descendente",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
464
plugin.video.alfa/channels/pelishd24.py
Normal file
464
plugin.video.alfa/channels/pelishd24.py
Normal file
@@ -0,0 +1,464 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# -*- Channel PelisHD24 -*-
|
||||
# -*- Created for Alfa-addon -*-
|
||||
# -*- By the Alfa Develop Group -*-
|
||||
|
||||
import re
|
||||
import sys
|
||||
import urlparse
|
||||
|
||||
from channels import autoplay
|
||||
from lib import generictools
|
||||
from channels import filtertools
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from core import channeltools
|
||||
from core import tmdb
|
||||
from platformcode import config, logger
|
||||
from channelselector import get_thumb
|
||||
|
||||
# Channel identifier used for per-channel settings and parameter lookups.
__channel__ = "pelishd24"

host = "https://pelishd24.com/"

# Read user settings; fall back to safe defaults if the config is unavailable.
try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
    __perfil__ = int(config.get_setting('perfil', __channel__))
except:
    __modo_grafico__ = True
    __perfil__ = 0

# Set the color profile (profiles 0-2 are defined; anything else = no color).
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
    color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
    color1 = color2 = color3 = color4 = color5 = ""

# Default HTTP headers for requests to the site.
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
           ['Referer', host]]

# Channel artwork taken from the channel's JSON definition.
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']

# Language / quality / server lists used by filtertools and autoplay.
IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST', 'English': 'VOS'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'openload', 'streamcherry']
|
||||
|
||||
|
||||
def mainlist(item):
    """Build the channel's top-level menu: movies, series and search."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = [item.clone(title="Peliculas", action="menumovies", text_blod=True,
                           viewcontent='movies', viewmode="movie_with_plot", thumbnail=get_thumb('movies', auto=True)),

                item.clone(title="Series", action="series", extra='serie', url=host + 'series/',
                           viewmode="movie_with_plot", text_blod=True, viewcontent='movies',
                           thumbnail=get_thumb('tvshows', auto=True), page=0),

                item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
                           text_blod=True, url=host, page=0)]

    autoplay.show_option(item.channel, itemlist)
    return itemlist
|
||||
|
||||
|
||||
def menumovies(item):
    """Build the movies submenu (catalogue views, genres, A-Z, search)."""
    logger.info()
    itemlist = [item.clone(title="Todas", action="peliculas", thumbnail=get_thumb('all', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host + 'peliculas/', viewmode="movie_with_plot"),

                item.clone(title="Estrenos", action="peliculas", thumbnail=get_thumb('estrenos', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host + '?s=trfilter&trfilter=1&years=2018', viewmode="movie_with_plot"),

                item.clone(title="Más Vistas", action="peliculas", thumbnail=get_thumb('more watched', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host + 'mas-vistas/', viewmode="movie_with_plot"),

                item.clone(title="Más Votadas", action="peliculas", thumbnail=get_thumb('more voted', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host + 'peliculas-mas-votadas/', viewmode="movie_with_plot"),

                item.clone(title="Géneros", action="genres_atoz", thumbnail=get_thumb('genres', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host, viewmode="movie_with_plot"),

                # genres_atoz branches on item.title to pick the A-Z listing.
                item.clone(title="A-Z", action="genres_atoz", thumbnail=get_thumb('year', auto=True),
                           text_blod=True, page=0, viewcontent='movies', url=host,
                           viewmode="movie_with_plot"),

                item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
                           text_blod=True, url=host, page=0, extra='buscarP')]

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Entry point for the global search.

    Builds the site's `?s=` query URL from *texto* and delegates to
    peliculas(). Any exception is swallowed (logged) so a broken channel
    cannot interrupt the addon-wide search.
    """
    logger.info()

    texto = texto.replace(" ", "+")
    item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))

    try:
        return peliculas(item)

    # Deliberately broad: the global search must survive a failing channel.
    except:
        # `sys` is already imported at module level; the previous local
        # re-import was redundant.
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
|
||||
|
||||
|
||||
def peliculas(item):
    """List catalogue entries, 30 per page; series links get 'temporadas'.

    The site mixes movies and series in the same listing, so the action
    and contentType are chosen per URL ('/serie/' marks a series).
    """
    logger.info()
    itemlist = []
    action = ''
    contentType = ''
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data)
    data = scrapertools.decodeHtmlentities(data)

    patron = '<article id="[^"]+" class="TPost[^<]+<a href="([^"]+)">.*?'  # url
    patron += '<img src="([^"]+)".*?'  # img
    patron += '</figure>(.*?)'  # tipo
    patron += '<h3 class="Title">([^<]+)</h3>.*?'  # title
    patron += '<span class="Year">([^<]+)</span>.*?'  # year

    matches = scrapertools.find_multiple_matches(data, patron)

    # Show only the current 30-item window (tracked in item.page).
    for scrapedurl, scrapedthumbnail, tipo, scrapedtitle, year in matches[item.page:item.page + 30]:
        title = ''
        if '/serie/' in scrapedurl:
            action = 'temporadas'
            contentType = 'tvshow'
            title = scrapedtitle + '[COLOR blue] (Serie)[/COLOR]'
        else:
            action = 'findvideos'
            contentType = 'movie'
            title = scrapedtitle

        # Thumbnails are protocol-relative ('//...'), hence the 'https:'
        # prefix — assumption based on usage; TODO confirm.
        itemlist.append(item.clone(channel=__channel__, action=action, text_color=color3, show=scrapedtitle,
                                   url=scrapedurl, infoLabels={'year': year}, extra='peliculas',
                                   contentTitle=scrapedtitle, thumbnail='https:' + scrapedthumbnail,
                                   title=title, context="buscar_trailer", contentType=contentType))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    # Local 30-item pagination first, then the site's next-page link.
    if item.page + 30 < len(matches):
        itemlist.append(item.clone(page=item.page + 30,
                                   title="» Siguiente »", text_color=color3))
    else:
        next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
        if next_page:
            itemlist.append(item.clone(url=next_page, page=0, title="» Siguiente »", text_color=color3))

    return itemlist
|
||||
|
||||
|
||||
def genres_atoz(item):
    """List either the genre links or the A-Z letter links of the site.

    The branch is selected by the calling menu entry's title ("A-Z" vs
    "Géneros"); each resulting item routes to the matching listing action.
    """
    logger.info()
    itemlist = []
    action = ''
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)| |<br>", "", data)
    data = scrapertools.decodeHtmlentities(data)

    if item.title == "A-Z":
        patron_todas = '<ul class="AZList"(.*?)</li></ul>'
        action = 'atoz'
    else:
        patron_todas = '<a href="#">GENERO</a>(.*?)</li></ul>'
        action = 'peliculas'

    # Narrow the HTML to the chosen menu section, then pull its links.
    data = scrapertools.find_single_match(data, patron_todas)
    patron = '<a href="([^"]+)">([^<]+)</a>'  # url, title
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action=action))

    return itemlist
|
||||
|
||||
|
||||
def atoz(item):
    """List the table entries of an A-Z letter page, 30 per page.

    Rows mix movies and series; '/serie/' URLs route to 'temporadas',
    everything else to 'findvideos' with the quality shown in the title.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)| |<br>", "", data)
    data = scrapertools.decodeHtmlentities(data)

    patron = '<td class="MvTbImg"> <a href="([^"]+)".*?'  # url
    patron += '<img src="([^"]+)".*?'  # img
    patron += '<strong>([^<]+)</strong> </a></td><td>([^<]+)</td>.*?'  # title, year
    patron += '<span class="Qlty">([^<]+)</span>'  # quality
    matches = scrapertools.find_multiple_matches(data, patron)

    # Show only the current 30-item window (tracked in item.page).
    for scrapedurl, scrapedthumbnail, scrapedtitle, year, quality in matches[item.page:item.page + 30]:
        title = ''
        action = ''
        if '/serie/' in scrapedurl:
            action = 'temporadas'
            contentType = 'tvshow'
            title = scrapedtitle + '[COLOR blue] (Serie)[/COLOR]'
        else:
            action = 'findvideos'
            contentType = 'movie'
            title = "%s [COLOR yellow]%s[/COLOR]" % (scrapedtitle, quality)

        # Thumbnails are protocol-relative ('//...'), hence the 'https:'
        # prefix — assumption based on usage; TODO confirm.
        itemlist.append(item.clone(channel=__channel__, action=action, text_color=color3, contentType=contentType,
                                   url=scrapedurl, infoLabels={'year': year}, extra='peliculas',
                                   contentTitle=scrapedtitle, thumbnail='https:' + scrapedthumbnail,
                                   title=title, context="buscar_trailer", show=scrapedtitle, ))

    # Local 30-item pagination first, then the site's next-page link.
    if item.page + 30 < len(matches):
        itemlist.append(item.clone(page=item.page + 30,
                                   title="» Siguiente »", text_color=color3))
    else:
        next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
        if next_page:
            itemlist.append(item.clone(url=next_page, page=0, title="» Siguiente »", text_color=color3))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    return itemlist
|
||||
|
||||
|
||||
def series(item):
    """List TV series, 30 at a time, each routing to temporadas()."""
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)| |<br>", "", data)

    patron = '<article class="TPost C">\s*<a href="([^"]+)">.*?'  # url
    patron += '<img src="([^"]+)".*?'  # img
    patron += '<h3 class="Title">([^<]+)</h3>'  # title

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches[item.page:item.page + 30]:
        # Thumbnails are protocol-relative ('//...'), hence the 'https:'
        # prefix — assumption based on usage; TODO confirm.
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas",
                                   contentSerieName=scrapedtitle, show=scrapedtitle,
                                   thumbnail='https:' + scrapedthumbnail, contentType='tvshow'))

    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    # Local 30-item pagination first, then the site's next-page link.
    if item.page + 30 < len(matches):
        itemlist.append(item.clone(page=item.page + 30,
                                   title="» Siguiente »", text_color=color3))
    else:
        next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')

        if next_page:
            itemlist.append(item.clone(url=next_page, page=0,
                                       title="» Siguiente »", text_color=color3))

    return itemlist
|
||||
|
||||
|
||||
def temporadas(item):
    """List the seasons of a series.

    If the page exposes more than one season, one item per season is
    returned (plus an "add to library" entry); otherwise the episode list
    is returned directly via episodios().
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<div class="[^>]+>[^<]+<span>(.*?)</span> <i'  # season numbers

    matches = scrapertools.find_multiple_matches(data, patron)
    if len(matches) > 1:
        for scrapedseason in matches:
            new_item = item.clone(action="episodios", season=scrapedseason, extra='temporadas')
            new_item.infoLabels['season'] = scrapedseason
            new_item.extra = ""
            itemlist.append(new_item)

        tmdb.set_infoLabels(itemlist, __modo_grafico__)

        for i in itemlist:
            i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
            if i.infoLabels['title']:
                # If the season has its own name, append it to the item title.
                i.title += " - %s" % (i.infoLabels['title'])
            # 'in' replaces dict.has_key(), which only exists on Python 2.
            if 'poster_path' in i.infoLabels:
                # If the season has its own poster, replace the series one.
                i.thumbnail = i.infoLabels['poster_path']

        itemlist.sort(key=lambda it: int(it.infoLabels['season']))

        if config.get_videolibrary_support() and len(itemlist) > 0:
            itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
                                 action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
                                 text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))

        return itemlist
    else:
        return episodios(item)
|
||||
|
||||
|
||||
def episodios(item):
    """List the episodes of a show, optionally filtered to item's season."""
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<td class="MvTbImg B"><a href="([^"]+)".*?'  # url
    patron += host + 'episode/(.*?)/">([^<]+)</a>'  # episode slug and title

    matches = scrapertools.find_multiple_matches(data, patron)

    # Compiled once instead of per iteration
    patron_se = re.compile('(\d+)x(\d+)', re.DOTALL)
    for scrapedurl, scrapedtitle, scrapedname in matches:
        scrapedtitle = scrapedtitle.replace('--', '0')
        match = patron_se.findall(scrapedtitle)
        if not match:
            # No "NxM" marker in the slug: skip instead of crashing on match[0]
            continue
        season, episode = match[0]

        if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
            continue

        title = "%sx%s: %s" % (season, episode.zfill(2), scrapedname)
        new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
                              contentType="episode", extra='episodios')
        if 'infoLabels' not in new_item:
            new_item.infoLabels = {}

        new_item.infoLabels['season'] = season
        new_item.infoLabels['episode'] = episode.zfill(2)

        itemlist.append(new_item)

    # TODO: skip this when adding to the video library
    if not item.extra:
        # Fetch metadata for all episodes of the season (threaded)
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
        for i in itemlist:
            if i.infoLabels['title']:
                # If the episode has its own name, append it to the title
                i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[
                    'episode'], i.infoLabels['title'])
            # 'in' instead of the Python2-only dict.has_key()
            if 'poster_path' in i.infoLabels:
                # If the episode has its own image, replace the poster
                i.thumbnail = i.infoLabels['poster_path']

        itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
                      reverse=config.get_setting('orden_episodios', __channel__))
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    # "Add this show to the video library" entry
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
                             text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Resolve the playable hoster links for a movie/episode page.

    Each player tab ("Opt<n>") nests one or two levels of iframes; some
    point at pelishd24 wrappers whose real file URL must be extracted
    (via generictools.dejuice or a JSON "file" field).
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}| ", "", data)
    data = scrapertools.decodeHtmlentities(data)
    patron = 'data-tplayernv="Opt(.*?)"><span>[^"<]+</span>(.*?)</li>'  # option, servername, lang - quality
    matches = re.compile(patron, re.DOTALL).findall(data)

    for option, quote in matches:
        # "<span>LANG - QUALITY</span>" inside each player tab
        patron = '<span>(.*?) -([^<]+)</span'
        match = re.compile(patron, re.DOTALL).findall(quote)
        lang, quality = match[0]
        quality = quality.strip()
        lang = lang.lower().strip()
        languages = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
                     'castellano': '[COLOR green](CAST)[/COLOR]',
                     'subtitulado': '[COLOR red](VOS)[/COLOR]'}

        if lang in languages:
            lang = languages[lang]

        # First-level iframe for this option
        url_1 = scrapertools.find_single_match(data,
                                               'id="Opt%s"><iframe width="560" height="315" src="([^"]+)"' % option)
        new_data = httptools.downloadpage(url_1).data
        new_data = re.sub(r"\n|\r|\t|amp;|\(.*?\)|\s{2}| ", "", new_data)
        new_data = scrapertools.decodeHtmlentities(new_data)
        patron1 = '<iframe width="560" height="315" src="([^"]+)"'
        match1 = re.compile(patron1, re.DOTALL).findall(new_data)

        urls = scrapertools.find_single_match(new_data, '<iframe width="560" height="315" src="([^"]+)"')
        servername = servertools.get_server_from_url(urls)
        if 'stream.pelishd24.net' in urls:
            # Obfuscated player: decode it and pull the direct "file" URL
            vip_data = httptools.downloadpage(urls).data
            dejuiced = generictools.dejuice(vip_data)
            patron = '"file":"([^"]+)"'
            match = re.compile(patron, re.DOTALL).findall(dejuiced)
            for scrapedurl in match:
                urls = scrapedurl
                servername = 'gvideo'
        if 'pelishd24.com/?trhide' in urls:
            # Redirect wrapper: the target page embeds the "file" URL in plain JSON
            data = httptools.downloadpage(urls).data
            # logger.error(texto='****hex'+data)
            patron = '"file":"([^"]+)"'
            match = re.compile(patron, re.DOTALL).findall(data)
            for scrapedurl in match:
                urls = scrapedurl
                servername = 'gvideo'

        title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
            servername.title(), quality, lang)
        # embed.pelishd24.* URLs are not directly playable; skip them
        if 'embed.pelishd24.com' not in urls and 'embed.pelishd24.net' not in urls:
            itemlist.append(item.clone(action='play', title=title, url=urls, language=lang, quality=quality,
                                       text_color=color3))

        # Second-level iframes: follow each and extract the hoster URLs
        # from the player's ["id","url",n] JS arrays
        for url in match1:
            new_data = httptools.downloadpage(url).data
            new_data = re.sub(r"\n|\r|\t|amp;|\(.*?\)|\s{2}| ", "", new_data)
            new_data = scrapertools.decodeHtmlentities(new_data)
            patron1 = '\["\d+","([^"]+)",\d+]'
            match1 = re.compile(patron1, re.DOTALL).findall(new_data)
            for url in match1:
                url = url.replace('\\', '')
                servername = servertools.get_server_from_url(url)
                if 'pelishd24.net' in url or 'stream.pelishd24.com' in url:
                    # Same obfuscated-player unwrapping as above
                    vip_data = httptools.downloadpage(url).data
                    dejuiced = generictools.dejuice(vip_data)
                    patron = '"file":"([^"]+)"'
                    match = re.compile(patron, re.DOTALL).findall(dejuiced)
                    for scrapedurl in match:
                        url = scrapedurl
                        servername = 'gvideo'

                if 'ww3.pelishd24.com' in url:
                    data1 = httptools.downloadpage(url).data
                    url = scrapertools.find_single_match(data1, '"file": "([^"]+)"')
                    servername = 'gvideo'

                title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
                    servername.title(), quality, lang)

                itemlist.append(item.clone(action='play', title=title, url=url, language=lang, quality=quality,
                                           text_color=color3))

    itemlist = servertools.get_servers_itemlist(itemlist)

    itemlist.sort(key=lambda it: it.language, reverse=False)

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay

    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
        itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             thumbnail=thumbnail_host, contentTitle=item.contentTitle))

    return itemlist
|
||||
93
plugin.video.alfa/channels/pelisplay.json
Normal file
93
plugin.video.alfa/channels/pelisplay.json
Normal file
@@ -0,0 +1,93 @@
|
||||
{
|
||||
"id": "pelisplay",
|
||||
"name": "PelisPlay",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["cast", "lat"],
|
||||
"fanart": "https://s33.postimg.cc/d3ioghaof/image.png",
|
||||
"thumbnail": "https://www.pelisplay.tv/static/img/logo.png",
|
||||
"banner": "https://s33.postimg.cc/cyex6xlen/image.png",
|
||||
"categories": [
|
||||
"movie",
|
||||
"tvshow",
|
||||
"vos"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostrar enlaces en idioma...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"No filtrar",
|
||||
"Latino"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en búsqueda global",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "modo_grafico",
|
||||
"type": "bool",
|
||||
"label": "Buscar información extra",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "perfil",
|
||||
"type": "list",
|
||||
"label": "Perfil de color",
|
||||
"default": 3,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Sin color",
|
||||
"Perfil 5",
|
||||
"Perfil 4",
|
||||
"Perfil 3",
|
||||
"Perfil 2",
|
||||
"Perfil 1"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "orden_episodios",
|
||||
"type": "bool",
|
||||
"label": "Mostrar los episodios de las series en orden descendente",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_peliculas",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - Peliculas",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_infantiles",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - Infantiles",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_terror",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - terror",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
414
plugin.video.alfa/channels/pelisplay.py
Normal file
414
plugin.video.alfa/channels/pelisplay.py
Normal file
@@ -0,0 +1,414 @@
|
||||
# -*- coding: utf-8 -*-
# -*- Channel PelisPlay -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re
import sys
import urllib
import urlparse

from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import channeltools
from core import tmdb
from platformcode import config, logger
from channelselector import get_thumb

__channel__ = "pelisplay"

host = "https://www.pelisplay.tv/"

# Channel settings; fall back to defaults when the config store is unavailable
try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
    __perfil__ = int(config.get_setting('perfil', __channel__))
except:
    __modo_grafico__ = True
    __perfil__ = 0

# Color profile palettes used when building item titles
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
    color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
    # "Sin color" profiles: no markup colors at all
    color1 = color2 = color3 = color4 = color5 = ""

# Default request headers for this site
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
           ['Referer', host]]

# Channel artwork taken from the channel's JSON manifest
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']

# Language tags used by FilterTools / AutoPlay
IDIOMAS = {'Latino': 'LAT'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'fastplay', 'openload']
|
||||
|
||||
|
||||
|
||||
def mainlist(item):
    """Build the channel's top-level menu: movies, series, Netflix, search."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = []
    itemlist.append(item.clone(title="Peliculas", action="menumovies", text_blod=True,
                               viewcontent='movie', viewmode="movie_with_plot",
                               thumbnail=get_thumb("channels_movie.png")))
    itemlist.append(item.clone(title="Series", action="menuseries", text_blod=True, extra='serie',
                               mediatype="tvshow", viewcontent='tvshow', viewmode="tvshow_with_plot",
                               thumbnail=get_thumb("channels_tvshow.png")))
    itemlist.append(item.clone(title="Netflix", action="flixmovies", text_blod=True, extra='serie',
                               mediatype="tvshow", viewcontent='tvshows', viewmode="movie_with_plot",
                               fanart='https://i.postimg.cc/jjN85j8s/netflix-logo.png',
                               thumbnail='http://img.app.kiwi/icon/jcbqFma-5e91cY9MlEasA-fvCRJK493MxphrqbBd8oS74FtYg00IXeOAn0ahsLprxIA'))
    itemlist.append(item.clone(title="Buscar", action="search", text_blod=True, extra='buscar',
                               thumbnail=get_thumb('search.png'), url=host + 'buscar'))

    autoplay.show_option(item.channel, itemlist)
    return itemlist
|
||||
|
||||
|
||||
def menumovies(item):
    """Build the movies sub-menu (new releases, filters, years, genres, search)."""
    logger.info()
    itemlist = [item.clone(title="Estrenos", action="peliculas", text_blod=True,
                           viewcontent='movie', url=host + 'peliculas/estrenos', viewmode="movie_with_plot"),
                item.clone(title="Más Populares", action="peliculas", text_blod=True,
                           viewcontent='movie', url=host + 'peliculas?filtro=visitas', viewmode="movie_with_plot"),
                # typo fix: "Recíen" -> "Recién"
                item.clone(title="Recién Agregadas", action="peliculas", text_blod=True,
                           viewcontent='movie', url=host + 'peliculas?filtro=fecha_creacion', viewmode="movie_with_plot"),
                item.clone(title="Por año", action="p_portipo", text_blod=True, extra="Películas Por año",
                           viewcontent='movie', url=host, viewmode="movie_with_plot"),
                item.clone(title="Géneros", action="p_portipo", text_blod=True, extra='movie',
                           viewcontent='movie', url=host+'peliculas', viewmode="movie_with_plot"),
                item.clone(title="Buscar", action="search", text_blod=True, extra='buscarp',
                           thumbnail=get_thumb('search.png'), url=host+'peliculas')]
    return itemlist
|
||||
|
||||
def flixmovies(item):
    """Build the Netflix-content sub-menu (filters, years, genres)."""
    logger.info()
    itemlist = [item.clone(title="Novedades", action="peliculas", text_blod=True,
                           url=host + 'peliculas/netflix?filtro=fecha_actualizacion',
                           viewcontent='movie', viewmode="movie_with_plot"),
                item.clone(title="Más Vistas", action="peliculas", text_blod=True,
                           viewcontent='movie', url=host + 'peliculas/netflix?filtro=visitas', viewmode="movie_with_plot"),
                # typo fix: "Recíen" -> "Recién"
                item.clone(title="Recién Agregadas", action="peliculas", text_blod=True,
                           viewcontent='movie', url=host + 'peliculas/netflix?filtro=fecha_creacion', viewmode="movie_with_plot"),
                item.clone(title="Por año", action="p_portipo", text_blod=True, extra="Películas Por año",
                           viewcontent='movie', url=host, viewmode="movie_with_plot"),
                item.clone(title="Géneros", action="p_portipo", text_blod=True, extra='movie',
                           viewcontent='movie', url=host+'netflix', viewmode="movie_with_plot")]
    return itemlist
|
||||
|
||||
|
||||
def menuseries(item):
    """Build the series sub-menu (new, most viewed, genres, search)."""
    logger.info()
    itemlist = [item.clone(title="Novedades", action="series", text_blod=True, extra='serie', mediatype="tvshow",
                           viewcontent='tvshow', url=host + 'series', viewmode="tvshow_with_plot"),

                item.clone(title="Más Vistas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
                           viewcontent='tvshow', url=host + 'series?filtro=visitas', viewmode="tvshow_with_plot"),

                # typo fix: "Recíen" -> "Recién"
                item.clone(title="Recién Agregadas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
                           viewcontent='tvshow', url=host + 'series?filtro=fecha_actualizacion', viewmode="tvshow_with_plot"),

                item.clone(title="Géneros", action="p_portipo", text_blod=True, extra='serie',
                           viewcontent='movie', url=host+'series', viewmode="movie_with_plot"),
                item.clone(title="Buscar", action="search", text_blod=True, extra='buscars',
                           thumbnail=get_thumb('search.png'), url=host+'series')]

    return itemlist
|
||||
|
||||
|
||||
def p_portipo(item):
    """List the category entries (genres / years) scraped from a section page."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data)

    # Which listing action a category should open, keyed by section type;
    # unknown section types keep the original empty action
    if item.extra == 'movie':
        next_action = 'peliculas'
    elif item.extra == 'serie':
        next_action = 'series'
    else:
        next_action = ''

    patron = ('<li class="item"><a href="([^"]+)" class="category">.*?'  # url
              '<div class="[^<]+<img class="[^"]+" src="/([^"]+)"></div><div class="[^"]+">([^<]+)</div>')
    for cat_url, cat_thumb, cat_title in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(item.clone(action=next_action,
                                   title=cat_title,
                                   url=cat_url,
                                   thumbnail=cat_thumb))

    itemlist.sort(key=lambda it: it.title)
    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List the movies on the current page and append a next-page entry."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data)

    patron = ('<img class="posterentrada" src="/([^"]+)".*?'         # thumbnail
              '<a href="([^"]+)">.*?'                                # url
              '<p class="description_poster">.*?\(([^<]+)\)</p>.*?'  # year
              '<div class="Description"> <div>([^<]+)</div>.*?'      # plot
              '<strong>([^<]+)</strong></h4>')                       # title

    for thumb, url, year, plot, title in re.compile(patron, re.DOTALL).findall(data):
        if item.infoLabels['plot'] == '':
            item.plot = plot

        itemlist.append(Item(channel=item.channel, action="findvideos", contentTitle=title,
                             infoLabels={"year": year}, thumbnail=host + thumb,
                             url=url, title=title, plot=plot))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    pagination = scrapertools.find_single_match(data, '<li><a href="([^"]+)" rel="next">')
    if pagination:
        itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »",
                             url=pagination, folder=True, text_blod=True, thumbnail=get_thumb("next.png")))

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Run a site search for *texto*, dispatching to movies or series."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = urlparse.urljoin(item.url, "?q={0}".format(texto))
    # Section-specific searches use the "buscar" query parameter instead
    if item.extra in ('buscarp', 'buscars'):
        item.url = urlparse.urljoin(item.url, "?buscar={0}".format(texto))

    try:
        if item.extra == 'buscars':
            return series(item)
        return peliculas(item)
    # Swallow everything so the global search never breaks on one channel
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
|
||||
|
||||
|
||||
pagination = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
|
||||
if pagination:
|
||||
itemlist.append(Item(channel=item.channel, action="sub_search",
|
||||
title="» Siguiente »", url=pagination, thumbnail=get_thumb("next.png")))
|
||||
return itemlist
|
||||
|
||||
|
||||
def newest(categoria):
    """Entry point for the aggregated "Novedades" sections.

    Returns the newest items for the given category, or [] on error so a
    failing channel never breaks the aggregated listing.
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = host + 'movies/'
        elif categoria == 'infantiles':
            item.url = host + "genre/animacion/"
        elif categoria == 'terror':
            item.url = host + "genre/terror/"
        else:
            return []

        itemlist = peliculas(item)
        # Drop the pagination entry; guard against an empty result list so an
        # empty page no longer raises (and logs) a spurious IndexError
        if itemlist and itemlist[-1].title == "» Siguiente »":
            itemlist.pop()

    # Swallow everything so the "Novedades" section never breaks on one channel
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
||||
|
||||
|
||||
def series(item):
    """List the TV shows on the current page and append a next-page entry."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data)

    patron = ('<img class="portada" src="/([^"]+)"><[^<]+><a href="([^"]+)".*?'  # thumb, url
              'class="link-title"><h2>([^<]+)</h2>')                             # title

    for show_thumb, show_url, show_title in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(Item(channel=__channel__, title=show_title, extra='serie',
                             url=show_url, thumbnail=host + show_thumb,
                             contentSerieName=show_title, show=show_title,
                             action="temporadas", contentType='tvshow'))

    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    pagination = scrapertools.find_single_match(data, '<li><a href="([^"]+)" rel="next">')
    if pagination:
        itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=pagination,
                             thumbnail=get_thumb("next.png")))
    return itemlist
|
||||
|
||||
|
||||
def temporadas(item):
    """List the seasons of a show.

    Falls back to listing episodes directly when at most one season is found.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data)
    patron = '<img class="posterentrada" src="/([^"]+)" alt="\w+\s*(\w+).*?'
    patron += 'class="abrir_temporada" href="([^"]+)">'  # img, season, url
    matches = re.compile(patron, re.DOTALL).findall(data)

    if len(matches) > 1:
        for scrapedthumbnail, temporada, url in matches:
            new_item = item.clone(action="episodios", season=temporada, url=url,
                                  thumbnail=host+scrapedthumbnail, extra='serie')
            new_item.infoLabels['season'] = temporada
            new_item.extra = ""
            itemlist.append(new_item)
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
        for i in itemlist:
            i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
            if i.infoLabels['title']:
                # If the season has its own name, append it to the item title
                i.title += " - %s" % (i.infoLabels['title'])
            # 'in' instead of the Python2-only dict.has_key()
            if 'poster_path' in i.infoLabels:
                # If the season has its own poster, replace the show poster
                i.thumbnail = i.infoLabels['poster_path']
        itemlist.sort(key=lambda it: it.title)
        if config.get_videolibrary_support() and len(itemlist) > 0:
            itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
                                 action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
                                 text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
        return itemlist
    else:
        return episodios(item)
|
||||
|
||||
|
||||
def episodios(item):
    """List the episodes of a show by replaying the site's season AJAX POST."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data)
    # The episode list is served by an AJAX endpoint; replay its form POST
    post_link = '%sentradas/abrir_temporada' % host
    token = scrapertools.find_single_match(data, 'data-token="([^"]+)">')
    data_t = scrapertools.find_single_match(data, '<a data-s="[^"]+" data-t="([^"]+)"')
    data_s = scrapertools.find_single_match(data, '<a data-s="([^"]+)" data-t="[^"]+"')
    post = {'t': data_t, 's': data_s, '_token': token}
    post = urllib.urlencode(post)
    new_data = httptools.downloadpage(post_link, post=post).data
    patron = '"nepisodio":"([^"]+)",[^,]+,"ntemporada":"([^"]+)".*?"url_directa":"([^"]+)",.*?"titulo":"([^"]+)",'

    matches = re.compile(patron, re.DOTALL).findall(new_data)
    for episode, season, scrapedurl, scrapedname in matches:
        scrapedurl = scrapedurl.replace('\\', '')

        if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
            continue
        title = "%sx%s: %s" % (season, episode.zfill(2), scrapertools.unescape(scrapedname))
        new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
                              contentType="episode", extra='serie')
        if 'infoLabels' not in new_item:
            new_item.infoLabels = {}
        new_item.infoLabels['season'] = season
        new_item.infoLabels['episode'] = episode.zfill(2)
        itemlist.append(new_item)
    # TODO: skip this when adding to the video library
    if not item.extra:
        # Fetch metadata for all episodes of the season (threaded)
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
        for i in itemlist:
            if i.infoLabels['title']:
                # If the episode has its own name, append it to the title
                i.title = "%sx%s: %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title'])
            # 'in' instead of the Python2-only dict.has_key()
            if 'poster_path' in i.infoLabels:
                # If the episode has its own image, replace the poster
                i.thumbnail = i.infoLabels['poster_path']
        itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
                      reverse=config.get_setting('orden_episodios', __channel__))
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    # "Add this show to the video library" entry
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
                             text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Resolve the playable hoster links for a movie/episode.

    Each player button posts its encoded payload to procesar_player, which
    answers with the real hoster URL (sometimes behind one more redirect).
    """
    logger.info()
    from core import jsontools

    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data)
    patron = 'data-player="([^"]+)"[^>]+>([^<]+)</div>.*?'  # data-player, servername
    patron += '<td class="[^"]+">([^<]+)</td><td class="[^"]+">([^<]+)</td>'  # quality, lang
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Loop-invariant values hoisted out of the per-link loop
    post_link = '%sentradas/procesar_player' % host
    token = scrapertools.find_single_match(data, 'data-token="([^"]+)">')
    idioma = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
              'castellano': '[COLOR green](CAST)[/COLOR]',
              'subtitulado': '[COLOR red](VOS)[/COLOR]'}

    for data_player, servername, quality, lang in matches:
        post = {'data': data_player, 'tipo': 'videohost', '_token': token}
        post = urllib.urlencode(post)
        new_data = httptools.downloadpage(post_link, post=post).data
        json_data = jsontools.load(new_data)
        url = json_data['data']

        if 'pelisplay.tv/embed/' in url:
            # Site-hosted embed: the direct file URL is inside the player JS
            new_data = httptools.downloadpage(url).data
            url = scrapertools.find_single_match(new_data, '"file":"([^"]+)",').replace('\\', '')

        elif 'fondo_requerido' in url:
            # Protected link: exchange it through the gkpluginsphp endpoint
            link = scrapertools.find_single_match(url, '=(.*?)&fondo_requerido').partition('&')[0]
            post = urllib.urlencode({'link': link})
            new_data2 = httptools.downloadpage('%sprivate/plugins/gkpluginsphp.php' % host, post=post).data
            url = scrapertools.find_single_match(new_data2, '"link":"([^"]+)"').replace('\\', '')

        lang = lang.lower().strip()
        if lang in idioma:
            lang = idioma[lang]

        title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (servername.title(), quality, lang)

        itemlist.append(item.clone(channel=__channel__, title=title, action='play', language=lang, quality=quality, url=url))

    itemlist = servertools.get_servers_itemlist(itemlist)
    itemlist.sort(key=lambda it: it.language, reverse=False)
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'serie':
        itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             thumbnail=get_thumb("videolibrary_movie.png"), contentTitle=item.contentTitle))
    return itemlist
|
||||
@@ -66,16 +66,21 @@ def lista(item):
|
||||
action = "menu_info"
|
||||
|
||||
# Extrae las entradas
|
||||
patron = '<div class="video-item.*?href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)"(.*?)<div class="durations">.*?</i>([^<]+)<'
|
||||
patron = '<div class="video-item.*?href="([^"]+)" '
|
||||
patron += 'title="([^"]+)".*?'
|
||||
patron += 'data-src="([^"]+)"'
|
||||
patron += '(.*?)<div class="durations">.*?'
|
||||
patron += '</i>([^<]+)<'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for scrapedurl, scrapedtitle, scrapedthumbnail, quality, duration in matches:
|
||||
if "go.php?" in scrapedurl:
|
||||
scrapedurl = urllib.unquote(scrapedurl.split("/go.php?u=")[1].split("&")[0])
|
||||
scrapedthumbnail = urlparse.urljoin(host, scrapedthumbnail)
|
||||
if not scrapedthumbnail.startswith("https"):
|
||||
scrapedthumbnail = "https:%s" % scrapedthumbnail
|
||||
else:
|
||||
scrapedurl = urlparse.urljoin(host, scrapedurl)
|
||||
if not scrapedthumbnail.startswith("https"):
|
||||
scrapedthumbnail = host + "%s" % scrapedthumbnail
|
||||
scrapedthumbnail = "https:%s" % scrapedthumbnail
|
||||
if duration:
|
||||
scrapedtitle = "%s - %s" % (duration, scrapedtitle)
|
||||
if '>HD<' in quality:
|
||||
@@ -110,7 +115,6 @@ def lista(item):
|
||||
next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list&sort_by=post_date&from=%s" % (
|
||||
item.url, next_page)
|
||||
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -225,7 +229,6 @@ def play(item):
|
||||
itemlist = []
|
||||
|
||||
data = get_data(item.url)
|
||||
|
||||
patron = '(?:video_url|video_alt_url[0-9]*)\s*:\s*\'([^\']+)\'.*?(?:video_url_text|video_alt_url[0-9]*_text)\s*:\s*\'([^\']+)\''
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
if not matches:
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"id": "rexpelis",
|
||||
"name": "Rexpelis",
|
||||
"active": true,
|
||||
"active": false,
|
||||
"adult": false,
|
||||
"language": ["lat","cast"],
|
||||
"thumbnail": "https://i.postimg.cc/MMJ5g9Y1/rexpelis1.png",
|
||||
|
||||
@@ -59,7 +59,7 @@ def list_all(item):
|
||||
itemlist = []
|
||||
|
||||
data = get_source(item.url)
|
||||
patron = '<div class="post-thumbnail"><a href="([^"]+)" title="([^"]+)">.*?data-src="([^"]+)"'
|
||||
patron = '<div class="post-thumbnail"><a href="([^"]+)" title="([^"]+)">.*?data-lazy-src="([^"]+)"'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
|
||||
|
||||
@@ -3,11 +3,11 @@
|
||||
"name": "ThumbZilla",
|
||||
"active": true,
|
||||
"adult": true,
|
||||
"language": "*",
|
||||
"language": "en",
|
||||
"fanart": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/adults/xthearebg.jpg",
|
||||
"thumbnail": "https://ci.phncdn.com/www-static/thumbzilla/images/pc/logo.png",
|
||||
"thumbnail": "https://ci.phncdn.com/www-static/thumbzilla/images/pc/logo.png?cache=2018110203",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"categories": [
|
||||
"adult"
|
||||
],
|
||||
"settings": [
|
||||
@@ -35,4 +35,3 @@
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
@@ -44,28 +44,36 @@ def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
itemlist.append(Item(channel=__channel__, action="videos", title="Más Calientes", url=host,
|
||||
viewmode="movie", thumbnail=get_thumb("/channels_adult.png")))
|
||||
viewmode="movie", thumbnail=get_thumb("channels_adult.png")))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Nuevas", url=host + '/newest',
|
||||
action="videos", viewmode="movie_with_plot", viewcontent='movies',
|
||||
thumbnail=get_thumb("channels_adult.png")))
|
||||
itemlist.append(Item(channel=__channel__, title="Tendencias", url=host + '/trending',
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Tendencias", url=host + '/tending',
|
||||
action="videos", viewmode="movie_with_plot", viewcontent='movies',
|
||||
thumbnail=get_thumb("channels_adult.png")))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Mejores Videos", url=host + '/top',
|
||||
action="videos", viewmode="movie_with_plot", viewcontent='movies',
|
||||
thumbnail=get_thumb("channels_adult.png")))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Populares", url=host + '/popular',
|
||||
action="videos", viewmode="movie_with_plot", viewcontent='movies',
|
||||
thumbnail=get_thumb("channels_adult.png")))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Videos en HD", url=host + '/hd',
|
||||
action="videos", viewmode="movie_with_plot", viewcontent='movies',
|
||||
thumbnail=get_thumb("channels_adult.png")))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Caseros", url=host + '/hd',
|
||||
action="videos", viewmode="movie_with_plot", viewcontent='homemade',
|
||||
thumbnail=get_thumb("channels_adult.png")))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Categorías", action="categorias",
|
||||
url=host + '/categories/', viewmode="movie_with_plot", viewcontent='movies',
|
||||
thumbnail=get_thumb("channels_adult.png")))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Buscador", action="search", url=host,
|
||||
thumbnail=get_thumb("channels_adult.png"), extra="buscar"))
|
||||
return itemlist
|
||||
@@ -92,6 +100,7 @@ def search(item, texto):
|
||||
def videos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
|
||||
patron = '<a class="[^"]+" href="([^"]+)">' # url
|
||||
@@ -99,15 +108,20 @@ def videos(item):
|
||||
patron += '<span class="title">([^<]+)</span>.*?' # title
|
||||
patron += '<span class="duration">([^<]+)</span>' # time
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
|
||||
for scrapedurl, scrapedthumbnail, scrapedtitle, time in matches:
|
||||
title = "[%s] %s" % (time, scrapedtitle)
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action='play', title=title, thumbnail=scrapedthumbnail,
|
||||
url=host + scrapedurl, contentTile=scrapedtitle, fanart=scrapedthumbnail))
|
||||
|
||||
paginacion = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />').replace('amp;', '')
|
||||
|
||||
if paginacion:
|
||||
itemlist.append(Item(channel=item.channel, action="videos",
|
||||
thumbnail=thumbnail % 'rarrow',
|
||||
title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
@@ -116,9 +130,12 @@ def categorias(item):
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
# logger.info(data)
|
||||
patron = 'class="checkHomepage"><a href="([^"]+)".*?' # url
|
||||
patron += '<span class="count">([^<]+)</span>' # title, vids
|
||||
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, vids in matches:
|
||||
scrapedtitle = scrapedurl.replace('/categories/', '').replace('-', ' ').title()
|
||||
title = "%s (%s)" % (scrapedtitle, vids.title())
|
||||
@@ -127,17 +144,14 @@ def categorias(item):
|
||||
itemlist.append(Item(channel=item.channel, action="videos", fanart=thumbnail,
|
||||
title=title, url=url, thumbnail=thumbnail,
|
||||
viewmode="movie_with_plot", folder=True))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def play(item):
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data)
|
||||
patron = '<li><a class="qualityButton active" data-quality="([^"]+)">([^"]+)</a></li>'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for scrapedurl,calidad in matches:
|
||||
title = "[COLOR yellow](%s)[/COLOR] %s" % (calidad, item.contentTile)
|
||||
itemlist.append(item.clone(channel=item.channel, action="play", title=item.title , url=scrapedurl , folder=True) )
|
||||
return itemlist
|
||||
url = scrapertools.find_single_match(data, '"quality":"[^"]+","videoUrl":"([^"]+)"').replace('\\', '')
|
||||
itemlist.append(item.clone(url=url, title=item.contentTile))
|
||||
|
||||
return itemlist
|
||||
|
||||
@@ -13,6 +13,7 @@ from platformcode import config, logger
|
||||
__channel__ = "xms"
|
||||
|
||||
host = 'https://xxxmoviestream.com/'
|
||||
host1 = 'https://www.cam4.com/'
|
||||
try:
|
||||
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
|
||||
__perfil__ = int(config.get_setting('perfil', __channel__))
|
||||
@@ -41,7 +42,6 @@ thumbnail = 'https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Últimas", url=host + '?filtre=date&cat=0',
|
||||
@@ -60,32 +60,50 @@ def mainlist(item):
|
||||
url=host + 'categories/', viewmode="movie_with_plot", viewcontent='movies',
|
||||
thumbnail=thumbnail % '4'))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="WebCam", action="webcamenu",
|
||||
viewmode="movie_with_plot", viewcontent='movies',
|
||||
thumbnail='https://ae01.alicdn.com/kf/HTB1LDoiaHsrBKNjSZFpq6AXhFXa9/-.jpg'))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Buscador", action="search", url=host, thumbnail=thumbnail % '5'))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def webcamenu(item):
|
||||
logger.info()
|
||||
itemlist = [item.clone(title="Trending Cams", action="webcam", text_blod=True, url=host1,
|
||||
viewcontent='movies', viewmode="movie_with_plot"),
|
||||
item.clone(title="Females", action="webcam", text_blod=True,
|
||||
viewcontent='movies', url=host1 + 'female', viewmode="movie_with_plot"),
|
||||
item.clone(title="Males", action="webcam", text_blod=True,
|
||||
viewcontent='movies', url=host1 + 'male', viewmode="movie_with_plot"),
|
||||
item.clone(title="Couples", action="webcam", text_blod=True,
|
||||
viewcontent='movies', url=host1 + 'couple', viewmode="movie_with_plot"),
|
||||
item.clone(title="Trans", action="webcam", text_blod=True, extra="Películas Por año",
|
||||
viewcontent='movies', url=host1 + 'transgender', viewmode="movie_with_plot")]
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>|#038;", "", data)
|
||||
# logger.info(data)
|
||||
patron_todos = '<div id="content">(.*?)<div id="footer"'
|
||||
data = scrapertools.find_single_match(data, patron_todos)
|
||||
|
||||
patron = 'src="([^"]+)" class="attachment-thumb_site.*?' # img
|
||||
patron += '<a href="([^"]+)" title="([^"]+)".*?' #url, title
|
||||
patron += '<div class="right"><p>([^<]+)</p>' # plot
|
||||
patron += '<a href="([^"]+)" title="([^"]+)".*?' # url, title
|
||||
patron += '<div class="right"><p>([^<]+)</p>' # plot
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedthumbnail, scrapedurl, scrapedtitle, plot in matches:
|
||||
plot = scrapertools.decodeHtmlentities(plot)
|
||||
|
||||
itemlist.append(item.clone(channel=__channel__, action="findvideos", title=scrapedtitle.capitalize(),
|
||||
url=scrapedurl, thumbnail=scrapedthumbnail, infoLabels={"plot": plot}, fanart=scrapedthumbnail,
|
||||
viewmode="movie_with_plot", folder=True, contentTitle=scrapedtitle))
|
||||
url=scrapedurl, thumbnail=scrapedthumbnail, infoLabels={"plot": plot},
|
||||
fanart=scrapedthumbnail,viewmode="movie_with_plot",
|
||||
folder=True, contentTitle=scrapedtitle))
|
||||
# Extrae el paginador
|
||||
paginacion = scrapertools.find_single_match(data, '<a href="([^"]+)">Next ›</a></li><li>')
|
||||
paginacion = urlparse.urljoin(item.url, paginacion)
|
||||
@@ -95,6 +113,36 @@ def peliculas(item):
|
||||
thumbnail=thumbnail % 'rarrow',
|
||||
title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def webcam(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>|#038;", "", data)
|
||||
patron = '<div class="profileBox">.*?<a href="/([^"]+)".*?' # url
|
||||
patron += 'data-hls-preview-url="([^"]+)">.*?' # video_url
|
||||
patron += 'data-username="([^"]+)".*?' # username
|
||||
patron += 'title="([^"]+)".*?' # title
|
||||
patron += 'data-profile="([^"]+)" />' # img
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, video_url, username, scrapedtitle, scrapedthumbnail in matches:
|
||||
scrapedtitle = scrapedtitle.replace(' Chat gratis con webcam.', '')
|
||||
|
||||
itemlist.append(item.clone(channel=__channel__, action="play", title=scrapedtitle,
|
||||
url=video_url, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail,
|
||||
viewmode="movie_with_plot", folder=True, contentTitle=scrapedtitle))
|
||||
# Extrae el paginador
|
||||
paginacion = scrapertools.find_single_match(data, '<span id="pagerSpan">\d+</span> <a href="([^"]+)"')
|
||||
paginacion = urlparse.urljoin(item.url, paginacion)
|
||||
|
||||
if paginacion:
|
||||
itemlist.append(Item(channel=__channel__, action="webcam",
|
||||
thumbnail=thumbnail % 'rarrow',
|
||||
title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))
|
||||
|
||||
return itemlist
|
||||
|
||||
@@ -104,10 +152,9 @@ def categorias(item):
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
# logger.info(data)
|
||||
patron = 'data-lazy-src="([^"]+)".*?' # img
|
||||
patron += '</noscript><a href="([^"]+)".*?' # url
|
||||
patron += '<span>([^<]+)</span></a>.*?' # title
|
||||
patron = 'data-lazy-src="([^"]+)".*?' # img
|
||||
patron += '</noscript><a href="([^"]+)".*?' # url
|
||||
patron += '<span>([^<]+)</span></a>.*?' # title
|
||||
patron += '<span class="nb_cat border-radius-5">([^<]+)</span>' # num_vids
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
@@ -143,16 +190,15 @@ def sub_search(item):
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>", "", data)
|
||||
|
||||
patron = 'data-lazy-src="([^"]+)".*?' # img
|
||||
patron += 'title="([^"]+)" />.*?' # title
|
||||
patron += '</noscript><a href="([^"]+)".*?' # url
|
||||
patron = 'data-lazy-src="([^"]+)".*?' # img
|
||||
patron += 'title="([^"]+)" />.*?' # title
|
||||
patron += '</noscript><a href="([^"]+)".*?' # url
|
||||
patron += '<div class="right"><p>([^<]+)</p>' # plot
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedthumbnail, scrapedtitle, scrapedurl, plot in matches:
|
||||
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, plot=plot, fanart=scrapedthumbnail,
|
||||
action="findvideos", thumbnail=scrapedthumbnail))
|
||||
action="findvideos", thumbnail=scrapedthumbnail))
|
||||
|
||||
paginacion = scrapertools.find_single_match(
|
||||
data, "<a href='([^']+)' class=\"inactive\">\d+</a>")
|
||||
@@ -168,8 +214,6 @@ def findvideos(item):
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data)
|
||||
# logger.info(data)
|
||||
|
||||
patron = '<iframe src="[^"]+".*?<iframe src="([^"]+)" scrolling="no" frameborder="0"'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
|
||||
@@ -179,5 +223,4 @@ def findvideos(item):
|
||||
|
||||
itemlist.append(item.clone(action='play', title=title, server=server, url=url))
|
||||
|
||||
|
||||
return itemlist
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
"id": "youporn",
|
||||
"name": "youporn",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"adult": true,
|
||||
"language": ["*"],
|
||||
"thumbnail": "https://fs.ypncdn.com/cb/bundles/youpornwebfront/images/l_youporn_black.png",
|
||||
"banner": "",
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
"ignore_urls": [],
|
||||
"patterns": [
|
||||
{
|
||||
"pattern": "vidup.(?:me|tv)/(?:embed-|)([A-z0-9]+)",
|
||||
"pattern": "vidup.(?:me|tv|io)/(?:embed-|)([A-z0-9]+)",
|
||||
"url": "http://vidup.tv/embed-\\1.html"
|
||||
}
|
||||
]
|
||||
|
||||
@@ -20,8 +20,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
|
||||
video_urls = []
|
||||
post= {}
|
||||
post = urllib.urlencode(post)
|
||||
url = httptools.downloadpage(page_url, follow_redirects=False, only_headers=True).headers.get("location", "")
|
||||
data = httptools.downloadpage("https://vidup.io/api/serve/video/" + scrapertools.find_single_match(url, "embed/([A-z0-9]+)"), post=post).data
|
||||
headers = {"Referer":page_url}
|
||||
url = httptools.downloadpage(page_url, follow_redirects=False, headers=headers, only_headers=True).headers.get("location", "")
|
||||
data = httptools.downloadpage("https://vidup.io/api/serve/video/" + scrapertools.find_single_match(url, "embed.([A-z0-9]+)"), post=post).data
|
||||
bloque = scrapertools.find_single_match(data, 'qualities":\{(.*?)\}')
|
||||
matches = scrapertools.find_multiple_matches(bloque, '"([^"]+)":"([^"]+)')
|
||||
for res, media_url in matches:
|
||||
|
||||
Reference in New Issue
Block a user