Merge remote-tracking branch 'alfa-addon/master' into Fixes

This commit is contained in:
Unknown
2017-12-15 08:53:43 -03:00
16 changed files with 672 additions and 478 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.4.5" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.4.6" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,11 +19,11 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» cinetux » peliculasgratis
» gamovideo » peliculasaudiolatino
» streamixcloud » uptobox
» canalpelis » verpelis
¤ arreglos internos
» gnula » playpornx
» plusdede » yaske
» streamplay » bdupload
» bitertv » userscloud
» canalpelis ¤ arreglos internos
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -243,7 +243,6 @@ def series(item):
url_next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
tmdb.set_infoLabels(itemlist, __modo_grafico__)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
if url_next_page:
@@ -274,7 +273,7 @@ def temporadas(item):
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
for i in itemlist:
i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
if i.infoLabels['title']:
@@ -328,7 +327,6 @@ def episodios(item):
if not item.extra:
# Obtenemos los datos de todos los capitulos de la temporada mediante multihilos
tmdb.set_infoLabels(itemlist, __modo_grafico__)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
for i in itemlist:
if i.infoLabels['title']:
# Si el capitulo tiene nombre propio añadirselo al titulo del item
@@ -355,26 +353,27 @@ def findvideos(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
patron = '<div id="option-(\d+)" class="play-box-iframe.*?src="([^"]+)" frameborder="0" scrolling="no" ' \
'allowfullscreen></iframe>'
patron = '<div id="option-(\d+)" class="play-box-iframe.*?src="([^"]+)" frameborder="0" scrolling="no" allowfullscreen></iframe>'
matches = re.compile(patron, re.DOTALL).findall(data)
# matches = re.compile(patron, re.DOTALL).findall(data)
for option, url in matches:
lang = scrapertools.find_single_match(data,
'<li><a class="options" href="#option-%s"><b class="icon-play_arrow"><\/b> (.*?)<span '
'class="dt_flag">' % option)
datas = httptools.downloadpage(urlparse.urljoin(host, url),
headers={'Referer': item.url}).data
patron = '<iframe[^>]+src="([^"]+)"'
url = scrapertools.find_single_match(datas, patron)
lang = scrapertools.find_single_match(
data, '<li><a class="options" href="#option-%s"><b class="icon-play_arrow"><\/b> (.*?)<span class="dt_flag">' % option)
lang = lang.replace('Español ', '').replace('B.S.O. ', '')
data_b = httptools.downloadpage(urlparse.urljoin(host, url), headers={'Referer': item.url}).data
patron = '<iframe[^>]+src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data_b)
url = matches[0]
server = servertools.get_server_from_url(url)
title = "%s [COLOR yellow](%s) (%s)[/COLOR]" % (item.contentTitle, server.title(), lang)
itemlist.append(item.clone(action='play', url=url, title=title, extra1=title, server=server, language=lang,
text_color=color3))
itemlist.append(item.clone(action='play', url=url, title=title, extra1=title,
server=server, language = lang, text_color=color3))
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',

View File

@@ -51,8 +51,6 @@ def generos(item):
def peliculas(item):
logger.info()
# Descarga la página
data = httptools.downloadpage(item.url).data
patron = '<a class="Ntooltip" href="([^"]+)">([^<]+)<span><br[^<]+'
patron += '<img src="([^"]+)"></span></a>(.*?)<br'
@@ -61,25 +59,22 @@ def peliculas(item):
for scrapedurl, scrapedtitle, scrapedthumbnail, resto in matches:
language = []
plot = scrapertools.htmlclean(resto).strip()
logger.debug('plot: %s' % plot)
languages = scrapertools.find_multiple_matches(plot, r'\((V.)\)')
quality = scrapertools.find_single_match(plot, r'(?:\[.*?\].*?)\[(.*?)\]')
for lang in languages:
language.append(lang)
logger.debug('languages: %s' % languages)
title = scrapedtitle + " " + plot
contentTitle = scrapedtitle
url = item.url + scrapedurl
if not scrapedurl.startswith("http"):
scrapedurl = item.url + scrapedurl
itemlist.append(Item(channel = item.channel,
action = 'findvideos',
title = title,
url = url,
url = scrapedurl,
thumbnail = scrapedthumbnail,
plot = plot,
hasContentDetails = True,
contentTitle = contentTitle,
contentTitle = scrapedtitle,
contentType = "movie",
context = ["buscar_trailer"],
language=language,
quality=quality
))
@@ -89,13 +84,11 @@ def peliculas(item):
def findvideos(item):
logger.info("item=" + item.tostring())
itemlist = []
# Descarga la página para obtener el argumento
data = httptools.downloadpage(item.url).data
item.plot = scrapertools.find_single_match(data, '<div class="entry">(.*?)<div class="iframes">')
item.plot = scrapertools.htmlclean(item.plot).strip()
item.contentPlot = item.plot
patron = 'Ver película online.*?>.*?>([^<]+)'
patron = '<strong>Ver película online.*?>.*?>([^<]+)'
scrapedopcion = scrapertools.find_single_match(data, patron)
titulo_opcional = scrapertools.find_single_match(scrapedopcion, ".*?, (.*)").upper()
bloque = scrapertools.find_multiple_matches(data, 'contenedor_tab.*?/table')

View File

@@ -7,7 +7,7 @@ from core import scrapertools
from core.item import Item
from platformcode import logger
host = "http://www.playpornx.net/"
host = "https://watchfreexxx.net/"
def mainlist(item):
@@ -17,7 +17,7 @@ def mainlist(item):
fanart='https://s18.postimg.org/fwvaeo6qh/todas.png',
url =host))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url='http://www.playpornx.net/?s=',
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+'?s=',
thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png',
fanart='https://s30.postimg.org/pei7txpa9/buscar.png'))
@@ -31,13 +31,21 @@ def lista(item):
if item.url == '': item.url = host
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = '<div class=item>.*?href=(.*?)><div.*?<img src=(.*?) alt.*?<h2>(.*?)<\/h2>'
if item.extra != 'Buscar':
patron = '<div class=item>.*?href=(.*?)><div.*?<img src=(.*?) alt=(.*?) width'
else:
patron = '<div class=movie>.*?<img src=(.*?) alt=(.*?) \/>.*?href=(.*?)\/>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
thumbnail = scrapedthumbnail
title = scrapedtitle
for data_1, data_2, data_3 in matches:
if item.extra != 'Buscar':
url = data_1
thumbnail = data_2
title = data_3
else:
url = data_3
thumbnail = data_1
title = data_2
itemlist.append(Item(channel=item.channel, action='findvideos', title=title, url=url, thumbnail=thumbnail))
@@ -59,6 +67,7 @@ def search(item, texto):
try:
if texto != '':
item.extra = 'Buscar'
return lista(item)
else:
return []

View File

@@ -34,7 +34,8 @@ def login():
config.get_setting("plusdedeuser", "plusdede")) + "&password=" + str(
config.get_setting("plusdedepassword", "plusdede")) + "&app=2131296469"
url = "https://www.plusdede.com/"
headers = {"Referer": url, "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": token}
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/61.0.3163.100 Safari/537.36","Referer": url, "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": token}
data = httptools.downloadpage("https://www.plusdede.com/login", post=post, headers=headers,
replace_headers=False).data
if "redirect" in data:
@@ -771,14 +772,17 @@ def checkseen(item):
if item.tipo == "8":
url_temp = "https://www.plusdede.com/set/episode/" + item.data_id + "/seen"
tipo_str = "series"
headers = {"Referer": "https://www.plusdede.com/serie/", "X-Requested-With": "XMLHttpRequest",
"X-CSRF-TOKEN": item.token}
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/61.0.3163.100 Safari/537.36", "Referer": "https://www.plusdede.com/serie/",
"X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": item.token}
else:
url_temp = "https://www.plusdede.com/set/usermedia/" + item.tipo + "/" + item.data_id + "/seen"
tipo_str = "pelis"
headers = {"Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest",
"X-CSRF-TOKEN": item.token}
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/61.0.3163.100 Safari/537.36", "Referer": "https://www.plusdede.com/serie/",
"X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": item.token}
data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers, replace_headers=True).data
#logger.debug(data)
return True
@@ -927,7 +931,8 @@ def plusdede_check(item):
tipo_str = "listas"
else:
tipo_str = "pelis"
headers = {"Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest",
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/61.0.3163.100 Safari/537.36","Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest",
"X-CSRF-TOKEN": item.token}
data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers,
replace_headers=True).data.strip()

View File

@@ -267,10 +267,9 @@ def channel_search(search_results, channel_parameters, tecleado):
if result is None:
result = []
if len(result):
if not channel_parameters["title"] in search_results:
search_results[channel_parameters["title"]] = []
search_results[channel_parameters["title"]].append({"item": item,
if not channel_parameters["title"].capitalize() in search_results:
search_results[channel_parameters["title"].capitalize()] = []
search_results[channel_parameters["title"].capitalize()].append({"item": item,
"itemlist": result,
"adult": channel_parameters["adult"]})

View File

@@ -105,7 +105,6 @@ def peliculas(item):
patron += 'post(.*?)</div.*?'
patron += 'text-muted f-14">(.*?)</h3'
matches = scrapertools.find_multiple_matches(data, patron)
patron_next_page = 'href="([^"]+)"> &raquo;'
matches_next_page = scrapertools.find_single_match(data, patron_next_page)
if len(matches_next_page) > 0:
@@ -131,7 +130,7 @@ def peliculas(item):
tmdb.set_infoLabels(itemlist)
# Si es necesario añadir paginacion
if url_next_page:
if matches_next_page:
itemlist.append(
Item(channel=item.channel, action="peliculas", title=">> Página siguiente", thumbnail=thumbnail_host,
url=url_next_page, folder=True, text_color=color3, text_bold=True))
@@ -175,18 +174,25 @@ def findvideos(item):
mtmdb = scrapertools.find_single_match(item.url, 'yaske.ro/([0-9]+)')
patron = '(?s)id="online".*?server="([^"]+)"'
mserver = scrapertools.find_single_match(data, patron)
url = "http://olimpo.link/?tmdb=%s&server=%s" %(mtmdb, mserver)
data = httptools.downloadpage(url).data
url_m = "http://olimpo.link/?tmdb=%s&server=%s" %(mtmdb, mserver)
patron = '/\?tmdb=[^"]+.*?domain=(?:www\.|)([^\.]+).*?text-overflow.*?href="([^"]+).*?'
patron += '\[([^\]]+)\].*?\[([^\]]+)\]'
data = httptools.downloadpage(url_m).data
matches = scrapertools.find_multiple_matches(data, patron)
for server, url, idioma, calidad in matches:
if "drive" in server:
server = "gvideo"
sublist.append(item.clone(channel=item.channel, action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
language=idioma.strip(),
title="Ver en %s %s" %(server, calidad)
))
page = 2
while len(matches)>0:
for server, url, idioma, calidad in matches:
if "drive" in server:
server = "gvideo"
sublist.append(item.clone(action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
language=idioma.strip(),
server = server,
title="Ver en %s %s" %(server, calidad)
))
data = httptools.downloadpage(url_m + "&page=%s" %page).data
matches = scrapertools.find_multiple_matches(data, patron)
page +=1
sublist = sorted(sublist, key=lambda Item: Item.quality + Item.server)
for k in ["Español", "Latino", "Ingles - Sub Español", "Ingles"]:
lista_idioma = filter(lambda i: i.language == k, sublist)
if lista_idioma:
@@ -213,6 +219,6 @@ def play(item):
ddd = httptools.downloadpage(item.url).data
url = "http://olimpo.link" + scrapertools.find_single_match(ddd, '<iframe src="([^"]+)')
item.url = httptools.downloadpage(url + "&ge=1", follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist.append(item.clone())
itemlist.append(item.clone(server = ""))
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist

View File

@@ -1,38 +0,0 @@
{
"id": "zpeliculas",
"name": "Zpeliculas",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"banner": "zpeliculas.png",
"thumbnail": "zpeliculas.png",
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,370 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urllib
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
def mainlist(item):
    """Top-level menu of the zpeliculas channel."""
    logger.info()
    host = "http://www.zpeliculas.com"
    fanart = "http://www.zpeliculas.com/templates/mytopV2/images/background.png"
    itemlist = []
    itemlist.append(Item(channel=item.channel, action="peliculas", title="Últimas peliculas",
                         url="http://www.zpeliculas.com/", fanart=fanart, viewmode="movie"))
    itemlist.append(Item(channel=item.channel, action="generos", title="Por género",
                         url=host, fanart=fanart))
    itemlist.append(Item(channel=item.channel, action="alfabetico", title="Listado alfabético",
                         fanart=fanart))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscador",
                         url=host, fanart=fanart, viewmode="movie"))
    return itemlist
def alfabetico(item):
    """Build the alphabetical index menu (A-Z, then 0-9).

    Each entry is a "peliculas" item pointing at the matching
    http://www.zpeliculas.com/cat/<char> listing page.

    The original implementation repeated the same append 37 times; this
    loop produces exactly the same items in the same order.
    """
    logger.info()
    itemlist = []
    # Letters first, digits last — same order as the hand-written original.
    for char in "abcdefghijklmnopqrstuvwxyz0123456789":
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title=char.upper(),
                             url="http://www.zpeliculas.com/cat/" + char,
                             viewmode="movie"))
    return itemlist
def generos(item):
    """Build the genre menu; each entry lists the movies of one genre.

    The (display title, URL slug) pairs are kept in a table instead of
    18 copy-pasted appends; items and their order are unchanged.
    """
    logger.info()
    itemlist = []
    # (accented display title, unaccented path slug), in original menu order.
    generos_tabla = [
        ("Acción", "accion"),
        ("Animación", "animacion"),
        ("Aventura", "aventura"),
        ("Biografía", "biografia"),
        ("Bélico", "belico"),
        ("Ciencia Ficción", "cienciaficcion"),
        ("Comedia", "comedia"),
        ("Crimen", "crimen"),
        ("Drama", "drama"),
        ("Fantasía", "fantasia"),
        ("Histórico", "historico"),
        ("Intriga", "intriga"),
        ("Musical", "musical"),
        ("Romántica", "romantica"),
        ("Terror", "terror"),
        ("Thriller", "thriller"),
        ("Western", "western"),
        ("Otros", "otros"),
    ]
    for titulo, slug in generos_tabla:
        itemlist.append(Item(channel=item.channel, action="peliculas", title=titulo,
                             url="http://www.zpeliculas.com/peliculas/p-%s/" % slug,
                             viewmode="movie"))
    return itemlist
def search(item, texto):
    """Search the site for *texto* and return the matching movie items.

    POSTs the site's search form, scrapes each result card for title,
    URL, thumbnail, year, language and quality, and returns a list of
    "findvideos" items.  On ANY failure it logs and returns [] so the
    global search aggregator is never interrupted.
    """
    try:
        # The site's search is a form POST, not a query-string GET.
        post = urllib.urlencode({"story": texto, "do": "search", "subaction": "search", "x": "0", "y": "0"})
        data = scrapertools.cache_page("http://www.zpeliculas.com", post=post)
        patron = '<div class="leftpane">(.*?)<div class="clear"'
        matches = re.compile(patron, re.DOTALL).findall(data)
        itemlist = []
        for match in matches:
            # Pull each field out of the result card with its own pattern.
            scrapedtitle = scrapertools.find_single_match(match, '<div class="shortname">([^<]+)</div>')
            scrapedurl = scrapertools.find_single_match(match, '<a href="([^"]+)"')
            scrapedthumbnail = scrapertools.find_single_match(match, '<img src="([^"]+)"')
            scrapedyear = scrapertools.find_single_match(match, '<div class="year"[^>]+>([^<]+)</div>')
            scrapedidioma = scrapertools.find_single_match(match, 'title="Idioma">([^<]+)</div>')
            scrapedcalidad = scrapertools.find_single_match(match,
                                                            '<div class="shortname"[^<]+</div[^<]+<div[^>]+>([^<]+)</div>')
            # Compose "Title (year) [lang] [quality]" for the listing.
            title = scrapedtitle + ' (' + scrapedyear + ') [' + scrapedidioma + '] [' + scrapedcalidad + ']'
            url = scrapedurl
            thumbnail = scrapedthumbnail
            plot = ""
            logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
            itemlist.append(
                Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
                     show=title, fanart=thumbnail, hasContentDetails=True, contentTitle=title,
                     contentThumbnail=thumbnail,
                     contentType="movie", context=["buscar_trailer"]))
        return itemlist
    # Catch everything deliberately: a failing channel must not break the
    # global search feature, so log the traceback and return no results.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def newest(categoria):
    """Return the latest entries for the 'Novedades' (news) section.

    Only the 'peliculas' and 'infantiles' categories are handled; any
    other category — and any scraping failure — yields an empty list.
    """
    # Map each supported category to the page that lists its newest titles.
    urls_por_categoria = {
        'peliculas': "http://www.zpeliculas.com",
        'infantiles': "http://www.zpeliculas.com/peliculas/p-animacion/",
    }
    itemlist = []
    item = Item()
    try:
        if categoria not in urls_por_categoria:
            return []
        item.url = urls_por_categoria[categoria]
        itemlist = peliculas(item)
        # Drop the trailing pagination entry so only real titles remain.
        if itemlist[-1].extra == "next_page":
            itemlist.pop()
    # Catch everything on purpose: a broken channel must never interrupt
    # the aggregated "news" listing, so log and return nothing.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist
def peliculas(item):
    """Scrape a movie listing page and return its items.

    Parses each movie card for URL, thumbnail, title, quality, year and
    language, appending a "findvideos" item per movie plus (when the page
    links a next page) a final pagination item tagged extra="next_page".
    """
    logger.info()
    # Download the page.
    body = scrapertools.cachePage(item.url)
    # Narrow the HTML to the block that holds the movie cards.
    data = scrapertools.get_match(body,
                                  '<div class="shortmovies">(.*?)<div class="navigation ignore-select" align="center">')
    '''
    <div class="leftpane">
    <div class="movieposter" title="Descargar Sólo los amantes sobreviven">
    <a href="http://www.zpeliculas.com/peliculas/p-drama/1634-slo-los-amantes-sobreviven.html"><img src="http://i.imgur.com/NBPgXrp.jpg" width="110" height="150" alt="Sólo los amantes sobreviven" title="Descargar Sólo los amantes sobreviven" /></a>
    <div class="shortname">Sólo los amantes sobreviven</div>
    <div class="BDRip">BDRip</div>
    </div>
    </div>
    <div class="rightpane">
    <div style="display:block;overflow:hidden;">
    <h2 class="title" title="Sólo los amantes sobreviven"><a href="http://www.zpeliculas.com/peliculas/p-drama/1634-slo-los-amantes-sobreviven.html">Sólo los amantes sobreviven</a></h2>
    <div style="height:105px; overflow:hidden;">
    <div class="small">
    <div class="cats" title="Genero"><a href="http://www.zpeliculas.com/peliculas/p-drama/">Drama</a>, <a href="http://www.zpeliculas.com/peliculas/p-fantasia/">Fantasia</a>, <a href="http://www.zpeliculas.com/peliculas/p-romantica/">Romantica</a></div>
    <div class="year" title="A&ntilde;o">2013</div>
    <div class="ESP" title="Idioma">ESP</div>
    <div class="FA" title="Sólo los amantes sobreviven FA Official Website"><a href="http://www.filmaffinity.com/es/film851633.html" target="_blank" title="Sólo los amantes sobreviven en filmaffinity">Sólo los amantes sobreviven en FA</a></div>
    </div>
    </div>
    <div class="clear" style="height:2px;"></div>
    <div style="float:right">
    '''
    patron = '<div class="leftpane">(.*?)<div style="float\:right">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    itemlist = []
    for match in matches:
        # Per-card fields; the quality/language patterns key off the
        # position of the divs that follow "shortname" / "year".
        scrapedurl = scrapertools.find_single_match(match, '<a href="([^"]+)"')
        scrapedthumbnail = scrapertools.find_single_match(match, '<img src="([^"]+)"')
        scrapedtitle = scrapertools.find_single_match(match, '<div class="shortname">([^<]+)')
        scrapedcalidad = scrapertools.find_single_match(match,
                                                        '<div class="shortname">[^<]+</div[^<]+<div class="[^"]+">([^<]+)')
        scrapedyear = scrapertools.find_single_match(match, '<div class="year[^>]+>([^<]+)')
        scrapedidioma = scrapertools.find_single_match(match,
                                                       '<div class="year[^>]+>[^<]+</div[^<]+<div class[^>]+>([^<]+)')
        contentTitle = scrapertools.htmlclean(scrapedtitle)
        # logger.info("title="+scrapedtitle)
        # Display title: "Title (year) [lang] [quality]".
        title = contentTitle + ' (' + scrapedyear + ') [' + scrapedidioma + '] [' + scrapedcalidad + ']'
        # title = scrapertools.htmlclean(title)
        url = scrapedurl
        thumbnail = scrapedthumbnail
        plot = ""
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
        itemlist.append(
            Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
                 hasContentDetails=True, contentTitle=contentTitle, contentThumbnail=thumbnail, fanart=thumbnail,
                 contentType="movie", context=["buscar_trailer"]))
    # Pagination link lives outside the narrowed block, so search the full body.
    next_page = scrapertools.find_single_match(body, '<a href="([^"]+)">Siguiente')
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=next_page, thumbnail="",
                 plot="", show="", viewmode="movie", fanart=thumbnail, extra="next_page"))
    return itemlist
def destacadas(item):
    """Scrape the featured-movies slider and return its items.

    NOTE(review): this action is only referenced from a commented-out
    mainlist entry — presumably kept for when the site restores the slider.
    """
    logger.info()
    # Download the page.
    data = scrapertools.cachePage(item.url)
    # Narrow the HTML to the slider block.
    data = scrapertools.get_match(data, '<div id="sliderwrapper">(.*?)<div class="genreblock">')
    '''
    <div class="imageview view-first">
    <a href="/templates/mytopV2/blockpro/noimage-full.png" onclick="return hs.expand(this)"><img src="http://i.imgur.com/H4d96Wn.jpg" alt="Ocho apellidos vascos"></a>
    <div class="mask">
    <h2><a href="/peliculas/p-comedia/1403-ocho-apellidos-vascos.html" title="Ocho apellidos vascos">Ocho apellidos vascos</a></h2>
    </div>
    '''
    patron = '<div class="imageview view-first">.*?<a href=.*?>.*?src="(.*?)" alt="(.*?)"></a>.*?<h2><a href="(.*?)".*?</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    itemlist = []
    for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
        logger.info("title=" + scrapedtitle)
        title = scrapedtitle
        title = scrapertools.htmlclean(title)
        # Slider links are site-relative; prepend the host.
        url = "http://www.zpeliculas.com" + scrapedurl
        thumbnail = scrapedthumbnail
        plot = ""
        # Normalize the (empty) plot to utf-8 — Python 2 idiom.
        plot = unicode(plot, "iso-8859-1", errors="replace").encode("utf-8")
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
        itemlist.append(
            Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
                 show=title, fanart=thumbnail, hasContentDetails=True, contentTitle=title, contentThumbnail=thumbnail,
                 contentType="movie", context=["buscar_trailer"]))
    return itemlist
def sugeridas(item):
    """Scrape the suggested-movies sidebar list and return its items.

    NOTE(review): only referenced from a commented-out mainlist entry,
    kept around like destacadas().
    """
    logger.info()
    # Download the page.
    data = scrapertools.cachePage(item.url)
    # Narrow the HTML to the "links" list that holds the suggestions.
    data = scrapertools.get_match(data, '<ul class="links">(.*?)</ul>')
    '''
    <li><a href="/peliculas/p-accion/425-instinto-asesino.html" title="Descargar Instinto asesino (The Crew)"><span class="movie-name">Instinto asesino (The Crew)</span><img src="http://i.imgur.com/1xXLz.jpg" width="102" height="138" alt="Instinto asesino (The Crew)" title="Descargar Instinto asesino (The Crew)" /></a></li>
    '''
    patron = '<li>.*?<a href="(.*?)".*?"movie-name">(.*?)</span><img src="(.*?)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    itemlist = []
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        logger.info("title=" + scrapedtitle)
        title = scrapedtitle
        title = scrapertools.htmlclean(title)
        # List links are site-relative; prepend the host.
        url = "http://www.zpeliculas.com" + scrapedurl
        thumbnail = scrapedthumbnail
        plot = ""
        # Normalize the (empty) plot to utf-8 — Python 2 idiom.
        plot = unicode(plot, "iso-8859-1", errors="replace").encode("utf-8")
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
        itemlist.append(
            Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
                 show=title, fanart=thumbnail, hasContentDetails=True, contentTitle=title, contentThumbnail=thumbnail,
                 contentType="movie", context=["buscar_trailer"]))
    return itemlist
def findvideos(item):
    """Fetch the movie page, attach its synopsis to the item and delegate
    video-link discovery to the generic server detector."""
    logger.info("item=" + item.tostring())
    # Download the page to pick up the plot text.
    data = scrapertools.cachePage(item.url)
    plot = scrapertools.find_single_match(data, '<div class="contenttext">([^<]+)<')
    plot = plot.strip()
    item.plot = plot
    item.contentPlot = plot
    logger.info("plot=" + item.plot)
    return servertools.find_video_items(item=item, data=data)

View File

@@ -0,0 +1,43 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://bdupload.info/[A-Za-z0-9]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "bdupload",
"name": "bdupload",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s18.postimg.org/68colqvyx/logo-bdupload.png",
"version": 1
}

View File

@@ -0,0 +1,36 @@
# -*- coding: utf-8 -*-
import time
from core import httptools
from core import scrapertools
from platformcode import logger
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
def test_video_exists(page_url):
    """Check whether the bdupload file is still available.

    Returns a (exists, message) tuple; message is only meaningful when
    exists is False.
    """
    logger.info("(page_url='%s')" % page_url)
    page_html = httptools.downloadpage(page_url).data
    deleted = "Archive no Encontrado" in page_html
    if deleted:
        return False, "[bdupload] El fichero ha sido borrado"
    return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
    """Resolve the direct media URL for a bdupload page.

    Scrapes the hidden inputs of the download form, re-submits the form and
    extracts the final link from the ``window.open(...)`` call in the reply.

    Returns a list of ``[label, url]`` pairs as expected by the player; the
    URL carries a ``|User-Agent=...`` suffix so the same UA is used on play.
    """
    import urllib  # function-scope: only needed to encode the form post safely
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    # Collect every hidden input so the download form can be replayed as-is.
    patron = '(?s)type="hidden" name="([^"]+)".*?value="([^"]*)"'
    fields = scrapertools.find_multiple_matches(data, patron)
    # urlencode escapes '&', '=' and spaces inside values, which the previous
    # plain string concatenation would silently corrupt (it also left a
    # dangling trailing '&').
    post = urllib.urlencode(fields)
    # Small delay: the site rejects form posts that arrive immediately after
    # the page load.
    time.sleep(1)
    data1 = httptools.downloadpage(page_url, post=post, headers=headers).data
    patron = "window.open\('([^']+)"
    media_url = scrapertools.find_single_match(data1, patron)
    media_url += "|User-Agent=" + headers['User-Agent']
    video_urls = [[".MP4 [bdupload]", media_url]]
    return video_urls

View File

@@ -0,0 +1,43 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(http://biter.tv/v/[A-z0-9]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "bitertv",
"name": "Bitertv",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s18.postimg.org/f56rayqq1/logo-bitertv.png",
"version": 1
}

View File

@@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
def test_video_exists(page_url):
    """Return (True, "") when the biter.tv video is reachable, otherwise
    (False, reason)."""
    logger.info("(page_url='%s')" % page_url)
    response_html = httptools.downloadpage(page_url).data
    if "Archive no Encontrado" not in response_html:
        return True, ""
    return False, "[bitertv] El fichero ha sido borrado"
def get_video_url(page_url, user="", password="", video_password=""):
    """Extract the direct media URL from a biter.tv player page.

    Returns a list with a single ``[label, url]`` pair.
    """
    logger.info("(page_url='%s')" % page_url)
    page_data = httptools.downloadpage(page_url).data
    # The embedded player declares the stream as:  file: '<url>'
    media_url = scrapertools.find_single_match(page_data, "(?s)file: '([^']+)")
    return [[".MP4 [bitertv]", media_url]]

File diff suppressed because one or more lines are too long

View File

@@ -1,6 +1,9 @@
# -*- coding: utf-8 -*-
import re
import base64
import urllib
from core import httptools
from core import scrapertools
@@ -25,12 +28,11 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
referer = re.sub(r"embed-|player-", "", page_url)[:-5]
referer = page_url.replace('iframe', 'preview')
data = httptools.downloadpage(page_url, headers={'Referer': referer}).data
packed = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
unpacked = jsunpack.unpack(packed)
_0xd003 = scrapertools.find_single_match(data, 'var _0xd003=(\[[^;]+\]);')
_0xd003 = scrapertools.find_single_match(data, 'var _0x[0-f]+=(\[[^;]+\]);')
video_urls = []
url = scrapertools.find_single_match(unpacked, '(http[^,]+\.mp4)')
@@ -45,7 +47,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
class S:
def __init__(self, _0xd003):
def __init__(self, var):
self.r = None
self.s = None
self.k = None
@@ -54,8 +56,48 @@ class S:
self.b = None
self.d = None
_0xd003 = eval(_0xd003)
self.t(_0xd003[13] + _0xd003[14] + _0xd003[13] + _0xd003[14], _0xd003[15])
var = eval(var)
for x in range(0x1f0, 0, -1):
var.append(var.pop(0))
self.var = var
self.t(
self.decode_index('0x22', '!UJH') +
self.decode_index('0x23', 'NpE)') +
self.decode_index('0x24', '4uT2') +
self.decode_index('0x23', 'NpE)'),
self.decode_index('0x25', '@ZC2')
)
def decode_index(self, index, key):
    """RC4-decrypt one entry of the obfuscated string table ``self.var``.

    index: hex string (e.g. '0x22') selecting the table entry.
    key:   RC4 key used to decrypt that entry.

    The entry is base64-decoded, percent-unquoted to UTF-8, then XOR-ed
    against a standard RC4 keystream derived from ``key``.
    """
    b64_data = self.var[int(index, 16)];
    result = ''
    _0xb99338 = 0x0  # RC4 index j
    _0x25e3f4 = 0x0  # RC4 index i (used in the keystream phase)
    data = base64.b64decode(b64_data)
    data = urllib.unquote(data).decode('utf8')
    _0x5da081 = [x for x in range(0x100)]  # RC4 state array S = 0..255
    # Key-scheduling algorithm (KSA): permute S with the key.
    for x in range(0x100):
        _0xb99338 = (_0xb99338 + _0x5da081[x] + ord(key[x % len(key)])) % 0x100
        _0x139847 = _0x5da081[x]
        _0x5da081[x] = _0x5da081[_0xb99338]
        _0x5da081[_0xb99338] = _0x139847
    _0xb99338 = 0x0  # reset j before keystream generation, per RC4
    # Pseudo-random generation (PRGA): XOR each ciphertext byte with keystream.
    for _0x11ebc5 in range(len(data)):
        _0x25e3f4 = (_0x25e3f4 + 0x1) % 0x100
        _0xb99338 = (_0xb99338 + _0x5da081[_0x25e3f4]) % 0x100
        _0x139847 = _0x5da081[_0x25e3f4]
        _0x5da081[_0x25e3f4] = _0x5da081[_0xb99338]
        _0x5da081[_0xb99338] = _0x139847
        result += chr(ord(data[_0x11ebc5]) ^ _0x5da081[(_0x5da081[_0x25e3f4] + _0x5da081[_0xb99338]) % 0x100])
    return result
def decode(self, url):
_hash = re.compile('[A-z0-9_-]{40,}', re.DOTALL).findall(url)[0]

View File

@@ -2,6 +2,7 @@
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
@@ -21,7 +22,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_urls = []
data = httptools.downloadpage(page_url).data
media_url = scrapertools.find_single_match(data, '<source src="([^"]+)"')
packed = scrapertools.find_single_match(data, "function\(p,a,c,k.*?</script>")
unpacked = jsunpack.unpack(packed)
media_url = scrapertools.find_single_match(unpacked, 'src"value="([^"]+)')
if not media_url:
id_ = page_url.rsplit("/", 1)[1]
rand = scrapertools.find_single_match(data, 'name="rand" value="([^"]+)"')