Merge pull request #129 from Intel11/patch-4

Updated
This commit is contained in:
Alfa
2017-10-14 20:17:55 -04:00
committed by GitHub
18 changed files with 366 additions and 822 deletions

View File

@@ -92,9 +92,14 @@ def peliculas(item):
     matches = scrapertools.find_multiple_matches(data, patron)
     for url, thumbnail, titulo, varios in matches:
         idioma = scrapertools.find_single_match(varios, '(?s)Idioma.*?kinopoisk">([^<]+)')
+        number_idioma = scrapertools.find_single_match(idioma, '[0-9]')
+        mtitulo = titulo
+        if number_idioma != "":
+            idioma = ""
+        else:
+            mtitulo += " (" + idioma + ")"
         year = scrapertools.find_single_match(varios, 'Año.*?kinopoisk">([^<]+)')
         year = scrapertools.find_single_match(year, '[0-9]{4}')
-        mtitulo = titulo + " (" + idioma + ")"
         if year:
             mtitulo += " (" + year + ")"
             item.infoLabels['year'] = int(year)
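The added lines suppress the language suffix whenever the scraped "Idioma" value contains a digit (a sign the regex captured junk such as a rating) and append the year separately. A minimal standalone sketch of that logic, using plain re instead of scrapertools and invented sample strings:

import re

def build_title(titulo, idioma, year_text):
    # Mirrors the hunk: a digit inside the scraped language marks it as junk.
    mtitulo = titulo
    if re.search('[0-9]', idioma):
        idioma = ""
    else:
        mtitulo += " (" + idioma + ")"
    year = re.search('[0-9]{4}', year_text)
    if year:
        mtitulo += " (" + year.group(0) + ")"
    return mtitulo

print(build_title("Pelicula", "Castellano", "Estreno: 2016"))  # Pelicula (Castellano) (2016)
print(build_title("Pelicula", "7.5", ""))                      # Pelicula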

View File

@@ -0,0 +1,36 @@
{
"id": "animeyt",
"name": "AnimeYT",
"active": true,
"adult": false,
"language": "es",
"thumbnail": "http://i.imgur.com/dHpupFk.png",
"version": 1,
"changes": [
{
"date": "17/05/2017",
"description": "Fix novedades y replace en findvideos"
}
],
"categories": [
"anime"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "información extra",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,187 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import tmdb
from platformcode import config,logger
__modo_grafico__ = config.get_setting('modo_grafico', 'animeyt')
HOST = "http://animeyt.tv/"
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(Item(channel=item.channel, title="Novedades", action="novedades", url=HOST))
itemlist.append(Item(channel=item.channel, title="Recientes", action="recientes", url=HOST))
itemlist.append(Item(channel=item.channel, title="Alfabético", action="alfabetico", url=HOST))
itemlist.append(Item(channel=item.channel, title="Búsqueda", action="search", url=urlparse.urljoin(HOST, "busqueda?terminos=")))
return itemlist
def novedades(item):
logger.info()
itemlist = list()
if not item.pagina:
item.pagina = 0
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron_novedades = '<div class="capitulos-portada">[\s\S]+?<h2>Comentarios</h2>'
data_novedades = scrapertools.find_single_match(data, patron_novedades)
patron = 'href="([^"]+)"[\s\S]+?src="([^"]+)"[^<]+alt="([^"]+) (\d+)([^"]+)'
matches = scrapertools.find_multiple_matches(data_novedades, patron)
for url, img, scrapedtitle, eps, info in matches[item.pagina:item.pagina + 20]:
title = scrapedtitle + " " + "1x" + eps + info
title = title.replace("Sub Español", "").replace("sub español", "")
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(Item(channel=item.channel, title=title, url=url, thumb=img, action="findvideos", contentTitle=scrapedtitle, contentSerieName=scrapedtitle, infoLabels=infoLabels, contentType="tvshow"))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for it in itemlist:
it.thumbnail = it.thumb
except:
pass
if len(matches) > item.pagina + 20:
pagina = item.pagina + 20
itemlist.append(item.clone(channel=item.channel, action="novedades", url=item.url, title=">> Página Siguiente", pagina=pagina))
return itemlist
def alfabetico(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
for letra in '0ABCDEFGHIJKLMNOPQRSTUVWXYZ':
titulo = letra
if letra == "0":
letra = "num"
itemlist.append(Item(channel=item.channel, action="recientes", title=titulo,
url=urlparse.urljoin(HOST, "animes?tipo=0&genero=0&anio=0&letra={letra}".format(letra=letra))))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ","+")
item.url = item.url+texto
if texto!='':
return recientes(item)
def recientes(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron_recientes = '<article class="anime">[\s\S]+?</main>'
data_recientes = scrapertools.find_single_match(data, patron_recientes)
patron = '<a href="([^"]+)"[^<]+<img src="([^"]+)".+?js-synopsis-reduce">(.*?)<.*?<h3 class="anime__title">(.*?)<small>(.*?)</small>'
matches = scrapertools.find_multiple_matches(data_recientes, patron)
for url, thumbnail, plot, title, cat in matches:
itemlist.append(item.clone(title=title, url=url, action="episodios", show=title, thumbnail=thumbnail, plot=plot, cat=cat))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
paginacion = scrapertools.find_single_match(data, '<a class="pager__link icon-derecha last" href="([^"]+)"')
paginacion = scrapertools.decodeHtmlentities(paginacion)
if paginacion:
itemlist.append(Item(channel=item.channel, action="recientes", title=">> Página Siguiente", url=paginacion))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<span class="icon-triangulo-derecha"></span>.*?<a href="([^"]+)">([^"]+) (\d+)'
matches = scrapertools.find_multiple_matches(data, patron)
for url, scrapedtitle, episode in matches:
title = "1x" + episode + " " + "Episodio"
itemlist.append(item.clone(title=title, url=url, action='findvideos'))
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir serie a la biblioteca", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
duplicados = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = 'Player\("(.*?)"'
matches = scrapertools.find_multiple_matches(data, patron)
for url in matches:
if "cldup" in url:
title = "Opcion Cldup"
if "chumi" in url:
title = "Opcion Chumi"
itemlist.append(item.clone(channel=item.channel, folder=False, title=title, action="play", url=url))
if item.extra != "library":
if config.get_videolibrary_support() and item.extra:
itemlist.append(item.clone(channel=item.channel, title="[COLOR yellow]Añadir pelicula a la videoteca[/COLOR]", url=item.url, action="add_pelicula_to_library", extra="library", contentTitle=item.show, contentType="movie"))
return itemlist
def player(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url, add_referer=True).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
url = scrapertools.find_single_match(data, 'sources: \[{file:\'(.*?)\'')
itemlist = servertools.find_video_items(data=data)
return itemlist
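novedades() scrapes the full list once, then pages through it by slicing twenty matches at a time and threading the next offset through item.pagina on the ">> Página Siguiente" item. A reduced sketch of that slice-pagination pattern (PAGE_SIZE and paginate are illustrative names, not part of the channel):

PAGE_SIZE = 20

def paginate(matches, offset=0):
    # Hand out a fixed-size window and the offset of the next one, if any.
    page = matches[offset:offset + PAGE_SIZE]
    next_offset = offset + PAGE_SIZE if len(matches) > offset + PAGE_SIZE else None
    return page, next_offset

episodes = ["ep%d" % i for i in range(45)]
page, nxt = paginate(episodes)       # first 20, nxt == 20
page, nxt = paginate(episodes, nxt)  # items 20-39, nxt == 40
page, nxt = paginate(episodes, nxt)  # last 5, nxt is None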

View File

@@ -23,12 +23,12 @@ def mainlist(item):
itemlist = []
item.text_color = color1
itemlist.append(item.clone(title="Novedades", action="entradas",
url="http://www.area-documental.com/resultados-reciente.php?buscar=&genero=",
url= host + "/resultados-reciente.php?buscar=&genero=",
fanart="http://i.imgur.com/Q7fsFI6.png"))
itemlist.append(item.clone(title="Destacados", action="entradas",
url="http://www.area-documental.com/resultados-destacados.php?buscar=&genero=",
url= host + "/resultados-destacados.php?buscar=&genero=",
fanart="http://i.imgur.com/Q7fsFI6.png"))
itemlist.append(item.clone(title="Categorías", action="cat", url="http://www.area-documental.com/index.php",
itemlist.append(item.clone(title="Categorías", action="cat", url= host + "/index.php",
fanart="http://i.imgur.com/Q7fsFI6.png"))
itemlist.append(item.clone(title="Ordenados por...", action="indice", fanart="http://i.imgur.com/Q7fsFI6.png"))
@@ -47,7 +47,7 @@ def configuracion(item):
def search(item, texto):
logger.info()
item.url = "http://www.area-documental.com/resultados.php?buscar=%s&genero=&x=0&y=0" % texto
item.url = host + "/resultados.php?buscar=%s&genero=&x=0&y=0" % texto
item.action = "entradas"
try:
itemlist = entradas(item)
@@ -65,7 +65,7 @@ def newest(categoria):
item = Item()
try:
if categoria == "documentales":
item.url = "http://www.area-documental.com/resultados-reciente.php?buscar=&genero="
item.url = host + "/resultados-reciente.php?buscar=&genero="
item.action = "entradas"
itemlist = entradas(item)
@@ -86,9 +86,9 @@ def indice(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Título", action="entradas",
url="http://www.area-documental.com/resultados-titulo.php?buscar=&genero="))
url= host + "/resultados-titulo.php?buscar=&genero="))
itemlist.append(item.clone(title="Año", action="entradas",
url="http://www.area-documental.com/resultados-anio.php?buscar=&genero="))
url= host + "/resultados-anio.php?buscar=&genero="))
return itemlist
@@ -125,9 +125,13 @@ def entradas(item):
data2 = ""
data = data.replace("\n", "").replace("\t", "")
patron = '<div id="peliculas">.*?<a href="([^"]+)".*?<img src="([^"]+)".*?' \
'target="_blank">(.*?)</a>(.*?)<p>(.*?)</p>' \
'.*?</strong>: (.*?)<strong>.*?</strong>(.*?)</div>'
patron = '(?s)<div id="peliculas">.*?a href="([^"]+)".*?'
patron += '<img src="([^"]+)".*?'
patron += 'target="_blank">(.*?)</a></span>'
patron += '(.*?)<p>'
patron += '(.*?)</p>.*?'
patron += '</strong>:(.*?)<strong>.*?'
patron += '</strong>(.*?)</div>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, scrapedplot, genero, extra in matches:
infolab = {'plot': scrapedplot, 'genre': genero}
@@ -200,6 +204,5 @@ def play(item):
extension = item.url.rsplit("|", 1)[0][-4:]
itemlist.append(['%s %s [directo]' % (extension, item.calidad), item.url, 0, subtitle])
# itemlist.append(item.clone(subtitle=subtitle))
return itemlist
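The area-documental edits are a pure host refactor: every hard-coded http://www.area-documental.com prefix becomes host + path, so a future domain move touches a single constant. A sketch of the idea (build_url is an illustrative helper, not part of the channel):

host = "http://www.area-documental.com"

def build_url(path):
    # Single point of change if the site moves domain or switches scheme.
    return host + path

url_recientes = build_url("/resultados-reciente.php?buscar=&genero=")
url_buscar = build_url("/resultados.php?buscar=%s&genero=&x=0&y=0") % "goya"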

View File

@@ -298,6 +298,7 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
if "player" in url:
scrapedserver = scrapertools.find_single_match(url, 'player/(\w+)')
if "ok" in scrapedserver: scrapedserver = "okru"
matches.append([url, scrapedserver, "", language.strip(), t_tipo])
bloque2 = scrapertools.find_single_match(data, '(?s)box_links.*?dt_social_single')
bloque2 = bloque2.replace("\t", "").replace("\r", "")
@@ -347,10 +348,12 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
def play(item):
logger.info()
itemlist = []
if "api.cinetux" in item.url:
if "api.cinetux" in item.url or item.server == "okru":
data = httptools.downloadpage(item.url, headers={'Referer': item.extra}).data.replace("\\", "")
id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"')
item.url = "https://youtube.googleapis.com/embed/?status=ok&hl=es&allow_embed=1&ps=docs&partnerid=30&hd=1&autoplay=0&cc_load_policy=1&showinfo=0&docid=" + id
if item.server == "okru":
item.url = "https://ok.ru/videoembed/" + id
elif "links" in item.url or "www.cinetux.me" in item.url:
data = httptools.downloadpage(item.url).data
scrapedurl = scrapertools.find_single_match(data, '<a href="(http[^"]+)')
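In the reworked play(), ok.ru mirrors are resolved the same way as the googleapis ones: the video id is scraped from the fragment of an img src and re-attached to the canonical embed URL. A self-contained sketch with invented sample data:

import re

def okru_embed(page_data):
    # Same extraction as the channel: the id travels after a '#' in an img src.
    video_id = re.search(r'img src="[^#]+#(.*?)"', page_data).group(1)
    return "https://ok.ru/videoembed/" + video_id

sample = '<img src="https://api.example/thumb.jpg#123456789">'
print(okru_embed(sample))  # https://ok.ru/videoembed/123456789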

View File

@@ -1,93 +0,0 @@
{
"id": "copiapop",
"name": "Copiapop/Diskokosmiko",
"language": ["cast", "lat"],
"active": true,
"adult": false,
"version": 1,
"changes": [
{
"date": "15/03/2017",
"autor": "SeiTaN",
"description": "limpieza código"
},
{
"date": "16/02/2017",
"autor": "Cmos",
"description": "Primera versión"
}
],
"thumbnail": "http://i.imgur.com/EjbfM7p.png?1",
"banner": "copiapop.png",
"categories": [
"movie",
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "copiapopuser",
"type": "text",
"color": "0xFF25AA48",
"label": "Usuario Copiapop",
"enabled": true,
"visible": true
},
{
"id": "copiapoppassword",
"type": "text",
"color": "0xFF25AA48",
"hidden": true,
"label": "Password Copiapop",
"enabled": "!eq(-1,'')",
"visible": true
},
{
"id": "diskokosmikouser",
"type": "text",
"color": "0xFFC52020",
"label": "Usuario Diskokosmiko",
"enabled": true,
"visible": true
},
{
"id": "diskokosmikopassword",
"type": "text",
"color": "0xFFC52020",
"hidden": true,
"label": "Password Diskokosmiko",
"enabled": "!eq(-1,'')",
"visible": true
},
{
"id": "adult_content",
"type": "bool",
"color": "0xFFd50b0b",
"label": "Mostrar contenido adulto en las búsquedas",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}

View File

@@ -1,426 +0,0 @@
# -*- coding: utf-8 -*-
import re
import threading
from core import filetools
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
__perfil__ = config.get_setting('perfil', "copiapop")
# Fijar perfil de color
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08']]
if __perfil__ - 1 >= 0:
color1, color2, color3, color4, color5 = perfil[__perfil__ - 1]
else:
color1 = color2 = color3 = color4 = color5 = ""
adult_content = config.get_setting("adult_content", "copiapop")
def login(pagina):
logger.info()
try:
user = config.get_setting("%suser" % pagina.split(".")[0], "copiapop")
password = config.get_setting("%spassword" % pagina.split(".")[0], "copiapop")
if pagina == "copiapop.com":
if user == "" and password == "":
return False, "Para ver los enlaces de copiapop es necesario registrarse en copiapop.com"
elif user == "" or password == "":
return False, "Copiapop: Usuario o contraseña en blanco. Revisa tus credenciales"
else:
if user == "" or password == "":
return False, "DiskoKosmiko: Usuario o contraseña en blanco. Revisa tus credenciales"
data = httptools.downloadpage("http://%s" % pagina).data
if re.search(r'(?i)%s' % user, data):
return True, ""
token = scrapertools.find_single_match(data, 'name="__RequestVerificationToken".*?value="([^"]+)"')
post = "__RequestVerificationToken=%s&UserName=%s&Password=%s" % (token, user, password)
headers = {'X-Requested-With': 'XMLHttpRequest'}
url_log = "http://%s/action/Account/Login" % pagina
data = httptools.downloadpage(url_log, post, headers).data
if "redirectUrl" in data:
logger.info("Login correcto")
return True, ""
else:
logger.error("Error en el login")
return False, "Nombre de usuario no válido. Comprueba tus credenciales"
except:
import traceback
logger.error(traceback.format_exc())
return False, "Error durante el login. Comprueba tus credenciales"
def mainlist(item):
logger.info()
itemlist = []
item.text_color = color1
logueado, error_message = login("copiapop.com")
if not logueado:
itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
else:
item.extra = "http://copiapop.com"
itemlist.append(item.clone(title="Copiapop", action="", text_color=color2))
itemlist.append(
item.clone(title=" Búsqueda", action="search", url="http://copiapop.com/action/SearchFiles"))
itemlist.append(item.clone(title=" Colecciones", action="colecciones",
url="http://copiapop.com/action/home/MoreNewestCollections?pageNumber=1"))
itemlist.append(item.clone(title=" Búsqueda personalizada", action="filtro",
url="http://copiapop.com/action/SearchFiles"))
itemlist.append(item.clone(title=" Mi cuenta", action="cuenta"))
item.extra = "http://diskokosmiko.mx/"
itemlist.append(item.clone(title="DiskoKosmiko", action="", text_color=color2))
itemlist.append(item.clone(title=" Búsqueda", action="search", url="http://diskokosmiko.mx/action/SearchFiles"))
itemlist.append(item.clone(title=" Colecciones", action="colecciones",
url="http://diskokosmiko.mx/action/home/MoreNewestCollections?pageNumber=1"))
itemlist.append(item.clone(title=" Búsqueda personalizada", action="filtro",
url="http://diskokosmiko.mx/action/SearchFiles"))
itemlist.append(item.clone(title=" Mi cuenta", action="cuenta"))
itemlist.append(item.clone(action="", title=""))
folder_thumb = filetools.join(config.get_data_path(), 'thumbs_copiapop')
files = filetools.listdir(folder_thumb)
if files:
itemlist.append(
item.clone(title="Eliminar caché de imágenes (%s)" % len(files), action="delete_cache", text_color="red"))
itemlist.append(item.clone(title="Configuración del canal", action="configuracion", text_color="gold"))
return itemlist
def search(item, texto):
logger.info()
item.post = "Mode=List&Type=Video&Phrase=%s&SizeFrom=0&SizeTo=0&Extension=&ref=pager&pageNumber=1" % texto.replace(
" ", "+")
try:
return listado(item)
except:
import sys, traceback
for line in sys.exc_info():
logger.error("%s" % line)
logger.error(traceback.format_exc())
return []
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def listado(item):
logger.info()
itemlist = []
data_thumb = httptools.downloadpage(item.url, item.post.replace("Mode=List", "Mode=Gallery")).data
if not item.post:
data_thumb = ""
item.url = item.url.replace("/gallery,", "/list,")
data = httptools.downloadpage(item.url, item.post).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
folder = filetools.join(config.get_data_path(), 'thumbs_copiapop')
patron = '<div class="size">(.*?)</div></div></div>'
bloques = scrapertools.find_multiple_matches(data, patron)
for block in bloques:
if "adult_info" in block and not adult_content:
continue
size = scrapertools.find_single_match(block, '<p>([^<]+)</p>')
scrapedurl, scrapedtitle = scrapertools.find_single_match(block,
'<div class="name"><a href="([^"]+)".*?>([^<]+)<')
scrapedthumbnail = scrapertools.find_single_match(block, "background-image:url\('([^']+)'")
if scrapedthumbnail:
try:
thumb = scrapedthumbnail.split("-", 1)[0].replace("?", "\?")
if data_thumb:
url_thumb = scrapertools.find_single_match(data_thumb, "(%s[^']+)'" % thumb)
else:
url_thumb = scrapedthumbnail
scrapedthumbnail = filetools.join(folder, "%s.jpg" % url_thumb.split("e=", 1)[1][-20:])
except:
scrapedthumbnail = ""
if scrapedthumbnail:
t = threading.Thread(target=download_thumb, args=[scrapedthumbnail, url_thumb])
t.setDaemon(True)
t.start()
else:
scrapedthumbnail = item.extra + "/img/file_types/gallery/movie.png"
scrapedurl = item.extra + scrapedurl
title = "%s (%s)" % (scrapedtitle, size)
if "adult_info" in block:
title += " [COLOR %s][+18][/COLOR]" % color4
plot = scrapertools.find_single_match(block, '<div class="desc">(.*?)</div>')
if plot:
plot = scrapertools.decodeHtmlentities(plot)
new_item = Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, text_color=color2,
extra=item.extra, infoLabels={'plot': plot}, post=item.post)
if item.post:
try:
new_item.folderurl, new_item.foldername = scrapertools.find_single_match(block,
'<p class="folder"><a href="([^"]+)".*?>([^<]+)<')
except:
pass
else:
new_item.folderurl = item.url.rsplit("/", 1)[0]
new_item.foldername = item.foldername
new_item.fanart = item.thumbnail
itemlist.append(new_item)
next_page = scrapertools.find_single_match(data, '<div class="pageSplitterBorder" data-nextpage-number="([^"]+)"')
if next_page:
if item.post:
post = re.sub(r'pageNumber=(\d+)', "pageNumber=" + next_page, item.post)
url = item.url
else:
url = re.sub(r',\d+\?ref=pager', ",%s?ref=pager" % next_page, item.url)
post = ""
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página Siguiente (%s)" % next_page,
url=url, post=post, extra=item.extra))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="play", title="Reproducir/Descargar", server="copiapop"))
usuario = scrapertools.find_single_match(item.url, '%s/([^/]+)/' % item.extra)
url_usuario = item.extra + "/" + usuario
if item.folderurl and not item.folderurl.startswith(item.extra):
item.folderurl = item.extra + item.folderurl
if item.post:
itemlist.append(item.clone(action="listado", title="Ver colección: %s" % item.foldername,
url=item.folderurl + "/gallery,1,1?ref=pager", post=""))
data = httptools.downloadpage(item.folderurl).data
token = scrapertools.find_single_match(data,
'data-action="followChanged.*?name="__RequestVerificationToken".*?value="([^"]+)"')
collection_id = item.folderurl.rsplit("-", 1)[1]
post = "__RequestVerificationToken=%s&collectionId=%s" % (token, collection_id)
url = "%s/action/Follow/Follow" % item.extra
title = "Seguir Colección: %s" % item.foldername
if "dejar de seguir" in data:
title = "Dejar de seguir la colección: %s" % item.foldername
url = "%s/action/Follow/UnFollow" % item.extra
itemlist.append(item.clone(action="seguir", title=title, url=url, post=post, text_color=color5, folder=False))
itemlist.append(
item.clone(action="colecciones", title="Ver colecciones del usuario: %s" % usuario, url=url_usuario))
return itemlist
def colecciones(item):
logger.info()
from core import jsontools
itemlist = []
usuario = False
data = httptools.downloadpage(item.url).data
if "Ver colecciones del usuario" not in item.title and not item.index:
data = jsontools.load(data)["Data"]
content = data["Content"]
content = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", content)
else:
usuario = True
if item.follow:
content = scrapertools.find_single_match(data,
'id="followed_collections"(.*?)<div id="recommended_collections"')
else:
content = scrapertools.find_single_match(data,
'<div id="collections".*?<div class="collections_list(.*?)<div class="collections_list')
content = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", content)
patron = '<a class="name" href="([^"]+)".*?>([^<]+)<.*?src="([^"]+)".*?<p class="info">(.*?)</p>'
matches = scrapertools.find_multiple_matches(content, patron)
index = ""
if item.index and item.index != "0":
matches = matches[item.index:item.index + 20]
if len(matches) > item.index + 20:
index = item.index + 20
elif len(matches) > 20:
matches = matches[:20]
index = 20
folder = filetools.join(config.get_data_path(), 'thumbs_copiapop')
for url, scrapedtitle, thumb, info in matches:
url = item.extra + url + "/gallery,1,1?ref=pager"
title = "%s (%s)" % (scrapedtitle, scrapertools.htmlclean(info))
try:
scrapedthumbnail = filetools.join(folder, "%s.jpg" % thumb.split("e=", 1)[1][-20:])
except:
try:
scrapedthumbnail = filetools.join(folder, "%s.jpg" % thumb.split("/thumbnail/", 1)[1][-20:])
thumb = thumb.replace("/thumbnail/", "/")
except:
scrapedthumbnail = ""
if scrapedthumbnail:
t = threading.Thread(target=download_thumb, args=[scrapedthumbnail, thumb])
t.setDaemon(True)
t.start()
else:
scrapedthumbnail = thumb
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url,
thumbnail=scrapedthumbnail, text_color=color2, extra=item.extra,
foldername=scrapedtitle))
if not usuario and data.get("NextPageUrl"):
url = item.extra + data["NextPageUrl"]
itemlist.append(item.clone(title=">> Página Siguiente", url=url, text_color=""))
elif index:
itemlist.append(item.clone(title=">> Página Siguiente", url=item.url, index=index, text_color=""))
return itemlist
def seguir(item):
logger.info()
data = httptools.downloadpage(item.url, item.post)
message = "Colección seguida"
if "Dejar" in item.title:
message = "La colección ya no se sigue"
if data.sucess and config.get_platform() != "plex":
from platformcode import platformtools
platformtools.dialog_notification("Acción correcta", message)
def cuenta(item):
logger.info()
import urllib
itemlist = []
web = "copiapop"
if "diskokosmiko" in item.extra:
web = "diskokosmiko"
logueado, error_message = login("diskokosmiko.mx")
if not logueado:
itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
return itemlist
user = config.get_setting("%suser" % web, "copiapop")
user = unicode(user, "utf8").lower().encode("utf8")
url = item.extra + "/" + urllib.quote(user)
data = httptools.downloadpage(url).data
num_col = scrapertools.find_single_match(data, 'name="Has_collections" value="([^"]+)"')
if num_col != "0":
itemlist.append(item.clone(action="colecciones", url=url, index="0", title="Ver mis colecciones",
text_color=color5))
else:
itemlist.append(item.clone(action="", title="No tienes ninguna colección", text_color=color4))
num_follow = scrapertools.find_single_match(data, 'name="Follows_collections" value="([^"]+)"')
if num_follow != "0":
itemlist.append(item.clone(action="colecciones", url=url, index="0", title="Colecciones que sigo",
text_color=color5, follow=True))
else:
itemlist.append(item.clone(action="", title="No sigues ninguna colección", text_color=color4))
return itemlist
def filtro(item):
logger.info()
list_controls = []
valores = {}
dict_values = None
list_controls.append({'id': 'search', 'label': 'Texto a buscar', 'enabled': True, 'color': '0xFFC52020',
'type': 'text', 'default': '', 'visible': True})
list_controls.append({'id': 'tipo', 'label': 'Tipo de búsqueda', 'enabled': True, 'color': '0xFFFF8000',
'type': 'list', 'default': -1, 'visible': True})
list_controls[1]['lvalues'] = ['Aplicación', 'Archivo', 'Documento', 'Imagen', 'Música', 'Vídeo', 'Todos']
valores['tipo'] = ['Application', 'Archive', 'Document', 'Image', 'Music', 'Video', '']
list_controls.append({'id': 'ext', 'label': 'Extensión', 'enabled': True, 'color': '0xFFF4FA58',
'type': 'text', 'default': '', 'visible': True})
list_controls.append({'id': 'tmin', 'label': 'Tamaño mínimo (MB)', 'enabled': True, 'color': '0xFFCC2EFA',
'type': 'text', 'default': '0', 'visible': True})
list_controls.append({'id': 'tmax', 'label': 'Tamaño máximo (MB)', 'enabled': True, 'color': '0xFF2ECCFA',
'type': 'text', 'default': '0', 'visible': True})
# Se utilizan los valores por defecto/guardados
web = "copiapop"
if "diskokosmiko" in item.extra:
web = "diskokosmiko"
valores_guardados = config.get_setting("filtro_defecto_" + web, item.channel)
if valores_guardados:
dict_values = valores_guardados
item.valores = valores
from platformcode import platformtools
return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values,
caption="Filtra la búsqueda", item=item, callback='filtrado')
def filtrado(item, values):
values_copy = values.copy()
web = "copiapop"
if "diskokosmiko" in item.extra:
web = "diskokosmiko"
# Guarda el filtro para que sea el que se cargue por defecto
config.set_setting("filtro_defecto_" + web, values_copy, item.channel)
tipo = item.valores["tipo"][values["tipo"]]
search = values["search"]
ext = values["ext"]
tmin = values["tmin"]
tmax = values["tmax"]
if not tmin.isdigit():
tmin = "0"
if not tmax.isdigit():
tmax = "0"
item.valores = ""
item.post = "Mode=List&Type=%s&Phrase=%s&SizeFrom=%s&SizeTo=%s&Extension=%s&ref=pager&pageNumber=1" \
% (tipo, search, tmin, tmax, ext)
item.action = "listado"
return listado(item)
def download_thumb(filename, url):
from core import downloadtools
lock = threading.Lock()
lock.acquire()
folder = filetools.join(config.get_data_path(), 'thumbs_copiapop')
if not filetools.exists(folder):
filetools.mkdir(folder)
lock.release()
if not filetools.exists(filename):
downloadtools.downloadfile(url, filename, silent=True)
return filename
def delete_cache(url):
folder = filetools.join(config.get_data_path(), 'thumbs_copiapop')
filetools.rmdirtree(folder)
if config.is_xbmc():
import xbmc
xbmc.executebuiltin("Container.Refresh")
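The removed channel's login() is a textbook anti-forgery-token flow: GET the page, lift __RequestVerificationToken out of the form, then POST it back with the credentials under an X-Requested-With header, treating a redirectUrl in the response as success. A generic sketch of the same flow with requests (endpoint, field, and header names come from the deleted code; the rest is illustrative):

import re
import requests

def login(site, user, password):
    session = requests.Session()
    page = session.get("http://%s" % site).text
    # The anti-forgery token must be echoed back along with the credentials.
    token = re.search(r'name="__RequestVerificationToken".*?value="([^"]+)"',
                      page, re.DOTALL).group(1)
    post = {"__RequestVerificationToken": token, "UserName": user, "Password": password}
    headers = {"X-Requested-With": "XMLHttpRequest"}
    resp = session.post("http://%s/action/Account/Login" % site, data=post, headers=headers)
    return "redirectUrl" in resp.text  # success marker checked by the original code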

View File

@@ -49,8 +49,7 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, action="menuseries", title="Series", url=host, folder=True))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar..."))
if not account:
itemlist.append(Item(channel=item.channel, title=bbcode_kodi2html(
"[COLOR orange][B]Habilita tu cuenta para activar los items de usuario...[/B][/COLOR]"),
itemlist.append(Item(channel=item.channel, title="[COLOR orange][B]Habilita tu cuenta para activar los items de usuario...[/B][/COLOR]",
action="settingCanal", url=""))
else:
login()
@@ -66,10 +65,10 @@ def menupeliculas(item):
if account:
itemlist.append(Item(channel=item.channel, action="items_usuario",
title=bbcode_kodi2html("[COLOR orange][B]Favoritos[/B][/COLOR]"),
title="[COLOR orange][B]Favoritos[/B][/COLOR]",
url=host + "/a/my?target=movies&action=favorite&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="items_usuario",
title=bbcode_kodi2html("[COLOR orange][B]Pendientes[/B][/COLOR]"),
title="[COLOR orange][B]Pendientes[/B][/COLOR]",
url=host + "/a/my?target=movies&action=pending&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="fichas", title="ABC", url=host + "/peliculas/abc", folder=True))
@@ -86,7 +85,7 @@ def menupeliculas(item):
itemlist.append(Item(channel=item.channel, action="generos", title="Películas por Género", url=host, folder=True))
if account:
itemlist.append(Item(channel=item.channel, action="items_usuario",
title=bbcode_kodi2html("[COLOR orange][B]Vistas[/B][/COLOR]"),
title="[COLOR orange][B]Vistas[/B][/COLOR]",
url=host + "/a/my?target=movies&action=seen&start=-28&limit=28", folder=True))
return itemlist
@@ -99,10 +98,10 @@ def menuseries(item):
if account:
itemlist.append(Item(channel=item.channel, action="items_usuario",
title=bbcode_kodi2html("[COLOR orange][B]Siguiendo[/B][/COLOR]"),
title="[COLOR orange][B]Siguiendo[/B][/COLOR]",
url=host + "/a/my?target=shows&action=following&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="items_usuario",
title=bbcode_kodi2html("[COLOR orange][B]Para Ver[/B][/COLOR]"),
title="[COLOR orange][B]Para Ver[/B][/COLOR]",
url=host + "/a/my?target=shows&action=watch&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="series_abc", title="A-Z", folder=True))
@@ -123,13 +122,13 @@ def menuseries(item):
url=host + "/series/list", folder=True))
if account:
itemlist.append(Item(channel=item.channel, action="items_usuario",
title=bbcode_kodi2html("[COLOR orange][B]Favoritas[/B][/COLOR]"),
title="[COLOR orange][B]Favoritas[/B][/COLOR]",
url=host + "/a/my?target=shows&action=favorite&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="items_usuario",
title=bbcode_kodi2html("[COLOR orange][B]Pendientes[/B][/COLOR]"),
title="[COLOR orange][B]Pendientes[/B][/COLOR]",
url=host + "/a/my?target=shows&action=pending&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="items_usuario",
title=bbcode_kodi2html("[COLOR orange][B]Vistas[/B][/COLOR]"),
title="[COLOR orange][B]Vistas[/B][/COLOR]",
url=host + "/a/my?target=shows&action=seen&start=-28&limit=28", folder=True))
return itemlist
@@ -222,7 +221,7 @@ def items_usuario(item):
serie = ficha['show_title']['en'].strip()
temporada = ficha['season']
episodio = ficha['episode']
serie = bbcode_kodi2html("[COLOR whitesmoke][B]" + serie + "[/B][/COLOR]")
serie = "[COLOR whitesmoke][B]" + serie + "[/B][/COLOR]"
if len(episodio) == 1: episodio = '0' + episodio
try:
title = temporada + "x" + episodio + " - " + serie + ": " + title
@@ -286,9 +285,8 @@ def fichas(item):
if len(s_p) == 1:
data = s_p[0]
if 'Lo sentimos</h3>' in s_p[0]:
return [Item(channel=item.channel, title=bbcode_kodi2html(
"[COLOR gold][B]HDFull:[/B][/COLOR] [COLOR blue]" + texto.replace('%20',
' ') + "[/COLOR] sin resultados"))]
return [Item(channel=item.channel, title="[COLOR gold][B]HDFull:[/B][/COLOR] [COLOR blue]" + texto.replace('%20',
' ') + "[/COLOR] sin resultados")]
else:
data = s_p[0] + s_p[1]
else:
@@ -321,12 +319,12 @@ def fichas(item):
if scrapedlangs != ">":
textoidiomas, language = extrae_idiomas(scrapedlangs)
#Todo Quitar el idioma
title += bbcode_kodi2html(" ( [COLOR teal][B]" + textoidiomas + "[/B][/COLOR])")
title += " ( [COLOR teal][B]" + textoidiomas + "[/B][/COLOR])"
if scrapedrating != ">":
valoracion = re.sub(r'><[^>]+>(\d+)<b class="dec">(\d+)</b>', r'\1,\2', scrapedrating)
infoLabels['rating']=valoracion
title += bbcode_kodi2html(" ([COLOR orange]" + valoracion + "[/COLOR])")
title += " ([COLOR orange]" + valoracion + "[/COLOR])"
url = urlparse.urljoin(item.url, scrapedurl)
@@ -346,7 +344,7 @@ def fichas(item):
if item.title == "Buscar...":
tag_type = scrapertools.get_match(url, 'l.tv/([^/]+)/')
title += bbcode_kodi2html(" - [COLOR blue]" + tag_type.capitalize() + "[/COLOR]")
title += " - [COLOR blue]" + tag_type.capitalize() + "[/COLOR]"
itemlist.append(
Item(channel=item.channel, action=action, title=title, url=url, fulltitle=title, thumbnail=thumbnail,
@@ -388,7 +386,7 @@ def episodios(item):
str = get_status(status, "shows", id)
if str != "" and account and item.category != "Series" and "XBMC" not in item.title:
if config.get_videolibrary_support():
title = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )")
title = " ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )"
itemlist.append(
Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url_targets,
thumbnail=item.thumbnail, show=item.show, folder=False))
@@ -397,11 +395,11 @@ def episodios(item):
thumbnail=item.thumbnail, show=item.show, folder=True))
elif account and item.category != "Series" and "XBMC" not in item.title:
if config.get_videolibrary_support():
title = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )")
title = " ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )"
itemlist.append(
Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url_targets,
thumbnail=item.thumbnail, show=item.show, folder=False))
title = bbcode_kodi2html(" ( [COLOR orange][B]Seguir[/B][/COLOR] )")
title = " ( [COLOR orange][B]Seguir[/B][/COLOR] )"
itemlist.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
thumbnail=item.thumbnail, show=item.show, folder=True))
@@ -436,7 +434,7 @@ def episodios(item):
idiomas = "( [COLOR teal][B]"
for idioma in episode['languages']: idiomas += idioma + " "
idiomas += "[/B][/COLOR])"
idiomas = bbcode_kodi2html(idiomas)
idiomas = idiomas
else:
idiomas = ""
@@ -513,7 +511,7 @@ def novedades_episodios(item):
idiomas = "( [COLOR teal][B]"
for idioma in episode['languages']: idiomas += idioma + " "
idiomas += "[/B][/COLOR])"
idiomas = bbcode_kodi2html(idiomas)
idiomas = idiomas
else:
idiomas = ""
@@ -522,7 +520,7 @@ def novedades_episodios(item):
except:
show = episode['show']['title']['en'].strip()
show = bbcode_kodi2html("[COLOR whitesmoke][B]" + show + "[/B][/COLOR]")
show = "[COLOR whitesmoke][B]" + show + "[/B][/COLOR]"
if episode['title']:
try:
@@ -610,8 +608,9 @@ def generos_series(item):
def findvideos(item):
logger.info()
itemlist = []
it1 = []
it2 = []
## Carga estados
status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
url_targets = item.url
@@ -623,21 +622,21 @@ def findvideos(item):
item.url = item.url.split("###")[0]
if type == "2" and account and item.category != "Cine":
title = bbcode_kodi2html(" ( [COLOR orange][B]Agregar a Favoritos[/B][/COLOR] )")
title = " ( [COLOR orange][B]Agregar a Favoritos[/B][/COLOR] )"
if "Favorito" in item.title:
title = bbcode_kodi2html(" ( [COLOR red][B]Quitar de Favoritos[/B][/COLOR] )")
title = " ( [COLOR red][B]Quitar de Favoritos[/B][/COLOR] )"
if config.get_videolibrary_support():
title_label = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )")
itemlist.append(Item(channel=item.channel, action="findvideos", title=title_label, fulltitle=title_label,
title_label = " ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )"
it1.append(Item(channel=item.channel, action="findvideos", title=title_label, fulltitle=title_label,
url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=False))
title_label = bbcode_kodi2html(" ( [COLOR green][B]Tráiler[/B][/COLOR] )")
title_label = " ( [COLOR green][B]Tráiler[/B][/COLOR] )"
itemlist.append(
Item(channel=item.channel, action="buscartrailer", title=title_label, fulltitle=title_label, url=url_targets,
it1.append(
item.clone(channel="trailertools", action="buscartrailer", title=title_label, contentTitle=item.show, url=item.url,
thumbnail=item.thumbnail, show=item.show))
itemlist.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
it1.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
thumbnail=item.thumbnail, show=item.show, folder=True))
data_js = httptools.downloadpage("http://hdfull.tv/templates/hdfull/js/jquery.hdfull.view.min.js").data
@@ -663,7 +662,6 @@ def findvideos(item):
infolabels = {}
year = scrapertools.find_single_match(data, '<span>A&ntilde;o:\s*</span>.*?(\d{4})')
infolabels["year"] = year
matches = []
for match in data_decrypt:
prov = eval(scrapertools.find_single_match(data_js, 'p\[%s\]\s*=\s*(\{.*?\}[\'"]\})' % match["provider"]))
@@ -679,8 +677,10 @@ def findvideos(item):
for idioma, calidad, url, embed in matches:
mostrar_server = True
option = "Ver"
option1 = 1
if re.search(r'return ([\'"]{2,}|\})', embed):
option = "Descargar"
option1 = 2
calidad = unicode(calidad, "utf8").upper().encode("utf8")
title = option + ": %s (" + calidad + ")" + " (" + idioma + ")"
thumbnail = item.thumbnail
@@ -691,13 +691,15 @@ def findvideos(item):
if account:
url += "###" + id + ";" + type
itemlist.append(
Item(channel=item.channel, action="play", title=title, fulltitle=title, url=url, thumbnail=thumbnail,
it2.append(
item.clone(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, fanart=fanart, show=item.show, folder=True, infoLabels=infolabels,
contentTitle=item.contentTitle, contentType=item.contentType, tipo=option))
contentTitle=item.title, contentType=item.contentType, tipo=option, tipo1=option1, idioma=idioma))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
itemlist.sort(key=lambda it: it.title, reverse=True)
it2 = servertools.get_servers_itemlist(it2, lambda i: i.title % i.server.capitalize())
it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server))
itemlist.extend(it1)
itemlist.extend(it2)
## 2 = película
if type == "2" and item.category != "Cine":
if config.get_videolibrary_support():
@@ -754,22 +756,6 @@ def extrae_idiomas(bloqueidiomas):
return textoidiomas, language
def bbcode_kodi2html(text):
if config.get_platform().startswith("plex") or config.get_platform().startswith("mediaserver"):
import re
text = re.sub(r'\[COLOR\s([^\]]+)\]',
r'<span style="color: \1">',
text)
text = text.replace('[/COLOR]', '</span>')
text = text.replace('[CR]', '<br>')
text = re.sub(r'\[([^\]]+)\]',
r'<\1>',
text)
text = text.replace('"color: white"', '"color: auto"')
return text
## --------------------------------------------------------------------------------
def set_status(item):
@@ -797,7 +783,7 @@ def set_status(item):
data = httptools.downloadpage(host + path, post=post).data
title = bbcode_kodi2html("[COLOR green][B]OK[/B][/COLOR]")
title = "[COLOR green][B]OK[/B][/COLOR]"
return [Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=item.url,
thumbnail=item.thumbnail, show=item.show, folder=False)]
@@ -815,15 +801,14 @@ def get_status(status, type, id):
try:
if id in status['favorites'][type]:
str1 = bbcode_kodi2html(" [COLOR orange][B]Favorito[/B][/COLOR]")
str1 = " [COLOR orange][B]Favorito[/B][/COLOR]"
except:
str1 = ""
try:
if id in status['status'][type]:
str2 = state[status['status'][type][id]]
if str2 != "": str2 = bbcode_kodi2html(
" [COLOR green][B]" + state[status['status'][type][id]] + "[/B][/COLOR]")
if str2 != "": str2 = "[COLOR green][B]" + state[status['status'][type][id]] + "[/B][/COLOR]"
except:
str2 = ""
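The findvideos() rework splits status and trailer rows (it1) from playable mirrors (it2) and orders the mirrors with a composite key: option first (tipo1: 1 = view before 2 = download), then language, then server. Python compares tuples element by element, so the whole policy fits in one sort key; a small self-contained illustration:

# Multi-key ordering as in: it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server))
mirrors = [
    {"tipo1": 2, "idioma": "ESP", "server": "uploaded"},
    {"tipo1": 1, "idioma": "LAT", "server": "streamcloud"},
    {"tipo1": 1, "idioma": "ESP", "server": "powvideo"},
    {"tipo1": 1, "idioma": "ESP", "server": "nowvideo"},
]
mirrors.sort(key=lambda m: (m["tipo1"], m["idioma"], m["server"]))
for m in mirrors:
    print(m["tipo1"], m["idioma"], m["server"])
# View links come first, grouped by language and alphabetical by server; downloads last.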

View File

@@ -339,20 +339,20 @@ def episodios(item):
infoLabels = item.infoLabels
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
logger.debug('data: %s'%data)
pattern = '<ul class="%s">(.*?)</ul>' % "pagination" # item.pattern
pagination = scrapertools.find_single_match(data, pattern)
if pagination:
pattern = '<li><a href="([^"]+)">Last<\/a>'
full_url = scrapertools.find_single_match(pagination, pattern)
url, last_page = scrapertools.find_single_match(full_url, r'(.*?\/pg\/)(\d+)')
list_pages = []
for x in range(1, int(last_page) + 1):
list_pages.append("%s%s" % (url, x))
list_pages = [item.url]
for x in range(2, int(last_page) + 1):
response = httptools.downloadpage('%s%s'% (url,x))
if response.sucess:
list_pages.append("%s%s" % (url, x))
else:
list_pages = [item.url]
logger.debug ('pattern: %s'%pattern)
for index, page in enumerate(list_pages):
logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page))
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
@@ -424,7 +424,7 @@ def episodios(item):
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios"))
return itemlist
def search(item, texto):
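The episodios() pagination no longer trusts the advertised last page: page 1 is the URL already in hand, and each further /pg/N candidate is downloaded and kept only when the response reports success (sucess is the attribute name this project's httptools really uses). A hedged sketch of the same probing with requests:

import requests

def collect_pages(base_url, last_page):
    # Keep the page we already have, then verify every other candidate.
    pages = [base_url + "1"]
    for n in range(2, last_page + 1):
        candidate = "%s%d" % (base_url, n)
        try:
            ok = requests.head(candidate, timeout=10).status_code == 200
        except requests.RequestException:
            ok = False
        if ok:
            pages.append(candidate)
    return pages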

View File

@@ -14,18 +14,19 @@ from core.item import Item
from platformcode import config, logger
host = 'http://www.ohpelis.com'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0 Chrome/58.0.3029.110',
'Referer': host}
def mainlist(item):
logger.info()
itemlist = []
data = httptools.downloadpage(host).data
patron = '<li class="cat-item cat-item-\d+"><a href="(.*?)" >(.*?)<\/a> <i>(\d+)<\/i>'
matches = scrapertools.find_multiple_matches(data, patron)
mcantidad = 0
for scrapedurl, scrapedtitle, cantidad in matches:
mcantidad += int(cantidad)
itemlist.append(
item.clone(title="Peliculas",
item.clone(title="Peliculas (%s)" %mcantidad,
action='movies_menu'
))
@@ -95,14 +96,14 @@ def list_all(item):
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedplot in matches:
title = scrapedtitle
plot = scrapedplot
thumbnail = scrapedthumbnail
url = scrapedurl
year = scrapedyear
new_item = (item.clone(title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
fulltitle=title,
contentTitle=title,
infoLabels={'year': year}
))
if item.extra == 'serie':
@@ -114,7 +115,7 @@ def list_all(item):
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
tmdb.set_infoLabels(itemlist, True)
# Paginacion
next_page = scrapertools.find_single_match(data, '<link rel="next" href="(.*?)" />')
if next_page:
@@ -162,7 +163,6 @@ def search_list(item):
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtype, scrapedyear, scrapedplot in matches:
title = scrapedtitle
plot = scrapedplot
thumbnail = scrapedthumbnail
url = scrapedurl
year = scrapedyear
@@ -170,7 +170,6 @@ def search_list(item):
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
infoLabels={'year': year})
if scrapedtype == 'movies':
new_item.action = 'findvideos'
@@ -275,11 +274,14 @@ def findvideos(item):
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.contentTitle = item.fulltitle
videoitem.infoLabels = item.infoLabels
if videoitem.server != 'youtube':
videoitem.title = item.title + ' (%s)' % videoitem.server
else:
videoitem.title = 'Trailer en %s' % videoitem.server
videoitem.action = 'play'
videoitem.server = ""
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
@@ -288,9 +290,9 @@ def findvideos(item):
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle,
))
tmdb.set_infoLabels(itemlist, True)
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist
@@ -314,3 +316,8 @@ def newest(categoria):
return []
return itemlist
def play(item):
logger.info()
item.thumbnail = item.contentThumbnail
return [item]

View File

@@ -32,8 +32,8 @@ def mainlist(item):
url= host + "/calidad/hd-real-720", viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, title=" Listado por género", action="porGenero", url= host))
itemlist.append(Item(channel=item.channel, title=" Buscar", action="search", url= host + "/?s="))
itemlist.append(Item(channel=item.channel, title=" Idioma", action="porIdioma", url= host))
itemlist.append(Item(channel=item.channel, title=" Buscar", action="search", url= host + "/?s="))
return itemlist
@@ -53,14 +53,10 @@ def porIdioma(item):
def porGenero(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'cat-item.*?href="([^"]+).*?>(.*?)<.*?span>([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for urlgen, genero, cantidad in matches:
cantidad = cantidad.replace(".", "")
titulo = genero + " (" + cantidad + ")"
@@ -74,10 +70,9 @@ def search(item, texto):
logger.info()
texto_post = texto.replace(" ", "+")
item.url = host + "/?s=" + texto_post
try:
return listaBuscar(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
# Se captura la excepcion, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
@@ -88,12 +83,9 @@ def search(item, texto):
def agregadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\r|\t|\s{2}|&nbsp;|"', "", data)
patron = scrapertools.find_multiple_matches (data,'<divclass=col-mt-5 postsh>.*?Duración')
for element in patron:
info = scrapertools.find_single_match(element,
"calidad>(.*?)<.*?ahref=(.*?)>.*?'reflectMe' src=(.*?)\/>.*?<h2>(.*?)"
@@ -107,16 +99,15 @@ def agregadas(item):
itemlist.append(Item(channel=item.channel,
action='findvideos',
contentType = "movie",
contentTitle = title,
fulltitle = title,
infoLabels={'year':year},
plot=plot,
quality=quality,
thumbnail=thumbnail,
title=title,
contentTitle = title,
url=url
))
tmdb.set_infoLabels_itemlist(itemlist, True)
tmdb.set_infoLabels(itemlist, True)
next_page = scrapertools.find_single_match(data,'tima>.*?href=(.*?) ><i')
itemlist.append(Item(channel=item.channel, action="agregadas", title='Pagina Siguiente >>',
url=next_page.strip(),
@@ -144,17 +135,13 @@ def listaBuscar(item):
def findvideos(item):
logger.info()
itemlist = []
plot = item.plot
# Descarga la pagina
data = httptools.downloadpage(item.url).data
patron = 'cursor: hand" rel="(.*?)".*?class="optxt"><span>(.*?)<.*?width.*?class="q">(.*?)</span'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedidioma, scrapedcalidad in matches:
idioma = ""
title = "%s [" + scrapedcalidad + "][" + scrapedidioma +"]"
if "youtube" in scrapedurl:
scrapedurl += "&"
@@ -163,8 +150,8 @@ def findvideos(item):
if not ("omina.farlante1" in scrapedurl or "404" in scrapedurl):
itemlist.append(
item.clone(channel=item.channel, action="play", title=title, fulltitle=item.title, url=scrapedurl,
plot=plot, quality= quality, language=language, extra = item.thumbnail))
tmdb.set_infoLabels_itemlist(itemlist, True)
quality= quality, language=language, extra = item.thumbnail))
tmdb.set_infoLabels(itemlist, True)
itemlist=servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Opción "Añadir esta película a la biblioteca de KODI"
if item.extra != "library":

View File

@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import scrapertools
from core import servertools
@@ -10,58 +9,57 @@ from core import tmdb
from core.item import Item
from platformcode import config, logger
host = "http://www.repelis.tv"
# Main list manual
def mainlist(item):
logger.info()
itemlist = []
item.url = "http://www.repelis.tv/pag/1"
mifan = "http://www.psicocine.com/wp-content/uploads/2013/08/Bad_Robot_Logo.jpg"
itemlist.append(Item(channel=item.channel, action="menupelis", title="Peliculas", url="http://www.repelis.tv/pag/1",
itemlist.append(Item(channel=item.channel, action="menupelis", title="Peliculas", url= host + "/pag/1",
thumbnail="http://www.gaceta.es/sites/default/files/styles/668x300/public"
"/metro_goldwyn_mayer_1926-web.png?itok=-lRSR9ZC",
fanart=mifan))
itemlist.append(Item(channel=item.channel, action="menuestre", title="Estrenos",
url="http://www.repelis.tv/archivos/estrenos/pag/1",
url= host + "/archivos/estrenos/pag/1",
thumbnail="http://t0.gstatic.com/images?q=tbn"
":ANd9GcS4g68rmeLQFuX7iCrPwd00FI_OlINZXCYXEFrJHTZ0VSHefIIbaw",
fanart=mifan))
itemlist.append(
Item(channel=item.channel, action="menudesta", title="Destacadas", url="http://www.repelis.tv/pag/1",
Item(channel=item.channel, action="menudesta", title="Destacadas", url= host + "/pag/1",
thumbnail="http://img.irtve.es/v/1074982/", fanart=mifan))
itemlist.append(Item(channel=item.channel, action="menupelis", title="Proximos estrenos",
url="http://www.repelis.tv/archivos/proximos-estrenos/pag/1",
url= host + "/archivos/proximos-estrenos/pag/1",
thumbnail="https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcTpsRC"
"-GTYzCqhor2gIDfAB61XeymwgXWSVBHoRAKs2c5HAn29f&reload=on",
fanart=mifan))
itemlist.append(Item(channel=item.channel, action="menupelis", title="Todas las Peliculas",
url="http://www.repelis.tv/pag/1",
url= host + "/pag/1",
thumbnail="https://freaksociety.files.wordpress.com/2012/02/logos-cine.jpg", fanart=mifan))
if config.get_setting("adult_mode") != 0:
itemlist.append(Item(channel=item.channel, action="menupelis", title="Eroticas +18",
url="http://www.repelis.tv/genero/eroticas/pag/1",
url= host + "/genero/eroticas/pag/1",
thumbnail="http://www.topkamisetas.com/catalogo/images/TB0005.gif",
fanart="http://www.topkamisetas.com/catalogo/images/TB0005.gif", extra='adult'))
# Quito la busqueda por año si no esta enabled el adultmode, porque no hay manera de filtrar los enlaces
# eroticos72
itemlist.append(
Item(channel=item.channel, action="poranyo", title="Por Año", url="http://www.repelis.tv/anio/2016",
Item(channel=item.channel, action="poranyo", title="Por Año", url= host + "/anio/2016",
thumbnail="http://t3.gstatic.com/images?q=tbn:ANd9GcSkxiYXdBcI0cvBLsb_nNlz_dWXHRl2Q"
"-ER9dPnP1gNUudhrqlR",
fanart=mifan))
# Por categoria si que filtra la categoria de eroticos
itemlist.append(Item(channel=item.channel, action="porcateg", title="Por Categoria",
url="http://www.repelis.tv/genero/accion/pag/1",
url= host + "/genero/accion/pag/1",
thumbnail="http://www.logopro.it/blog/wp-content/uploads/2013/07/categoria-sigaretta"
"-elettronica.png",
fanart=mifan))
itemlist.append(
Item(channel=item.channel, action="search", title="Buscar...", url="http://www.repelis.tv/search/?s=",
Item(channel=item.channel, action="search", title="Buscar...", url= host + "/search/?s=",
thumbnail="http://thumbs.dreamstime.com/x/buscar-pistas-13159747.jpg", fanart=mifan))
return itemlist
@@ -70,9 +68,7 @@ def mainlist(item):
def menupelis(item):
logger.info(item.url)
itemlist = []
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
if item.extra == '':
@@ -85,8 +81,6 @@ def menupelis(item):
section = 'de %s'%item.extra
patronenlaces = '<h.>Películas %s<\/h.>.*?>(.*?)<\/section>'%section
matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)
for bloque_enlaces in matchesenlaces:
@@ -97,25 +91,18 @@ def menupelis(item):
matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
logger.info("He encontrado el segundo bloque")
logger.info("extra_info: %s" % extra_info)
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "");
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
url = scrapedurl
thumbnail = scrapedthumbnail
quality = scrapertools.find_single_match(extra_info, 'calidad.*?>Calidad (.*?)<')
year = scrapertools.find_single_match(extra_info, '"anio">(\d{4})<')
language = scrapertools.find_multiple_matches(extra_info, 'class="(latino|espanol|subtitulado)"')
# if language = 'ingles':
# language='vo'
new_item=Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, fanart=thumbnail, language=language, quality=quality,
infoLabels={'year': year})
if year:
tmdb.set_infoLabels_item(new_item)
itemlist.append(new_item)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, language=language, quality=quality,
infoLabels={'year': year}))
tmdb.set_infoLabels(itemlist)
try:
next_page = scrapertools.get_match(data, '<span class="current">\d+</span><a href="([^"]+)"')
title = "[COLOR red][B]Pagina siguiente »[/B][/COLOR]"
@@ -139,9 +126,6 @@ def menudesta(item):
matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)
for bloque_enlaces in matchesenlaces:
# patron = '<a href="([^"]+)" title="([^"]+)"> <div class="poster".*?<img src="([^"]+)"'
patron = '<div class="poster-media-card">.*?'
patron += '<a href="(.*?)".*?title="(.*?)".*?'
patron += '<img src="(.*?)"'
@@ -150,11 +134,10 @@ def menudesta(item):
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "");
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
url = scrapedurl
thumbnail = scrapedthumbnail
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, fanart=thumbnail))
thumbnail=thumbnail, fanart = thumbnail))
return itemlist
@@ -180,8 +163,8 @@ def menuestre(item):
for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "");
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
url = scrapedurl
thumbnail = scrapedthumbnail
quality = scrapertools.find_single_match(extra_info, 'calidad.*?>Calidad (.*?)<')
year = scrapertools.find_single_match(extra_info, '"anio">(\d{4})<')
language = scrapertools.find_single_match(extra_info, 'class="(latino|espanol|subtitulado)"')
@@ -190,10 +173,6 @@ def menuestre(item):
thumbnail=thumbnail, fanart=thumbnail, language=language, quality=quality,
infoLabels={'year': year}))
## Paginación
# <span class="current">2</span><a href="http://www.repelis.tv/page/3"
# Si falla no muestra ">> Página siguiente"
try:
next_page = scrapertools.get_match(data, '<span class="current">\d+</span><a href="([^"]+)"')
title = "[COLOR red][B]Pagina siguiente »[/B][/COLOR]"
@@ -205,48 +184,25 @@ def menuestre(item):
def findvideos(item):
logger.info(item.url)
itemlist = []
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
patron = '<h2>Sinopsis</h2>.*?<p>(.*?)</p>.*?<div id="informacion".*?</h2>.*?<p>(.*?)</p>' # titulo
matches = re.compile(patron, re.DOTALL).findall(data)
matches = scrapertools.find_multiple_matches(data, patron)
for sinopsis, title in matches:
title = "[COLOR white][B]" + title + "[/B][/COLOR]"
patron = '<div id="informacion".*?>(.*?)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedplot in matches:
splot = title + "\n\n"
plot = scrapedplot
plot = re.sub('<h2>', "[COLOR red][B]", plot)
plot = re.sub('</h2>', "[/B][/COLOR] : ", plot)
plot = re.sub('<p>', "[COLOR green]", plot)
plot = re.sub('</p>', "[/COLOR]\n", plot)
plot = re.sub('<[^>]+>', "", plot)
splot += plot + "\n[COLOR red][B] Sinopsis[/B][/COLOR]\n " + sinopsis
# datos de los enlaces
'''
<a rel="nofollow" href="(.*?)".*?<td><img.*?</td><td>(.*?)</td><td>(.*?)</td></tr>
">Vimple</td>
'''
patron = '<tbody>(.*?)</tbody>'
matchesx = re.compile(patron, re.DOTALL).findall(data)
matchesx = scrapertools.find_multiple_matches(data, patron)
for bloq in matchesx:
patron = 'href="(.*?)".*?0 0">(.*?)</.*?<td>(.*?)</.*?<td>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(bloq)
matches = scrapertools.find_multiple_matches(bloq, patron)
for scrapedurl, scrapedserver, scrapedlang, scrapedquality in matches:
url = urlparse.urljoin(item.url, scrapedurl)
logger.info("Lang:[" + scrapedlang + "] Quality[" + scrapedquality + "] URL[" + url + "]")
url = scrapedurl
patronenlaces = '.*?://(.*?)/'
matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(scrapedurl)
matchesenlaces = scrapertools.find_multiple_matches(scrapedurl, patronenlaces)
scrapedtitle = ""
if scrapedserver == 'Vimple':
scrapedserver = 'vimpleru'
@@ -254,13 +210,14 @@ def findvideos(item):
scrapedserver = 'okru'
server = servertools.get_server_name(scrapedserver)
for scrapedenlace in matchesenlaces:
scrapedtitle = title + " [COLOR white][ [/COLOR]" + "[COLOR green]" + scrapedquality + "[/COLOR]" + "[COLOR white] ][/COLOR]" + " [COLOR red] [" + scrapedlang + "][/COLOR] » " + scrapedserver
scrapedtitle = "[COLOR white][ [/COLOR][COLOR green]" + scrapedquality + "[/COLOR]" + "[COLOR white] ][/COLOR] [COLOR red] [" + scrapedlang + "][/COLOR] » " + scrapedserver
itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, extra=title, url=url,
fanart=item.thumbnail, thumbnail=item.thumbnail, plot=splot, language=scrapedlang,
quality=scrapedquality, server=server))
itemlist.append(item.clone(action="play", title=scrapedtitle, extra=title, url=url,
fanart=item.thumbnail, language=scrapedlang,
quality=scrapedquality, server = server))
tmdb.set_infoLabels(itemlist)
if itemlist:
itemlist.append(Item(channel=item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer",
text_color="magenta"))
# "Add this movie to the KODI library" option
@@ -270,18 +227,15 @@ def findvideos(item):
fulltitle=item.fulltitle))
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
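# The player source comes as one or more Player.decode('...') chunks; decode and join them, then pull the src attribute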
enc = scrapertools.find_multiple_matches(data, "Player\.decode\('(.*?)'\)")
dec = ''
for cod in enc:
dec += decode(cod)
url = scrapertools.find_single_match(dec,'src="(.*?)"')
url = scrapertools.find_single_match(dec, 'src="(.*?)"')
itemlist.append(item.clone(url=url))
return itemlist
@@ -290,7 +244,7 @@ def play(item):
def search(item, texto):
logger.info(item.url)
texto = texto.replace(" ", "+")
item.url = 'http://www.repelis.tv/buscar/?s=%s' % (texto)
item.url = host + '/buscar/?s=%s' % (texto)
logger.info(item.url)
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
@@ -311,8 +265,8 @@ def search(item, texto):
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "")
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
url = item.url + scrapedurl
thumbnail = item.url + scrapedthumbnail
logger.info(url)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, fanart=thumbnail))
@@ -332,7 +286,7 @@ def poranyo(item):
for scrapedurl, scrapedtitle in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "")
url = urlparse.urljoin(item.url, scrapedurl)
url = item.url + scrapedurl
itemlist.append(Item(channel=item.channel, action="menupelis", title=title, fulltitle=title, url=url,
fanart=item.fanart, extra='year'))
@@ -345,13 +299,12 @@ def porcateg(item):
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
patron = '<li class="cat-item cat-item-3">.*?<a href="([^"]+)" title="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "")
url = urlparse.urljoin(item.url, scrapedurl)
url = scrapedurl
logger.info(url)
# if the adult category is not allowed, filter it out
extra = title

View File

@@ -1,49 +0,0 @@
{
"active": true,
"changes": [
{
"date": "16/02/2017",
"description": "Primera versión"
}
],
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "((?:copiapop.com|diskokosmiko.mx)/[^\\s'\"]+)",
"url": "http://\\1"
}
]
},
"free": true,
"id": "copiapop",
"name": "copiapop",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://i.imgur.com/EjbfM7p.png?1",
"version": 1
}

View File

@@ -1,53 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import jsontools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
if "copiapop.com" in page_url:
from channels import copiapop
logueado, error_message = copiapop.login("copiapop.com")
if not logueado:
return False, error_message
data = httptools.downloadpage(page_url).data
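# Scan the page for the provider's error markers to decide whether the file still exists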
if ("File was deleted" or "Not Found" or "File was locked by administrator") in data:
return False, "[Copiapop] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
host = "http://copiapop.com"
host_string = "copiapop"
if "diskokosmiko.mx" in page_url:
host = "http://diskokosmiko.mx"
host_string = "diskokosmiko"
url = scrapertools.find_single_match(data, '<form action="([^"]+)" class="download_form"')
if url:
url = host + url
fileid = url.rsplit("f=", 1)[1]
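# The download form carries an anti-forgery token; replay it in an XHR POST to get the real link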
token = scrapertools.find_single_match(data,
'<div class="download_container">.*?name="__RequestVerificationToken".*?value="([^"]+)"')
post = "fileId=%s&__RequestVerificationToken=%s" % (fileid, token)
headers = {'X-Requested-With': 'XMLHttpRequest'}
data = httptools.downloadpage(url, post, headers).data
data = jsontools.load(data)
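# The endpoint answers with JSON holding the final media URL and its extension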
mediaurl = data.get("DownloadUrl")
extension = data.get("Extension")
video_urls.append([".%s [%s]" % (extension, host_string), mediaurl])
for video_url in video_urls:
logger.info(" %s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -45,7 +45,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
headers['Accept'] = "*/*"
headers['Host'] = "www.flashx.tv"
coding_url = 'https://www.flashx.tv/flashx.php?fxfx=7'
coding_url = 'https://www.flashx.tv/flashx.php?f=x&fxfx=6'
headers['X-Requested-With'] = 'XMLHttpRequest'
httptools.downloadpage(coding_url, headers=headers)

View File

@@ -24,7 +24,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, add_referer = True).data
data = httptools.downloadpage(page_url, add_referer = True, headers=headers).data
packer = scrapertools.find_single_match(data,
"<script type='text/javascript'>(eval.function.p,a,c,k,e,d..*?)</script>")
@@ -32,7 +32,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
data = jsunpack.unpack(packer)
data = re.sub(r'\n|\t|\s+', '', data)
host = scrapertools.find_single_match(data, '\[\{image:"(http://[^/]+/)')
mediaurl = scrapertools.find_single_match(data, ',\{file:"([^"]+)"')
if not mediaurl.startswith(host):

View File

@@ -20,10 +20,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info("url=" + page_url)
data = httptools.downloadpage(page_url).data
key = scrapertools.find_single_match(data, "var mpri_Key\s*=\s*'([^']+)'")
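# The key is traded for a pipe-delimited blob; the token between "file" and "direct" seems to be the fragment needed for the media URL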
data_vt = httptools.downloadpage("http://vidup.me/jwv/%s" % key).data
vt = scrapertools.find_single_match(data_vt, 'direct\|([^\|]+)\|')
vt = scrapertools.find_single_match(data_vt, 'file\|(.*?)\|direct')
# Extract the URL
video_urls = []

View File

@@ -52,5 +52,6 @@
"visible": false
}
],
"thumbnail": "https://s1.postimg.org/597or3a31b/vidzi1.png",
"version": 1
}