Merge pull request #401 from Intel11/master

Updated
This commit is contained in:
Alfa
2018-08-15 10:47:31 -05:00
committed by GitHub
24 changed files with 478 additions and 1220 deletions


@@ -1,10 +1,12 @@
# -*- coding: utf-8 -*-
import os
import random
import re
import threading
import time
import traceback
from platformcode import platformtools
from BaseHTTPServer import HTTPServer
from HTTPWebSocketsHandler import HTTPWebSocketsHandler


@@ -8,6 +8,8 @@ import sys
import threading
import time
from functools import wraps
# Required for the Windows executable (py2exe only bundles modules it sees imported)
import SimpleHTTPServer
sys.dont_write_bytecode = True
from platformcode import config

mediaserver/genera.bat (new file)

@@ -0,0 +1,5 @@
REM Generates the files for the Alfa Mediaserver Windows executable
python setup.py py2exe -p channels,servers,lib,platformcode
xcopy lib dist\lib /y /s /i
xcopy platformcode dist\platformcode /y /s /i
xcopy resources dist\resources /y /s /i


@@ -14,7 +14,7 @@ settings_dic = {}
adult_setting = {}
def get_addon_version(linea_inicio=0, total_lineas=2):
def get_addon_version(linea_inicio=0, total_lineas=2, with_fix=False):
'''
Returns the addon version number, read from the addon.xml file
'''
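A hypothetical sketch of what the new with_fix flag could control; the actual implementation reads addon.xml line by line via linea_inicio/total_lineas, which this hunk does not show:

import os
import re

def get_addon_version_sketch(addon_dir, with_fix=False):
    # Pull version="x.y.z[.fix]" out of addon.xml (helper name and logic are assumptions)
    with open(os.path.join(addon_dir, "addon.xml")) as f:
        data = f.read()
    version = re.search(r'<addon[^>]*\sversion="([^"]+)"', data).group(1)
    if not with_fix:
        # Keep only major.minor.patch, dropping any trailing fix number
        version = ".".join(version.split(".")[:3])
    return version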


@@ -7,12 +7,12 @@ import os
from inspect import isclass
from controller import Controller
from platformcode import logger
from platformcode import config, logger
def load_controllers():
controllers = []
path = os.path.split(__file__)[0]
path = os.path.join(config.get_runtime_path(),"platformcode\controllers")
for fname in os.listdir(path):
mod, ext = os.path.splitext(fname)
fname = os.path.join(path, fname)
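The new lookup hard-codes a Windows path separator ("platformcode\controllers"), which suits the py2exe target but is not portable. A portable sketch of the same path, letting os.path.join pick the separator:

path = os.path.join(config.get_runtime_path(), "platformcode", "controllers")  # sketch, not the committed code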

mediaserver/setup.py (new file)

@@ -0,0 +1,18 @@
# setup.py
# Builds the Alfa Mediaserver executable for Windows using py2exe.
# Build command: python setup.py py2exe -p channels,servers,lib,platformcode
from distutils.core import setup
import glob
import py2exe
setup(packages=['channels','servers','lib','platformcode','platformcode/controllers'],
data_files=[("channels",glob.glob("channels\\*.py")),
("channels",glob.glob("channels\\*.json")),
("servers",glob.glob("servers\\*.py")),
("servers",glob.glob("servers\\*.json")),
("",glob.glob("addon.xml")),
],
console=["alfa.py"]
)
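The same build that genera.bat performs can also be driven from Python; a minimal sketch, assuming a Python 2 environment with py2exe installed:

import subprocess
import sys

# Mirrors the command documented above: python setup.py py2exe -p channels,servers,lib,platformcode
subprocess.check_call([sys.executable, "setup.py", "py2exe",
                       "-p", "channels,servers,lib,platformcode"])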


@@ -1,39 +0,0 @@
{
"id": "alltorrent",
"name": "Alltorrent",
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "altorrent.png",
"fanart": "altorrent.jpg",
"categories": [
"torrent",
"movie"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}


@@ -1,314 +0,0 @@
# -*- coding: utf-8 -*-
import re
import sys
import urllib
import urlparse
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb
from lib import generictools
host = 'http://alltorrent.net/'
__modo_grafico__ = config.get_setting('modo_grafico', 'alltorrent')
def mainlist(item):
logger.info()
itemlist = []
thumb_pelis = get_thumb("channels_movie.png")
thumb_pelis_hd = get_thumb("channels_movie_hd.png")
thumb_series = get_thumb("channels_tvshow.png")
thumb_series_hd = get_thumb("channels_tvshow_hd.png")
thumb_buscar = get_thumb("search.png")
itemlist.append(item.clone(title="[COLOR springgreen][B]Todas Las Películas[/B][/COLOR]", action="listado",
url=host, thumbnail=thumb_pelis, extra="pelicula"))
itemlist.append(item.clone(title="[COLOR springgreen] Incluyen 1080p[/COLOR]", action="listado",
url=host + "rezolucia/1080p/", thumbnail=thumb_pelis_hd, extra="pelicula"))
itemlist.append(item.clone(title="[COLOR springgreen] Incluyen 720p[/COLOR]", action="listado",
url=host + "rezolucia/720p/", thumbnail=thumb_pelis_hd, extra="pelicula"))
itemlist.append(item.clone(title="[COLOR springgreen] Incluyen Hdrip[/COLOR]", action="listado",
url=host + "rezolucia/hdrip/", thumbnail=thumb_pelis, extra="pelicula"))
itemlist.append(item.clone(title="[COLOR springgreen] Incluyen 3D[/COLOR]", action="listado",
url=host + "rezolucia/3d/", thumbnail=thumb_pelis_hd, extra="pelicula"))
itemlist.append(item.clone(title="[COLOR floralwhite][B]Buscar[/B][/COLOR]", action="search", thumbnail=thumb_buscar,
extra="titulo"))
itemlist.append(item.clone(title="[COLOR oldlace] Por Título[/COLOR]", action="search", thumbnail=thumb_buscar,
extra="titulo"))
itemlist.append(item.clone(title="[COLOR oldlace] Por Año[/COLOR]", action="search", thumbnail=thumb_buscar,
extra="año"))
itemlist.append(item.clone(title="[COLOR oldlace] Por Rating Imdb[/COLOR]", action="search", thumbnail=thumb_buscar,
extra="rating"))
return itemlist
def listado(item):
logger.info()
itemlist = []
# Download the page
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
except:
pass
if not data and item.extra != "año": #If the site is down, exit without raising an error
logger.error("ERROR 01: LISTADO: La Web no responde o ha cambiado de URL: " + item.url + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: LISTADO:. La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
return itemlist #if there is no more data, something is wrong; render what we have
elif not data and item.extra == "año": #a year with no data raises an error; we try to avoid it
return itemlist
patron = '<div class="browse-movie-wrap col-xs-10 col-sm-4 col-md-5 col-lg-4"><a href="([^"]+)".*?src="([^"]+)".*?alt="([^"]+)".*?rel="tag">([^"]+)<\/a>\s?<\/div><div class="[^"]+">(.*?)<\/div><\/div><\/div>'
#data = scrapertools.find_single_match(data, patron)
matches = re.compile(patron, re.DOTALL).findall(data)
if not matches and not '<ul class="tsc_pagination tsc_pagination' in data: #error
item = generictools.web_intervenida(item, data) #Check whether the site has been shut down
if item.intervencion: #It has been shut down by court order
item, itemlist = generictools.post_tmdb_episodios(item, itemlist) #Call the method that renders the error
return itemlist #Exit
logger.error("ERROR 02: LISTADO: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: LISTADO: Ha cambiado la estructura de la Web. Reportar el error con el log'))
return itemlist #if there is no more data, something is wrong; render what we have
#logger.debug("PATRON: " + patron)
#logger.debug(matches)
#logger.debug(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedqualities in matches:
item_local = item.clone() #Clone the Item to work on
title_subs = []
title = re.sub('\r\n', '', scrapedtitle).decode('utf8').strip()
item_local.url = scrapedurl
item_local.thumbnail = scrapedthumbnail
scrapedtorrent = ''
if scrapedqualities:
patron_quality = '<a href="([^"]+)"\s?rel="[^"]+"\s?title="[^"]+">(.*?)<\/a>'
matches_quality = re.compile(patron_quality, re.DOTALL).findall(scrapedqualities)
quality = ''
for scrapedtorrent, scrapedquality in matches_quality:
quality_inter = scrapedquality
quality_inter = re.sub('HDr$', 'HDrip', quality_inter)
quality_inter = re.sub('720$', '720p', quality_inter)
quality_inter = re.sub('1080$', '1080p', quality_inter)
if quality:
quality += ', %s' % quality_inter
else:
quality = quality_inter
if quality:
item_local.quality = quality
item_local.language = [] #Check the language in case we find something
if "latino" in scrapedtorrent.lower() or "latino" in item.url or "latino" in title.lower():
item_local.language += ["LAT"]
if "ingles" in scrapedtorrent.lower() or "ingles" in item.url or "vose" in scrapedurl or "vose" in item.url:
if "VOSE" in scrapedtorrent.lower() or "sub" in title.lower() or "vose" in scrapedurl or "vose" in item.url:
item_local.language += ["VOS"]
else:
item_local.language += ["VO"]
if "dual" in scrapedtorrent.lower() or "dual" in title.lower():
item_local.language[0:0] = ["DUAL"]
#Strip unnecessary clutter from the title
title = title.replace("Dual", "").replace("dual", "").replace("Subtitulada", "").replace("subtitulada", "").replace("Subt", "").replace("subt", "").replace("Sub", "").replace("sub", "").replace("(Proper)", "").replace("(proper)", "").replace("Proper", "").replace("proper", "").replace("#", "").replace("(Latino)", "").replace("Latino", "")
title = title.replace("- HDRip", "").replace("(HDRip)", "").replace("- Hdrip", "").replace("(microHD)", "").replace("(DVDRip)", "").replace("(HDRip)", "").replace("(BR-LINE)", "").replace("(HDTS-SCREENER)", "").replace("(BDRip)", "").replace("(BR-Screener)", "").replace("(DVDScreener)", "").replace("TS-Screener", "").replace(" TS", "").replace(" Ts", "")
title = re.sub(r'\??\s?\d*?\&.*', '', title).title().strip()
item_local.from_title = title #Keep this tag for possible title disambiguation
item_local.contentType = "movie"
item_local.contentTitle = title
item_local.extra = "peliculas"
item_local.action = "findvideos"
item_local.title = title.strip()
item_local.infoLabels['year'] = "-"
if scrapedyear >= "1900" and scrapedyear <= "2040":
title_subs += [scrapedyear]
itemlist.append(item_local.clone()) #Render on screen
#if not item.category: #If this field is missing, we come from the first pass of a global search
# return itemlist #Return without the title-polishing phase to save time
#Send the full itemlist to TMDB
tmdb.set_infoLabels(itemlist, True)
#Call the method that polishes the titles obtained from TMDB
item, itemlist = generictools.post_tmdb_listado(item, itemlist)
# Extract the paginator
patron = '<li><a href="[^"]+">(\d+)<\/a><\/li>' #total number of pages
patron += '<li><a href="([^"]+\/page\/(\d+)\/)"\s?rel="[^"]+">Página siguiente[^<]+<\/a><\/li><\/ul><\/div><\/ul>' #next-page url
url_next = ''
if scrapertools.find_single_match(data, patron):
last_page, url_next, next_num = scrapertools.find_single_match(data, patron)
if url_next:
if last_page:
title = '[COLOR gold]Página siguiente >>[/COLOR] %s de %s' % (int(next_num) - 1, last_page)
else:
title = '[COLOR gold]Página siguiente >>[/COLOR] %s' % (int(next_num) - 1)
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url_next, extra=item.extra))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
#Fetch the page data
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
except:
pass
if not data:
logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS:. La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
patron = 'id="modal-quality-\w+"><span>(.*?)</span>.*?class="quality-size">(.*?)</p>.*?href="([^"]+)"' #grabs the .torrent files
matches = re.compile(patron, re.DOTALL).findall(data)
if not matches: #error
logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web. Verificar en la Web y reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
#Call the method that builds the video's general title from all the TMDB data
item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)
#logger.debug("PATRON: " + patron)
#logger.debug(matches)
for scrapedquality, scrapedsize, scrapedtorrent in matches: #read the torrents in the different qualities
#Clone the Item to work on it
item_local = item.clone()
item_local.quality = scrapedquality
if item.infoLabels['duration']:
item_local.quality += scrapertools.find_single_match(item.quality, '(\s\[.*?\])') #Copy the duration
#Append the size for every entry
item_local.quality = '%s [%s]' % (item_local.quality, scrapedsize) #Append the size at the end of the quality
item_local.quality = item_local.quality.replace("G", "G ").replace("M", "M ") #Avoids the reserved word in Unify
#Now render the torrent link
item_local.url = scrapedtorrent
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Build the torrent title
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title) #Strip empty tags
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title) #Strip empty colors
item_local.alive = "??" #Link status unverified
item_local.action = "play" #Play the video
item_local.server = "torrent" #Torrent server
itemlist.append(item_local.clone()) #Render on screen
#logger.debug("TORRENT: " + scrapedtorrent + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / tamaño: " + scrapedsize + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
#logger.debug(item_local)
#Now handle the direct-streaming server
item_local = item.clone()
servidor = 'openload'
item_local.quality = ''
if item.infoLabels['duration']:
item_local.quality = scrapertools.find_single_match(item.quality, '(\s\[.*?\])') #Copy the duration
enlace = scrapertools.find_single_match(data, 'button-green-download-big".*?href="([^"]+)"><span class="icon-play">')
if enlace:
try:
devuelve = servertools.findvideosbyserver(enlace, servidor) #does the link exist?
if devuelve:
enlace = devuelve[0][1] #Save the link
item_local.alive = "??" #By default the link is assumed doubtful
#Calls the check_list_links(itemlist) helper for each server link
item_local.alive = servertools.check_video_link(enlace, servidor, timeout=5) #is the link alive?
#If the link is not active it is skipped
if item_local.alive == "??": #doubtful
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][%s][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (servidor.capitalize(), item_local.quality, str(item_local.language))
elif item_local.alive.lower() == "no": #Not active: prepared, but not rendered
item_local.title = '[COLOR red][%s][/COLOR] [COLOR yellow][%s][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.alive, servidor.capitalize(), item_local.quality, str(item_local.language))
logger.debug(item_local.alive + ": ALIVE / " + item_local.title + " / " + servidor + " / " + enlace)
raise
else: #It is active
item_local.title = '[COLOR yellow][%s][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (servidor.capitalize(), item_local.quality, str(item_local.language))
#Set the remaining Item fields for direct playback
item_local.action = "play"
item_local.server = servidor
item_local.url = enlace
item_local.title = item_local.title.replace("[]", "").strip()
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title).strip()
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title).strip()
itemlist.append(item_local.clone())
#logger.debug(item_local)
except:
pass
return itemlist
def actualizar_titulos(item):
logger.info()
item = generictools.update_title(item) #Call the method that refreshes the title via tmdb.find_and_set_infoLabels
#Return to the next action in the channel
return item
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
if item.extra == "titulo":
item.url = host + "?s=" + texto
elif item.extra == "año":
item.url = host + "weli/" + texto + "/"
else:
item.extra = "imdb"
item.url = host + "imdb/" + texto + "/"
if texto != '':
return listado(item)
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'torrent':
item.url = host
item.extra = "peliculas"
item.channel = "alltorrents"
itemlist = listado(item)
if itemlist[-1].title == "Página siguiente >>":
itemlist.pop()
# Catch the exception so the 'newest' channel is not interrupted when one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist


@@ -0,0 +1,53 @@
{
"id": "seriesanimadas",
"name": "SeriesAnimadas",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "https://s22.postimg.cc/3lcxb3qfl/logo.png",
"banner": "",
"categories": [
"tvshow",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"VOSE"
]
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}


@@ -0,0 +1,301 @@
# -*- coding: utf-8 -*-
# -*- Channel SeriesAnimadas -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import base64
from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from lib import jsunpack
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger
IDIOMAS = {'latino': 'LAT', 'subtitulado':'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = [
'directo',
'openload',
]
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'seriesanimadas')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'seriesanimadas')
host = 'https://www.seriesanimadas.net/'
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title='Nuevos Capitulos', url=host, action='new_episodes', type='tvshows',
thumbnail=get_thumb('new_episodes', auto=True)))
itemlist.append(Item(channel=item.channel, title='Ultimas', url=host + 'series?', action='list_all', type='tvshows',
thumbnail=get_thumb('last', auto=True)))
itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'series?', action='list_all', type='tvshows',
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + 'search?s=',
thumbnail=get_thumb("search", auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def menu_movies(item):
logger.info()
itemlist=[]
itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'movies', action='list_all',
thumbnail=get_thumb('all', auto=True), type='movies'))
itemlist.append(Item(channel=item.channel, title='Genero', action='section',
thumbnail=get_thumb('genres', auto=True), type='movies'))
itemlist.append(Item(channel=item.channel, title='Por Año', action='section',
thumbnail=get_thumb('year', auto=True), type='movies'))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<article class=".*?">.*?<a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)">.*?'
patron +='<span class="year">(\d{4})</span>.*?<span class="(?:animes|tvs)">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, scrapedtype in matches:
title = scrapedtitle
thumbnail = scrapedthumbnail
url = scrapedurl
if scrapedtype == 'Anime':
action = 'episodesxseasons'
elif scrapedtype == 'Serie':
action = 'seasons'
new_item = Item(channel=item.channel,
action=action,
title=title,
url=url,
contentSerieName=scrapedtitle,
thumbnail=thumbnail,
type=scrapedtype,
infoLabels={'year':year})
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, seekTmdb=True)
# Pagination
url_next_page = scrapertools.find_single_match(data,'li><a href="([^"]+)" rel="next">&raquo;</a>')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
return itemlist
def seasons(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron='<li class="gridseason"><a href="([^"]+)"><span class="title">Temporada (\d+)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, season in matches:
infoLabels['season']=season
title = 'Temporada %s' % season
itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, action='episodesxseasons',
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)
return itemlist
def episodesxseasons(item):
logger.info()
itemlist = []
data=get_source(item.url)
patron='<a href="([^"]+)" title="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
if item.type == 'Anime':
season = '1'
else:
season = item.infoLabels['season']
episode = len(matches)
for scrapedurl, scrapedtitle in matches:
infoLabels['episode'] = episode
url = scrapedurl
title = scrapedtitle.replace(' online', '')
title = '%sx%s - %s' % (season, episode, title)
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', infoLabels=infoLabels))
episode -= 1
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist[::-1]
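The page lists episodes newest-first, so the loop above counts down from len(matches) and the list is reversed on return. A small demonstration with assumed sample titles:

titles = ['Episodio 3 online', 'Episodio 2 online', 'Episodio 1 online']  # assumed samples
episode = len(titles)
numbered = []
for t in titles:
    numbered.append('1x%s - %s' % (episode, t.replace(' online', '')))
    episode -= 1
print(numbered[::-1])  # ['1x1 - Episodio 1', '1x2 - Episodio 2', '1x3 - Episodio 3']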
def new_episodes(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<article class="contenedor">.*?<a href="([^"]+)" title=".*?">.*?data-src="([^"]+)" alt="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumb, scrapedtitle in matches:
itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumb,
action='findvideos'))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'video\[\d+\] = .*?src="([^"]+)".*?;'
matches = re.compile(patron, re.DOTALL).findall(data)
option = 1
for scrapedurl in matches:
lang = scrapertools.find_single_match(data, '"#option%s">([^<]+)<' % str(option)).strip()
lang = lang.lower()
if lang not in IDIOMAS:
lang = 'subtitulado'
quality = ''
title = '%s %s'
if 'redirector' in scrapedurl:
url_data = httptools.downloadpage(scrapedurl).data
url = scrapertools.find_single_match(url_data,'window.location.href = "([^"]+)";')
else:
url = scrapedurl
if url != '':
itemlist.append(
Item(channel=item.channel, url=url, title=title, action='play', quality=quality,
language=IDIOMAS[lang], infoLabels=item.infoLabels))
option += 1
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
# Required to filter links
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
itemlist = sorted(itemlist, key=lambda it: it.language)
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
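findvideos stores the bare '%s %s' title template and lets get_servers_itemlist fill it in once each link's server has been identified. A sketch of the idiom with assumed sample values:

title_template = '%s %s'
server, language = 'openload', 'VOSE'  # assumed sample values
print(title_template % (server.capitalize(), language))  # Openload VOSE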
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return search_results(item)
else:
return []
def search_results(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron = '<div class="search-results__img"><a href="([^"]+)" title=".*?"><img src="([^"]+)".*?'
patron += '<h2>([^<]+)</h2></a><div class="description">([^<]+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumb, scrapedtitle, scrapedplot in matches:
title = scrapedtitle
url = scrapedurl
thumbnail = scrapedthumb
plot = scrapedplot
new_item=Item(channel=item.channel, title=title, url=url, contentSerieName=title, thumbnail=thumbnail,
plot=plot, action='seasons')
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas']:
item.url = host + 'movies/'
elif categoria == 'infantiles':
item.url = host + 'genre/animacion/'
elif categoria == 'terror':
item.url = host + 'genre/terror/'
item.type='movies'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist


@@ -15,7 +15,7 @@ from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'http://seriesblanco.xyz/'
host = 'http://seriesblanco.org/'
IDIOMAS = {'es': 'Cast', 'la': 'Lat', 'vos': 'VOSE', 'vo': 'VO'}
list_language = IDIOMAS.values()
@@ -40,7 +40,7 @@ def mainlist(item):
title="Todas",
action="list_all",
thumbnail=get_thumb('all', auto=True),
url=host + 'listado/',
url=host + 'lista-de-series/',
))
itemlist.append(Item(channel=item.channel,
@@ -54,11 +54,12 @@ def mainlist(item):
title="A - Z",
action="section",
thumbnail=get_thumb('alphabet', auto=True),
url=host+'listado/', ))
url=host+'lista-de-series/', ))
itemlist.append(Item(channel=item.channel,
title="Buscar",
action="search",
url=host+"?s=",
thumbnail=get_thumb('search', auto=True)))
itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality)
@@ -69,7 +70,7 @@ def mainlist(item):
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
@@ -79,13 +80,13 @@ def list_all(item):
data = get_source(item.url)
contentSerieName = ''
patron = "<div style='float.*?<a href='(.*?)'>.*?src='(.*?)' title='(.*?)'"
patron = '<div style="float.*?<a href="([^"]+)">.*?src="([^"]+)".*?data-original-title="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = host + scrapedurl
url = scrapedurl
thumbnail = scrapedthumbnail
title = scrapertools.decodeHtmlentities(scrapedtitle)
@@ -103,13 +104,12 @@ def list_all(item):
# Pagination
if itemlist != []:
base_page = scrapertools.find_single_match(item.url,'(.*?)?')
next_page = scrapertools.find_single_match(data, '</span><a href=?pagina=2>>></a>')
next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
if next_page != '':
itemlist.append(Item(channel=item.channel,
action="lista",
action="list_all",
title='Siguiente >>>',
url=base_page+next_page,
url=next_page,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
))
return itemlist
@@ -120,17 +120,14 @@ def section(item):
itemlist = []
data = get_source(item.url)
if item.title == 'Generos':
patron = '<li><a href=([^ ]+)><i class=fa fa-bookmark-o></i> (.*?)</a></li>'
patron = '<li><a href="([^ ]+)"><i class="fa fa-bookmark-o"></i> ([^<]+)</a></li>'
elif item.title == 'A - Z':
patron = "<a dir='ltr' href=(.*?) class='label label-primary'>(.*?)</a>"
patron = '<a dir="ltr" href="([^"]+)" class="label label-primary">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
if item.title == 'Generos':
url = host + scrapedurl
else:
url = scrapedurl
url = scrapedurl
title = scrapedtitle
itemlist.append(Item(channel=item.channel,
action='list_all',
@@ -143,7 +140,7 @@ def seasons(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<span itemprop=seasonNumber class=fa fa-arrow-down>.*?Temporada (\d+) '
patron = '<span itemprop="seasonNumber" class="fa fa-arrow-down">.*?Temporada (\d+) '
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels=item.infoLabels
for scrapedseason in matches:
@@ -184,14 +181,13 @@ def episodesxseason(item):
logger.info()
itemlist = []
data = get_source(item.url)
season = item.contentSeasonNumber
season_data = scrapertools.find_single_match(data, '<div id=collapse%s.*?panel-primary' % season)
patron = "<td><a href='([^ ]+)'.*?itemprop='episodeNumber'>%s+x(\d+)</span> - (.*?) </a>.*?(/banderas.*?)</td>" % season
season_data = scrapertools.find_single_match(data, '<div id="collapse%s".*?</tbody>' % season)
patron = '<td><a href="([^ ]+)".*?itemprop="episodeNumber">%sx(\d+)</span> (.*?) </a>.*?<td>(.*?)</td>' % season
matches = re.compile(patron, re.DOTALL).findall(season_data)
infoLabels = item.infoLabels
for scrapedurl, scraped_episode, scrapedtitle, lang_data in matches:
url = host + scrapedurl
url = scrapedurl
title = '%sx%s - %s' % (season, scraped_episode, scrapedtitle.strip())
infoLabels['episode'] = scraped_episode
thumbnail = item.thumbnail
@@ -217,18 +213,19 @@ def new_episodes(item):
data = get_source(item.url)
patron = "padding-left:15px'><a href=(.*?) >.*?src='(.*?)' title=.*? alt='(.*?) (\d+x\d+)'.*?"
patron += "<span class='strong'>.*?</span>(.*?)</button></a></div>"
data = scrapertools.find_single_match(data,
'<center>Series Online : Capítulos estrenados recientemente</center>.*?</ul>')
patron = '<li><h6.*?src="([^"]+)".*?href="([^"]+)">.*?src="([^"]+)".*? data-original-title=" (\d+x\d+).*?'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedinfo, lang_data in matches:
for lang_data, scrapedurl, scrapedthumbnail, scrapedinfo, in matches:
url = host+scrapedurl
thumbnail = scrapedthumbnail
scrapedinfo = scrapedinfo.split('x')
season = scrapedinfo[0]
episode = scrapedinfo[1]
scrapedtitle = scrapertools.find_single_match(url, 'capitulo/([^/]+)/').replace("-", " ")
title = '%s - %sx%s' % (scrapedtitle, season, episode )
title, language = add_language(title, lang_data)
itemlist.append(Item(channel=item.channel,
@@ -244,7 +241,7 @@ def new_episodes(item):
def add_language(title, string):
logger.info()
languages = scrapertools.find_multiple_matches(string, '/banderas/(.*?).png')
languages = scrapertools.find_multiple_matches(string, '/language/(.*?).png')
language = []
for lang in languages:
@@ -269,34 +266,38 @@ def findvideos(item):
itemlist = []
data = get_source(item.url)
patron = "<a href=([^ ]+) target=_blank><img src='/servidores/(.*?).(?:png|jpg)'.*?sno.*?"
patron += "<span>(.*?)<.*?(/banderas.*?)td"
patron = '<imgsrc="([^"]+)".*?<a class="open-link" data-enlace="([^"]+)".*?<td>([^<]+)</td>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, server, quality, lang_data in matches:
title = server.capitalize()
for lang_data, scrapedurl, quality in matches:
encrypted=False
title = '%s'
if quality == '':
quality = 'SD'
title = '%s [%s]' % (title, quality)
title, language = add_language(title, lang_data)
thumbnail = item.thumbnail
enlace_id, serie_id, se, ep = scrapertools.find_single_match(scrapedurl,'enlace(\d+)/(\d+)/(\d+)/(\d+)/')
url = host + 'ajax/load_enlace.php?serie=%s&temp=%s&cap=%s&id=%s' % (serie_id, se, ep, enlace_id)
url = scrapedurl
if 'streamcrypt' in url:
url = url.replace('https://streamcrypt', 'https://www.streamcrypt')
temp_data = httptools.downloadpage(url, follow_redirects=False, only_headers=True)
if 'location' in temp_data.headers:
url = temp_data.headers['location']
else:
continue
itemlist.append(Item(channel=item.channel,
title=title,
url=url,
action="play",
thumbnail=thumbnail,
server=server,
quality=quality,
language=language,
encrypted=encrypted,
infoLabels=item.infoLabels
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
@@ -307,25 +308,14 @@ def findvideos(item):
return sorted(itemlist, key=lambda it: it.language)
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url, follow_redirects=False).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.infoLabels = item.infoLabels
return itemlist
def search_results(item):
logger.info()
itemlist = []
data = httptools.downloadpage(host + 'finder.php', post=item.post).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = "<a href='(.*?)'>.*?src=(.*?) style.*?value=(.*?)>"
data = get_source(item.url)
patron = '<div style="float.*?<a href="([^"]+)">.*?src="([^"]+)".*?alt="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -344,10 +334,10 @@ def search_results(item):
def search(item, texto):
logger.info()
import urllib
if texto != '':
post = {'query':texto}
post = urllib.urlencode(post)
item.post = post
texto = texto.replace(" ", "+")
item.url = item.url + texto
return search_results(item)
if texto != '':
return list_all(item)
else:
return []


@@ -1,37 +0,0 @@
{
"id": "seriesverde",
"name": "SeriesVerde",
"active": false,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://s33.postimg.cc/96dhv4trj/seriesverde.png",
"banner": "",
"categories": [
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Cast",
"Lat",
"VOSE",
"VO"
]
}
]
}


@@ -1,321 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel SeriesVerde -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'http://seriesverde.com/'
IDIOMAS = {'es': 'Cast', 'la': 'Lat', 'vos': 'VOSE', 'vo': 'VO'}
list_language = IDIOMAS.values()
list_quality = ['SD', 'Micro-HD-720p', '720p', 'HDitunes', 'Micro-HD-1080p' ]
list_servers = ['powvideo','yourupload', 'openload', 'gamovideo', 'flashx', 'clipwatching', 'streamango', 'streamcloud']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel,
title="Todas",
action="list_all",
thumbnail=get_thumb('all', auto=True),
url=host + 'listado/',
))
itemlist.append(Item(channel=item.channel,
title="Generos",
action="section",
thumbnail=get_thumb('genres', auto=True),
url=host,
))
itemlist.append(Item(channel=item.channel,
title="A - Z",
action="section",
thumbnail=get_thumb('alphabet', auto=True),
url=host+'listado/', ))
itemlist.append(Item(channel=item.channel,
title="Buscar",
action="search",
thumbnail=get_thumb('search', auto=True)))
itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality)
autoplay.show_option(item.channel, itemlist)
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
contentSerieName = ''
patron = "<div style='float.*?<a href='(.*?)'>.*?src='(.*?)' title='(.*?)'"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = host + scrapedurl
thumbnail = scrapedthumbnail
title = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(Item(channel=item.channel,
action='seasons',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=scrapedtitle,
contentSerieName=contentSerieName,
context=filtertools.context(item, list_language, list_quality),
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
if itemlist != []:
base_page = scrapertools.find_single_match(item.url,'(.*?)?')
next_page = scrapertools.find_single_match(data, '</span><a href=?pagina=2>>></a>')
if next_page != '':
itemlist.append(Item(channel=item.channel,
action="lista",
title='Siguiente >>>',
url=base_page+next_page,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
))
return itemlist
def section(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.title == 'Generos':
patron = '<li><a href=([^ ]+) rel=nofollow><i class=fa fa-bookmark-o></i> (.*?)</a></li>'
elif item.title == 'A - Z':
patron = "<a dir='ltr' href=(.*?) class='label label-success'>(.*?)</a>"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
if item.title == 'Generos':
url = host + scrapedurl
else:
url = scrapedurl
title = scrapedtitle
itemlist.append(Item(channel=item.channel,
action='list_all',
title=title,
url=url
))
return itemlist
def seasons(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<span itemprop=seasonNumber class=fa fa-arrow-down>.*?Temporada (\d+) '
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels=item.infoLabels
for scrapedseason in matches:
url = item.url
title = 'Temporada %s' % scrapedseason
contentSeasonNumber = scrapedseason
infoLabels['season'] = contentSeasonNumber
thumbnail = item.thumbnail
itemlist.append(Item(channel=item.channel,
action="episodesxseason",
title=title,
url=url,
thumbnail=thumbnail,
contentSeasonNumber=contentSeasonNumber,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library",
extra="episodios",
contentSerieName=item.contentSerieName,
))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseason(tempitem)
return itemlist
def episodesxseason(item):
logger.info()
itemlist = []
data = get_source(item.url)
season = item.contentSeasonNumber
season_data = scrapertools.find_single_match(data, '<div id=collapse%s.*?panel-success' % season)
patron = "<td><a href='([^ ]+)'.*?itemprop='episodeNumber'>%s+x(\d+)</span> - (.*?) </a>.*?(/banderas.*?)</td>" % season
matches = re.compile(patron, re.DOTALL).findall(season_data)
infoLabels = item.infoLabels
for scrapedurl, scraped_episode, scrapedtitle, lang_data in matches:
url = host + scrapedurl
title = '%sx%s - %s' % (season, scraped_episode, scrapedtitle.strip())
infoLabels['episode'] = scraped_episode
thumbnail = item.thumbnail
title, language = add_language(title, lang_data)
itemlist.append(Item(channel=item.channel,
action="findvideos",
title=title,
url=url,
thumbnail=thumbnail,
language=language,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def add_language(title, string):
logger.info()
languages = scrapertools.find_multiple_matches(string, '/banderas/(.*?).png')
language = []
for lang in languages:
if 'jap' in lang or lang not in IDIOMAS:
lang = 'vos'
if len(languages) == 1:
language = IDIOMAS[lang]
title = '%s [%s]' % (title, language)
else:
language.append(IDIOMAS[lang])
title = '%s [%s]' % (title, IDIOMAS[lang])
return title, language
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = "<a href=([^ ]+) target=_blank><img src='/servidores/(.*?).(?:png|jpg)'.*?sno.*?"
patron += "<span>(.*?)<.*?(/banderas.*?)td"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, server, quality, lang_data in matches:
title = server.capitalize()
if quality == '':
quality = 'SD'
title = '%s [%s]' % (title, quality)
title, language = add_language(title, lang_data)
thumbnail = item.thumbnail
enlace_id, serie_id, se, ep = scrapertools.find_single_match(scrapedurl,'enlace(\d+)/(\d+)/(\d+)/(\d+)/')
url = host + 'ajax/load_enlace.php?serie=%s&temp=%s&cap=%s&id=%s' % (serie_id, se, ep, enlace_id)
itemlist.append(Item(channel=item.channel,
title=title,
url=url,
action="play",
thumbnail=thumbnail,
server=server,
quality=quality,
language=language,
infoLabels=item.infoLabels
))
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
return sorted(itemlist, key=lambda it: it.language)
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url, follow_redirects=False).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.infoLabels = item.infoLabels
return itemlist
def search_results(item):
logger.info()
itemlist = []
data = httptools.downloadpage(host + 'finder.php', post=item.post).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = "<a href='(.*?)'>.*?src=(.*?) style.*?value=(.*?)>"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumb, scrapedtitle in matches:
itemlist.append(Item(channel=item.channel,
title=scrapedtitle,
url=host+scrapedurl,
action="seasons",
thumbnail=scrapedthumb,
contentSerieName=scrapedtitle,
context=filtertools.context(item, list_language, list_quality)
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def search(item, texto):
logger.info()
import urllib
if texto != '':
post = {'query':texto}
post = urllib.urlencode(post)
item.post = post
return search_results(item)


@@ -1,48 +0,0 @@
{
"id": "yaske",
"name": "Yaske",
"active": false,
"adult": false,
"language": ["cast", "lat"],
"banner": "yaske.png",
"fanart": "https://github.com/master-1970/resources/raw/master/images/fanart/yaske.png",
"thumbnail": "yaske.png",
"categories": [
"direct",
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
}
]
}


@@ -1,349 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urllib
import unicodedata
from core import channeltools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
idiomas1 = {"/es.png":"CAST","/en_es.png":"VOSE","/la.png":"LAT","/en.png":"ENG"}
domain = "yaske.ro"
HOST = "http://www." + domain
HOST_MOVIES = "http://peliculas." + domain + "/now_playing/"
HOST_TVSHOWS = "http://series." + domain + "/popular/"
HOST_TVSHOWS_TPL = "http://series." + domain + "/tpl"
parameters = channeltools.get_channel_parameters('yaske')
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
color1, color2, color3 = ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E']
def mainlist(item):
logger.info()
itemlist = []
item.url = HOST
item.text_color = color2
item.fanart = fanart_host
thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png"
itemlist.append(item.clone(title="Peliculas", text_bold=True, viewcontent='movies',
thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
itemlist.append(item.clone(title=" Novedades", action="peliculas", viewcontent='movies',
url=HOST_MOVIES,
thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
itemlist.append(item.clone(title=" Estrenos", action="peliculas",
url=HOST + "/premiere", thumbnail=thumbnail % 'estrenos'))
itemlist.append(item.clone(title=" Género", action="menu_buscar_contenido", thumbnail=thumbnail % 'generos', viewmode="thumbnails",
url=HOST
))
itemlist.append(item.clone(title=" Buscar película", action="search", thumbnail=thumbnail % 'buscar',
type = "movie" ))
itemlist.append(item.clone(title="Series", text_bold=True, viewcontent='movies',
thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
itemlist.append(item.clone(title=" Novedades", action="series", viewcontent='movies',
url=HOST_TVSHOWS,
thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
itemlist.append(item.clone(title=" Buscar serie", action="search", thumbnail=thumbnail % 'buscar',
type = "tvshow" ))
return itemlist
def series(item):
logger.info()
itemlist = []
url_p = scrapertools.find_single_match(item.url, '(.*?).page=')
page = scrapertools.find_single_match(item.url, 'page=([0-9]+)')
if not page:
page = 1
url_p = item.url
else:
page = int(page) + 1
if "search" in item.url:
url_p += "&page=%s" %page
else:
url_p += "?page=%s" %page
data = httptools.downloadpage(url_p).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '(?s)class="post-item-image btn-play-item".*?'
patron += 'href="(http://series[^"]+)">.*?'
patron += '<img data-original="([^"]+)".*?'
patron += 'glyphicon-play-circle"></i>([^<]+).*?'
patron += 'glyphicon-calendar"></i>([^<]+).*?'
patron += 'text-muted f-14">(.*?)</h3'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedepisodes, year, scrapedtitle in matches:
scrapedepisodes = scrapedepisodes.strip()
year = year.strip()
contentSerieName = scrapertools.htmlclean(scrapedtitle.strip())
title = "%s (%s)" %(contentSerieName, scrapedepisodes)
if "series" in scrapedurl:
itemlist.append(Item(channel=item.channel, action="temporadas", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, contentSerieName=contentSerieName,
infoLabels={"year": year}, text_color=color1))
# Fetch the basic data for all the movies using multiple threads
tmdb.set_infoLabels(itemlist, True)
# Add pagination if needed
patron_next_page = 'href="([^"]+)">\s*&raquo;'
matches_next_page = scrapertools.find_single_match(data, patron_next_page)
if matches_next_page and len(itemlist)>0:
itemlist.append(
Item(channel=item.channel, action="series", title=">> Página siguiente", thumbnail=thumbnail_host,
url=url_p, folder=True, text_color=color3, text_bold=True))
return itemlist
def temporadas(item):
logger.info()
itemlist = []
post = []
data = httptools.downloadpage(item.url).data
patron = 'media-object" src="([^"]+).*?'
patron += 'media-heading">([^<]+).*?'
patron += '<code>(.*?)</div>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, scrapedcapitulos in matches:
id = scrapertools.find_single_match(item.url, "yaske.ro/([0-9]+)")
season = scrapertools.find_single_match(scrapedtitle, "[0-9]+")
title = scrapedtitle + " (%s)" %scrapedcapitulos.replace("</code>","").replace("\n","")
post = {"data[season]" : season, "data[id]" : id, "name" : "list_episodes" , "both" : "0", "type" : "template"}
post = urllib.urlencode(post)
item.infoLabels["season"] = season
itemlist.append(item.clone(action = "capitulos",
post = post,
title = title,
url = HOST_TVSHOWS_TPL
))
tmdb.set_infoLabels(itemlist)
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title =""))
itemlist.append(item.clone(action = "add_serie_to_library",
channel = item.channel,
extra = "episodios",
title = '[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url = item.url
))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = temporadas(item)
for tempitem in templist:
itemlist += capitulos(tempitem)
return itemlist
def capitulos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url, post=item.post).data
data = data.replace("<wbr>","")
patron = 'href=."([^"]+).*?'
patron += 'media-heading.">([^<]+).*?'
patron += 'fecha de emisi.*?: ([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapeddate in matches:
scrapedtitle = scrapedtitle + " (%s)" %scrapeddate
episode = scrapertools.find_single_match(scrapedurl, "capitulo-([0-9]+)")
query = item.contentSerieName + " " + scrapertools.find_single_match(scrapedtitle, "\w+")
item.infoLabels["episode"] = episode
itemlist.append(item.clone(action = "findvideos",
title = scrapedtitle.decode("unicode-escape"),
query = query.replace(" ","+"),
url = scrapedurl.replace("\\","")
))
tmdb.set_infoLabels(itemlist)
return itemlist
def search(item, texto):
logger.info()
itemlist = []
try:
item.url = HOST + "/search/?query=" + texto.replace(' ', '+')
item.extra = ""
if item.type == "movie":
itemlist.extend(peliculas(item))
else:
itemlist.extend(series(item))
if itemlist[-1].title == ">> Página siguiente":
item_pag = itemlist[-1]
itemlist = sorted(itemlist[:-1], key=lambda Item: Item.contentTitle)
itemlist.append(item_pag)
else:
itemlist = sorted(itemlist, key=lambda Item: Item.contentTitle)
return itemlist
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
item = Item()
try:
if categoria == 'peliculas':
item.url = HOST
elif categoria == 'infantiles':
item.url = HOST + "/genre/16/"
elif categoria == 'terror':
item.url = HOST + "/genre/27/"
else:
return []
itemlist = peliculas(item)
if itemlist[-1].title == ">> Página siguiente":
itemlist.pop()
# Catch the exception so the 'newest' channel is not interrupted when one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def peliculas(item):
logger.info()
itemlist = []
url_p = scrapertools.find_single_match(item.url, '(.*?).page=')
page = scrapertools.find_single_match(item.url, 'page=([0-9]+)')
if not page:
page = 1
url_p = item.url
else:
page = int(page) + 1
if "search" in item.url:
url_p += "&page=%s" %page
else:
url_p += "?page=%s" %page
data = httptools.downloadpage(url_p).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '(?s)class="post-item-image btn-play-item".*?'
patron += 'href="([^"]+)">.*?'
patron += '<img data-original="([^"]+)".*?'
patron += 'glyphicon-calendar"></i>([^<]+).*?'
patron += 'post(.*?)</div.*?'
patron += 'text-muted f-14">(.*?)</h3'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, year, idiomas, scrapedtitle in matches:
query = scrapertools.find_single_match(scrapedurl, 'yaske.ro/[0-9]+/(.*?)/').replace("-","+")
year = year.strip()
patronidiomas = '<img src="([^"]+)"'
matchesidiomas = scrapertools.find_multiple_matches(idiomas, patronidiomas)
idiomas_disponibles = []
for idioma in matchesidiomas:
for lang in idiomas1.keys():
if idioma.endswith(lang):
idiomas_disponibles.append(idiomas1[lang])
if idiomas_disponibles:
idiomas_disponibles = "[" + "/".join(idiomas_disponibles) + "]"
contentTitle = scrapertools.htmlclean(scrapedtitle.strip())
title = "%s %s" % (contentTitle, idiomas_disponibles)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, contentTitle=contentTitle, query = query,
infoLabels={"year": year}, text_color=color1))
# Fetch the basic data for all the movies using multiple threads
tmdb.set_infoLabels(itemlist)
# Add pagination if needed
patron_next_page = 'href="([^"]+)">\s*&raquo;'
matches_next_page = scrapertools.find_single_match(data, patron_next_page)
if matches_next_page and len(itemlist)>0:
itemlist.append(
Item(channel=item.channel, action="peliculas", title=">> Página siguiente", thumbnail=thumbnail_host,
url=url_p, folder=True, text_color=color3, text_bold=True))
return itemlist
def menu_buscar_contenido(item):
logger.info(item)
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'Generos.*?</ul>'
data = scrapertools.find_single_match(data, patron)
patron = 'href="([^"]+)">([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
url = HOST + scrapedurl
itemlist.append(Item(channel = item.channel,
action = "peliculas",
title = scrapedtitle,
url = url,
text_color = color1,
contentType = 'movie',
folder = True,
viewmode = "movie_with_plot"
))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
sublist = []
data = httptools.downloadpage(item.url).data
patron = '(?s)id="online".*?server="([^"]+)"'
mserver = scrapertools.find_single_match(data, patron)
if not item.query:
item.query = scrapertools.find_single_match(item.url, "peliculas.*?/[0-9]+/([^/]+)").replace("-","+")
url_m = "http://olimpo.link/?q=%s&server=%s" %(item.query, mserver)
patron = 'class="favicon.*?domain=(?:www\.|)([^\.]+).*?text-overflow.*?href="([^"]+).*?'
patron += '\[([^\]]+)\].*?\[([^\]]+)\]'
data = httptools.downloadpage(url_m).data
matches = scrapertools.find_multiple_matches(data, patron)
page = 2
while len(matches)>0:
for server, url, idioma, calidad in matches:
if "drive" in server:
server = "gvideo"
sublist.append(item.clone(action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
language=idioma.strip(),
server = server,
title="Ver en %s %s" %(server, calidad)
))
data = httptools.downloadpage(url_m + "&page=%s" %page).data
matches = scrapertools.find_multiple_matches(data, patron)
page +=1
sublist = sorted(sublist, key=lambda Item: Item.quality + Item.server)
for k in ["Español", "Latino", "Ingles - Sub Español", "Ingles"]:
lista_idioma = filter(lambda i: i.language == k, sublist)
if lista_idioma:
itemlist.append(item.clone(title=k, folder=False, infoLabels = "",
text_color=color2, text_bold=True, thumbnail=thumbnail_host))
itemlist.extend(lista_idioma)
tmdb.set_infoLabels(itemlist, True)
# Insert the "Search trailer" and "Add to video library" items
if itemlist and item.extra != "library":
title = "%s [Buscar trailer]" % (item.contentTitle)
itemlist.insert(0, item.clone(channel="trailertools", action="buscartrailer",
text_color=color3, title=title, viewmode="list"))
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca",
action="add_pelicula_to_library", url=item.url, text_color="green",
contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host))
return itemlist
def play(item):
logger.info()
itemlist = []
ddd = httptools.downloadpage(item.url).data
url = "http://olimpo.link" + scrapertools.find_single_match(ddd, '<iframe src="([^"]+)')
item.url = httptools.downloadpage(url + "&ge=1", follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist.append(item.clone(server = ""))
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist


@@ -2902,7 +2902,7 @@ msgid "Search Movies/TV Shows"
msgstr ""
msgctxt "#70047"
msgid " Search by director"
msgid "Search by director"
msgstr ""
msgctxt "#70048"


@@ -2890,7 +2890,7 @@ msgid "Search Movies/TV Shows"
msgstr "Cerca Film/Serie TV"
msgctxt "#70047"
msgid " Search by director"
msgid "Search by director"
msgstr "Cerca per regista"
msgctxt "#70048"


@@ -479,7 +479,7 @@ msgstr "Elegir canales incluidos en la búsqueda"
msgctxt "#59995"
msgid "Saved Searches"
msgstr "Búsquedasguardadas"
msgstr "Búsquedas guardadas"
msgctxt "#59996"
msgid "Delete saved searches"
@@ -2846,7 +2846,7 @@ msgid "Genres"
msgstr "Géneros"
msgctxt "#70033"
msgid "Actors / Actresses by popularity"
msgid "Actors/Actresses by popularity"
msgstr "Actores/Actrices por popularidad"
msgctxt "#70034"
@@ -2858,12 +2858,12 @@ msgid "Search %s"
msgstr "Buscar %s"
msgctxt "#70036"
msgid " Search actor/actress"
msgstr " Buscar actor/actriz"
msgid "Search actor/actress"
msgstr "Buscar actor/actriz"
msgctxt "#70037"
msgid " Search director, writer..."
msgstr " Buscar director, guionista..."
msgid "Search director, writer..."
msgstr "Buscar director, escritor..."
msgctxt "#70038"
msgid "Custom Filter"
@@ -2902,8 +2902,8 @@ msgid "Search Movies/TV Shows"
msgstr "Buscar Películas/Series"
msgctxt "#70047"
msgid " Search by director"
msgstr " Buscar por director"
msgid "Search by director"
msgstr "Buscar por director"
msgctxt "#70048"
msgid " My Account"

View File

@@ -479,7 +479,7 @@ msgstr "Elegir canales incluidos en la búsqueda"
msgctxt "#59995"
msgid "Saved Searches"
msgstr "Búsquedasguardadas"
msgstr "Búsquedas guardadas"
msgctxt "#59996"
msgid "Delete saved searches"
@@ -2846,7 +2846,7 @@ msgid "Genres"
msgstr "Géneros"
msgctxt "#70033"
msgid "Actors / Actresses by popularity"
msgid "Actors/Actresses by popularity"
msgstr "Actores/Actrices por popularidad"
msgctxt "#70034"
@@ -2858,12 +2858,12 @@ msgid "Search %s"
msgstr "Buscar %s"
msgctxt "#70036"
msgid " Search actor/actress"
msgstr " Buscar actor/actriz"
msgid "Search actor/actress"
msgstr "Buscar actor/actriz"
msgctxt "#70037"
msgid " Search director, writer..."
msgstr " Buscar director, guionista..."
msgid "Search director, writer..."
msgstr "Buscar director, escritor..."
msgctxt "#70038"
msgid "Custom Filter"
@@ -2902,8 +2902,8 @@ msgid "Search Movies/TV Shows"
msgstr "Buscar Películas/Series"
msgctxt "#70047"
msgid " Search by director"
msgstr " Buscar por director"
msgid "Search by director"
msgstr "Buscar por director"
msgctxt "#70048"
msgid " My Account"

View File

@@ -479,7 +479,7 @@ msgstr "Elegir canales incluidos en la búsqueda"
msgctxt "#59995"
msgid "Saved Searches"
msgstr "Búsquedasguardadas"
msgstr "Búsquedas guardadas"
msgctxt "#59996"
msgid "Delete saved searches"
@@ -2846,7 +2846,7 @@ msgid "Genres"
msgstr "Géneros"
msgctxt "#70033"
msgid "Actors / Actresses by popularity"
msgid "Actors/Actresses by popularity"
msgstr "Actores/Actrices por popularidad"
msgctxt "#70034"
@@ -2858,12 +2858,12 @@ msgid "Search %s"
msgstr "Buscar %s"
msgctxt "#70036"
msgid " Search actor/actress"
msgstr " Buscar actor/actriz"
msgid "Search actor/actress"
msgstr "Buscar actor/actriz"
msgctxt "#70037"
msgid " Search director, writer..."
msgstr " Buscar director, guionista..."
msgid "Search director, writer..."
msgstr "Buscar director, escritor..."
msgctxt "#70038"
msgid "Custom Filter"
@@ -2902,8 +2902,8 @@ msgid "Search Movies/TV Shows"
msgstr "Buscar Películas/Series"
msgctxt "#70047"
msgid " Search by director"
msgstr " Buscar por director"
msgid "Search by director"
msgstr "Buscar por director"
msgctxt "#70048"
msgid " My Account"

View File

@@ -6,6 +6,10 @@
{
"pattern": "flashx.(?:tv|pw|ws|sx|to)/(?:embed.php\\?c=|embed-|playvid-|)([A-z0-9]+)",
"url": "https://www.flashx.tv/\\1.html"
},
{
"pattern": "flashx.co/([A-z0-9]+).jsp",
"url": "https://www.flashx.to/\\1.jsp"
}
]
},
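
Each entry maps the capture group from "pattern" into the "\1" placeholder of "url". A quick hedged check of the new flashx.co rule (the sample id is invented; note that [A-z] also admits a few punctuation characters between Z and a):

import re

sample = "https://flashx.co/abcd1234.jsp"           # hypothetical embed URL
m = re.search(r"flashx.co/([A-z0-9]+).jsp", sample)
if m:
    resolved = "https://www.flashx.to/%s.jsp" % m.group(1)
    # resolved == "https://www.flashx.to/abcd1234.jsp"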

View File

@@ -17,15 +17,16 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    domain_fx = "(?:co|tv)"
    logger.info("url=" + page_url)
    pfxfx = ""
    data = httptools.downloadpage(page_url, cookies=False).data
    data = data.replace("\n","")
    cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.co/counter.cgi.*?[^(?:'|")]+)""")
    cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.%s/counter.cgi.*?[^(?:'|")]+)""" %domain_fx)
    cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
    playnow = scrapertools.find_single_match(data, 'https://www.flashx.co/dl[^"]+')
    playnow = scrapertools.find_single_match(data, 'https://www.flashx.%s/dl[^"]+' %domain_fx)
    # Obtain the f and fxfx values
    js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.co/js\w+/c\w+.*?[^(?:'|")]+)""")
    js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.%s/js\w+/c\w+.*?[^(?:'|")]+)""" %domain_fx)
    data_fxfx = httptools.downloadpage(js_fxfx).data
    mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
    matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
@@ -35,7 +36,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
    logger.info("mfxfxfx2= %s" %pfxfx)
    if pfxfx == "":
        pfxfx = "ss=yes&f=fail&fxfx=6"
    coding_url = 'https://www.flashx.co/flashx.php?%s' %pfxfx
    coding_url = 'https://www.flashx.co/flashx.php?%s' %(pfxfx)
    # {f: 'y', fxfx: '6'}
    bloque = scrapertools.find_single_match(data, '(?s)Form method="POST" action(.*?)span')
    flashx_id = scrapertools.find_single_match(bloque, 'name="id" value="([^"]+)"')
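
The code that actually joins the extracted pairs into pfxfx sits between the two hunks and is not shown here, so the following is only a hypothetical reconstruction of the idea, assuming the "ss=yes" prefix seen in the fallback value:

import re
import urllib

mfxfx = "{f:y,fxfx:6}"                        # sample object after quote/space stripping
pairs = re.findall(r"(\w+):(\w+)", mfxfx)     # [('f', 'y'), ('fxfx', '6')]
pfxfx = "ss=yes&" + urllib.urlencode(pairs)   # "ss=yes&f=y&fxfx=6"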

View File

@@ -35,8 +35,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
    logger.info("url=" + page_url)
    video_urls = []
    data = httptools.downloadpage(page_url).data
    post = "confirm.x=77&confirm.y=76&block=1"
    if "Please click on this button to open this video" in data:
        post = "confirm.x=77&confirm.y=76&block=1"
        data = httptools.downloadpage(page_url, post=post).data
    patron = 'https://www.rapidvideo.com/e/[^"]+'
    match = scrapertools.find_multiple_matches(data, patron)
@@ -44,6 +44,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
    for url1 in match:
        res = scrapertools.find_single_match(url1, '=(\w+)')
        data = httptools.downloadpage(url1).data
        if "Please click on this button to open this video" in data:
            data = httptools.downloadpage(url1, post=post).data
        url = scrapertools.find_single_match(data, 'source src="([^"]+)')
        ext = scrapertools.get_filename_from_url(url)[-4:]
        video_urls.append(['%s %s [rapidvideo]' % (ext, res), url])
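
Both hunks apply the same idea: request the page, and if the interstitial button text shows up, repeat the request as a POST with the confirm payload. A minimal sketch of that pattern (the helper and constant names are invented, not part of the commit):

from core import httptools

CONFIRM_POST = "confirm.x=77&confirm.y=76&block=1"

def download_confirmed(url):
    # Retry with the confirm POST only when the interstitial is present
    data = httptools.downloadpage(url).data
    if "Please click on this button to open this video" in data:
        data = httptools.downloadpage(url, post=CONFIRM_POST).data
    return data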

View File

@@ -8,35 +8,23 @@ from platformcode import logger
def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "Not Found" in data or "File was deleted" in data:
        return False, "[Watchvideo] El fichero no existe o ha sido borrado"
    return True, ""

def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)
    data = httptools.downloadpage(page_url).data
    enc_data = scrapertools.find_single_match(data, "type='text/javascript'>(eval.*?)\s*</script>")
    dec_data = jsunpack.unpack(enc_data)
    video_urls = []
    media_urls = scrapertools.find_multiple_matches(dec_data, '\{file\s*:\s*"([^"]+)",label\s*:\s*"([^"]+)"\}')
    for media_url, label in media_urls:
        ext = scrapertools.get_filename_from_url(media_url)[-4:]
        video_urls.append(["%s %sp [watchvideo]" % (ext, label), media_url])
    data = httptools.downloadpage(page_url).data
    media_urls = scrapertools.find_multiple_matches(data, 'file:"([^"]+)"')
    for media_url in media_urls:
        ext = "mp4"
        if "m3u8" in media_url:
            ext = "m3u8"
        video_urls.append(["%s [watchvideo]" % (ext), media_url])
    video_urls.reverse()
    m3u8 = scrapertools.find_single_match(dec_data, '\{file\:"(.*?.m3u8)"\}')
    if m3u8:
        title = video_urls[-1][0].split(" ", 1)[1]
        video_urls.insert(0, [".m3u8 %s" % title, m3u8])
    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))
    return video_urls
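
The net effect of the rewrite is that the m3u8 variant, when present, is promoted to the front of the list while reusing the label of the last mp4 source. A worked example of that promotion step, with invented URLs:

video_urls = [[".mp4 360p [watchvideo]", "http://example.com/v_360.mp4"],
              [".mp4 720p [watchvideo]", "http://example.com/v_720.mp4"]]
m3u8 = "http://example.com/v.m3u8"                 # hypothetical playlist URL
if m3u8:
    title = video_urls[-1][0].split(" ", 1)[1]     # "720p [watchvideo]"
    video_urls.insert(0, [".m3u8 %s" % title, m3u8])
# video_urls[0] -> ['.m3u8 720p [watchvideo]', 'http://example.com/v.m3u8']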