Merge pull request #410 from Intel11/master

Updated
This commit is contained in:
Alfa
2018-08-22 14:25:30 -05:00
committed by GitHub
88 changed files with 118 additions and 1938 deletions

mediaserver/datos.txt Normal file
View File

@@ -0,0 +1,3 @@
TempMode
Silent=1
setup=alfa.exe
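
These three lines form a WinRAR SFX script: TempMode extracts to a temporary folder, Silent=1 suppresses the extraction dialog, and setup=alfa.exe launches the installer once extraction finishes. The build script below embeds this file in the self-extracting archive through its -zdatos.txt switch.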

View File

@@ -1,5 +1,9 @@
REM Generates the files for the Alfa Mediaserver Windows executable
REM and also generates the zip for Mediaserver
REM Both are generated at the root of the drive
winrar a -r \Alfa-Mediaserver-.zip \plugin.video.alfa\
python setup.py py2exe -p channels,servers,lib,platformcode
xcopy lib dist\lib /y /s /i
xcopy platformcode dist\platformcode /y /s /i
xcopy resources dist\resources /y /s /i
winrar a -ep1 -r -iiconplatformcode\template\favicon.ico -sfx -zdatos.txt \Alfa-Mediaserver--win dist\
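
For reference, a minimal setup.py consistent with the py2exe invocation above might look like the sketch below; the real file is not part of this diff, and the entry-point script name (alfa.py) is an assumption.

# Hypothetical minimal setup.py matching "python setup.py py2exe" above;
# the actual file is not shown in this diff.
from distutils.core import setup
import py2exe  # registers the "py2exe" distutils command (Python 2)

# The packages are passed on the command line via -p channels,servers,lib,platformcode,
# so only the entry script (name assumed) needs to be declared here.
setup(console=["alfa.py"])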

View File

@@ -1,315 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from os import path
from channels import renumbertools
from core import filetools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from channels import autoplay
list_servers = ['openload',
'directo'
]
list_quality = ['default']
CHANNEL_HOST = "http://animeflv.co"
CHANNEL_DEFAULT_HEADERS = [
["User-Agent", "Mozilla/5.0"],
["Accept-Encoding", "gzip, deflate"],
["Referer", CHANNEL_HOST]
]
REGEX_NEXT_PAGE = "class='current'>\d+?</li><li><a href='([^']+?)'"
REGEX_TITLE = r'(?:bigChar_a" href=.+?>)(.+?)(?:</a>)'
REGEX_THUMB = r'src="(http://media.animeflv\.co/uploads/thumbs/[^"]+?)"'
REGEX_PLOT = r'<span class="info">Línea de historia:</span><p><span>(.*?)</span>'
REGEX_URL = r'href="(http://animeflv\.co/Anime/[^"]+)">'
REGEX_SERIE = r'%s.+?%s([^<]+?)</a><p>(.+?)</p>' % (REGEX_THUMB, REGEX_URL)
REGEX_EPISODE = r'href="(http://animeflv\.co/Ver/[^"]+?)">(?:<span.+?</script>)?(.+?)</a></td><td>(\d+/\d+/\d+)</td></tr>'
REGEX_GENERO = r'<a href="(http://animeflv\.co/genero/[^\/]+/)">([^<]+)</a>'
def get_url_contents(url):
html = httptools.downloadpage(url, headers=CHANNEL_DEFAULT_HEADERS).data
# Strip whitespace before and after opening and closing tags
html = re.sub(r'>\s+<', '><', html)
html = re.sub(r'>\s+', '>', html)
html = re.sub(r'\s+<', '<', html)
return html
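# e.g. "> <a>  text </a>" becomes "><a>text</a>" after the three subs above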
def get_cookie_value():
"""
Gets the Cloudflare cookies
"""
cookie_file = path.join(config.get_data_path(), 'cookies.dat')
cookie_data = filetools.read(cookie_file)
cfduid = scrapertools.find_single_match(
cookie_data, r"animeflv.*?__cfduid\s+([A-Za-z0-9\+\=]+)")
cfduid = "__cfduid=" + cfduid + ";"
cf_clearance = scrapertools.find_single_match(
cookie_data, r"animeflv.*?cf_clearance\s+([A-Za-z0-9\+\=\-]+)")
cf_clearance = " cf_clearance=" + cf_clearance
cookies_value = cfduid + cf_clearance
return cookies_value
header_string = "|User-Agent=Mozilla/5.0&Referer=http://animeflv.co&Cookie=" + \
get_cookie_value()
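# Note added for clarity (not in the original file): the
# "url|Header=Value&Header2=Value2" suffix is Kodi's convention for
# attaching HTTP headers to a resource URL, so thumbnails that get
# header_string appended are fetched with the Cloudflare cookies that
# get_cookie_value() reads from cookies.dat.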
def __extract_info_from_serie(html):
title = scrapertools.find_single_match(html, REGEX_TITLE)
title = clean_title(title)
url = scrapertools.find_single_match(html, REGEX_URL)
thumbnail = scrapertools.find_single_match(
html, REGEX_THUMB) + header_string
plot = scrapertools.find_single_match(html, REGEX_PLOT)
return [title, url, thumbnail, plot]
def __sort_by_quality(items):
"""
Sorts the items by quality in descending order
"""
def func(item):
return int(scrapertools.find_single_match(item.title, r'\[(.+?)\]'))
return sorted(items, key=func, reverse=True)
def clean_title(title):
"""
Removes the year from series or movie names
"""
year_pattern = r'\([\d -]+?\)'
return re.sub(year_pattern, '', title).strip()
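# Illustrative behaviour of the two helpers above (hypothetical titles):
#   clean_title("Dragon Ball (1989 - 1996)")  ->  "Dragon Ball"
#   __sort_by_quality([Item(title="[360]"), Item(title="[1080]")])
#       ->  items reordered so "[1080]" comes first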
def __find_series(html):
"""
Finds series in a listing, e.g. search results, categories, etc.
"""
series = []
# Limit the search to the series listing
list_start = html.find('<table class="listing">')
list_end = html.find('</table>', list_start)
list_html = html[list_start:list_end]
for serie in re.finditer(REGEX_SERIE, list_html, re.S):
thumbnail, url, title, plot = serie.groups()
title = clean_title(title)
thumbnail = thumbnail + header_string
plot = scrapertools.htmlclean(plot)
series.append([title, url, thumbnail, plot])
return series
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="letras",
title="Por orden alfabético"))
itemlist.append(Item(channel=item.channel, action="generos", title="Por géneros",
url= CHANNEL_HOST + "/ListadeAnime"))
itemlist.append(Item(channel=item.channel, action="series", title="Por popularidad",
url=CHANNEL_HOST + "/ListadeAnime/MasVisto"))
itemlist.append(Item(channel=item.channel, action="series", title="Novedades",
url=CHANNEL_HOST + "/ListadeAnime/Nuevo"))
itemlist.append(Item(channel=item.channel, action="series", title="Últimos",
url=CHANNEL_HOST + "/ListadeAnime/LatestUpdate"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar...",
url=CHANNEL_HOST + "/Buscar?s="))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
def letras(item):
logger.info()
base_url = 'http://animeflv.co/ListadeAnime?c='
itemlist = list()
itemlist.append(Item(channel=item.channel, action="series", title="#", url=base_url + "#"))
for letter in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
itemlist.append(Item(channel=item.channel, action="series", title=letter, url=base_url + letter))
return itemlist
def generos(item):
logger.info()
itemlist = []
html = get_url_contents(item.url)
list_genre = re.findall(REGEX_GENERO, html)
for url, genero in list_genre:
itemlist.append(Item(channel=item.channel, action="series", title=genero, url=url))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "%20")
item.url = "%s%s" % (item.url, texto)
html = get_url_contents(item.url)
try:
# A single result was found and it redirected to the series page
if html.find('<title>Ver') >= 0:
show_list = [__extract_info_from_serie(html)]
# A list of results was returned
else:
show_list = __find_series(html)
items = []
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
for show in show_list:
title, url, thumbnail, plot = show
items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
plot=plot, show=title, viewmode="movies_with_plot", context=context))
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return items
def series(item):
logger.info()
page_html = get_url_contents(item.url)
show_list = __find_series(page_html)
items = []
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
for show in show_list:
title, url, thumbnail, plot = show
items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, contentSerieName=title,
plot=plot, show=title, viewmode="movies_with_plot", context=context))
url_next_page = scrapertools.find_single_match(page_html, REGEX_NEXT_PAGE)
if url_next_page:
items.append(Item(channel=item.channel, action="series", title=">> Página Siguiente", url= CHANNEL_HOST + url_next_page))
return items
def episodios(item):
logger.info()
itemlist = []
html_serie = get_url_contents(item.url)
info_serie = __extract_info_from_serie(html_serie)
if info_serie[3]:
plot = info_serie[3]
else:
plot = ''
episodes = re.findall(REGEX_EPISODE, html_serie, re.DOTALL)
es_pelicula = False
for url, title, date in episodes:
episode = scrapertools.find_single_match(title, r'Episodio (\d+)')
new_item = Item(channel=item.channel, action="findvideos",
url=url, thumbnail=item.thumbnail, plot=plot, show=item.show)
# The link belongs to an episode
if episode:
season = 1
episode = int(episode)
season, episode = renumbertools.numbered_for_tratk(
item.channel, item.contentSerieName, season, episode)
new_item.infoLabels["episode"] = episode
new_item.infoLabels["season"] = season
new_item.contentSerieName = item.contentSerieName
title = "%sx%s %s (%s)" % (season, str(episode).zfill(2), "Episodio %s" % episode, date)
# The link belongs to a movie
else:
title = "%s (%s)" % (title, date)
item.url = url
es_pelicula = True
new_item.title=title
new_item.fulltitle="%s %s" % (item.show, title)
itemlist.append(new_item)
# The system supports the video library and at least one
# episode or movie was found
if config.get_videolibrary_support() and len(itemlist) > 0:
if es_pelicula:
item_title = "Añadir película a la videoteca"
item_action = "add_pelicula_to_library"
item_extra = ""
else:
item_title = "Añadir serie a la videoteca"
item_action = "add_serie_to_library"
item_extra = "episodios"
itemlist.append(Item(channel=item.channel, title=item_title, url=item.url,
action=item_action, extra=item_extra, show=item.show))
if not es_pelicula:
itemlist.append(Item(channel=item.channel, title="Descargar todos los episodios",
url=item.url, action="download_all_episodes", extra="episodios",
show=item.show))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
encontrados = []
page_html = get_url_contents(item.url)
regex_api = r'http://player\.animeflv\.co/[^\"]+'
iframe_url = scrapertools.find_single_match(page_html, regex_api)
iframe_html = get_url_contents(iframe_url)
itemlist.extend(servertools.find_video_items(data=iframe_html))
qualities = ["360", "480", "720", "1080"]
for videoitem in itemlist:
if videoitem.url in encontrados:
continue
encontrados.append(videoitem.url)
videoitem.fulltitle = item.fulltitle
videoitem.title = "%s en calidad [%s]" % (videoitem.server, qualities[1])
videoitem.channel = item.channel
videoitem.thumbnail = item.thumbnail
regex_video_list = r'var part = \[([^\]]+)'
videos_html = scrapertools.find_single_match(iframe_html, regex_video_list)
videos = re.findall('"([^"]+)"', videos_html, re.DOTALL)
for quality_id, video_url in enumerate(videos):
if video_url in encontrados:
continue
encontrados.append(video_url)
itemlist.append(Item(channel=item.channel, action="play", url=video_url, show=re.escape(item.show),
title="Ver en calidad [%s]" % (qualities[quality_id]), plot=item.plot,
fulltitle=item.title))
autoplay.start(__sort_by_quality(itemlist), item)
return __sort_by_quality(itemlist)

View File

@@ -1,33 +0,0 @@
{
"id": "animeflv_ru",
"name": "AnimeFLV.RU",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/5nRR9qq.png",
"banner": "animeflv_ru.png",
"compatible": {
"python": "2.7.9"
},
"categories": [
"anime"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Incluir en Novedades - Episodios de anime",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -12,6 +12,21 @@
"movie"
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Subtitulado",
"Español",
"SUB"
]
},
{
"id": "include_in_global_search",
"type": "bool",
@@ -89,20 +104,6 @@
"Perfil 1"
]
},
{
"id": "filterlanguages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"VOSE",
"Latino",
"Español",
"No filtrar"
]
},
{
"id": "filterlinks",
"type": "list",
@@ -117,6 +118,19 @@
]
},
{
"id": "filterlanguages",
"type": "list",
"label": "Mostrar enlaces del canal en idioma...",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"VOSE",
"Latino",
"Español",
"No filtrar"
]
}, {
"id": "viewmode",
"type": "list",
"label": "Elegir vista por defecto (Confluence)...",

View File

@@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
@@ -8,7 +10,13 @@ from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
CHANNEL_HOST = "http://www.cinetux.io/"
IDIOMAS = {'Latino': 'Latino', 'Subtitulado': 'Subtitulado', 'Español': 'Español', 'SUB': 'SUB' }
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'okru', 'vidoza', 'openload', 'powvideo', 'netutv','gvideo']
CHANNEL_HOST = "http://www.cinetux.to/"
# Channel configuration
__modo_grafico__ = config.get_setting('modo_grafico', 'cinetux')
@@ -26,6 +34,7 @@ viewmode = viewmode_options[config.get_setting('viewmode', 'cinetux')]
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
item.viewmode = viewmode
data = httptools.downloadpage(CHANNEL_HOST + "pelicula").data
@@ -53,6 +62,7 @@ def mainlist(item):
itemlist.append(item.clone(action="search", title="Buscar...", text_color=color3,
thumbnail=get_thumb('search', auto=True)))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -129,13 +139,13 @@ def peliculas(item):
patron += '.*?alt="([^"]+)"'
patron += '(.*?)'
patron += 'href="([^"]+)"'
patron += '.*?(?:<span>|<span class="year">)(.+?)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, quality, scrapedurl, scrapedyear in matches:
quality = scrapertools.find_single_match(quality, '.*?quality">([^<]+)')
try:
fulltitle = scrapedtitle
year = scrapertools.find_single_match(scrapedyear,'\d{4}')
if "/" in fulltitle:
fulltitle = fulltitle.split(" /", 1)[0]
scrapedtitle = "%s (%s)" % (fulltitle, year)
@@ -219,8 +229,6 @@ def findvideos(item):
filtro_enlaces = 2
dict_idiomas = {'Español': 2, 'Latino': 1, 'Subtitulado': 0}
data = httptools.downloadpage(item.url).data
if item.infoLabels["year"]:
tmdb.set_infoLabels(item, __modo_grafico__)
if filtro_enlaces != 0:
list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "online", item)
if list_enlaces:
@@ -233,6 +241,14 @@ def findvideos(item):
itemlist.append(item.clone(action="", title="Enlaces Descarga", text_color=color1,
text_bold=True))
itemlist.extend(list_enlaces)
tmdb.set_infoLabels(item, __modo_grafico__)
# Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
autoplay.start(itemlist, item)
if itemlist:
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta"))

View File

@@ -1,80 +0,0 @@
{
"id": "descargasmix",
"name": "DescargasMIX",
"language": ["cast", "lat"],
"active": false,
"adult": false,
"thumbnail": "descargasmix.png",
"banner": "descargasmix.png",
"categories": [
"movie",
"vos",
"torrent",
"documentary",
"anime",
"tvshow"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 2,
"enabled": true,
"visible": true,
"lvalues": [
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Incluir en Novedades - series",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Incluir en Novedades - anime",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_4k",
"type": "bool",
"label": "Incluir en Novedades - 4K",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,582 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urllib
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
__modo_grafico__ = config.get_setting("modo_grafico", "descargasmix")
__perfil__ = config.get_setting("perfil", "descargasmix")
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
color1, color2, color3 = perfil[__perfil__]
host = config.get_setting("host", "descargasmix")
def mainlist(item):
logger.info()
itemlist = []
item.text_color = color1
# Reset the host and check for https errors (in case Kodi was updated)
config.set_setting("url_error", False, "descargasmix")
host = config.set_setting("host", "https://ddmix.net", "descargasmix")
host_check = get_data(host, True)
if host_check and host_check.startswith("http"):
config.set_setting("host", host_check, "descargasmix")
itemlist.append(item.clone(title="Películas", action="lista", fanart="http://i.imgur.com/c3HS8kj.png",
thumbnail=get_thumb('movies', auto=True)))
itemlist.append(item.clone(title="Series", action="lista_series", fanart="http://i.imgur.com/9loVksV.png",
thumbnail=get_thumb('tvshows', auto=True)))
itemlist.append(item.clone(title="Documentales", action="entradas", url="%s/documentales/" % host,
fanart="http://i.imgur.com/Q7fsFI6.png",
thumbnail=get_thumb('documentaries', auto=True)))
itemlist.append(item.clone(title="Anime", action="entradas", url="%s/anime/" % host,
fanart="http://i.imgur.com/whhzo8f.png",
thumbnail=get_thumb('anime', auto=True)))
itemlist.append(item.clone(title="Deportes", action="entradas", url="%s/deportes/" % host,
fanart="http://i.imgur.com/ggFFR8o.png",
thumbnail=get_thumb('deporte', auto=True)))
itemlist.append(item.clone(title="Programas de tv", action="entradas", url="%s/otros/programas-de-tv/" % host,
thumbnail=get_thumb('de la tv', auto=True)))
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(title="Buscar...", action="search", thumbnail=get_thumb('search', auto=True)))
itemlist.append(item.clone(action="setting_channel", title="Configurar canal...", text_color="gold", folder=False))
return itemlist
def setting_channel(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
try:
item.url = "%s/?s=%s" % (host, texto)
return busqueda(item)
# The exception is caught so that a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def busqueda(item):
logger.info()
itemlist = []
data = get_data(item.url)
contenido = ['Películas', 'Series', 'Documentales', 'Anime', 'Deportes', 'Miniseries', 'Vídeos']
bloque = scrapertools.find_single_match(data, '<div id="content" role="main">(.*?)<div id="sidebar" '
'role="complementary">')
patron = '<a class="clip-link".*?href="([^"]+)".*?<img alt="([^"]+)" src="([^"]+)"' \
'.*?<span class="overlay.*?>(.*?)<.*?<p class="stats">(.*?)</p>'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, info, scrapedcat in matches:
if not [True for c in contenido if c in scrapedcat]:
continue
scrapedurl = urllib.unquote(re.sub(r'&amp;b=4|/go\.php\?u=', '', scrapedurl))
scrapedthumbnail = urllib.unquote(re.sub(r'&amp;b=4|/go\.php\?u=', '', scrapedthumbnail))
if not scrapedthumbnail.startswith("http"):
scrapedthumbnail = "http:" + scrapedthumbnail
scrapedthumbnail = scrapedthumbnail.replace("-129x180", "")
if ("Películas" in scrapedcat or "Documentales" in scrapedcat) and "Series" not in scrapedcat:
titulo = scrapedtitle.split("[")[0]
if info:
scrapedtitle += " [%s]" % unicode(info, "utf-8").capitalize().encode("utf-8")
itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl, contentTitle=titulo,
thumbnail=scrapedthumbnail, fulltitle=titulo, contentType="movie"))
else:
itemlist.append(item.clone(action="episodios", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, contentTitle=scrapedtitle,
show=scrapedtitle, contentType="tvshow"))
next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)"')
if next_page:
next_page = urllib.unquote(re.sub(r'&amp;b=4|/go\.php\?u=', '', next_page))
itemlist.append(item.clone(action="busqueda", title=">> Siguiente", url=next_page))
return itemlist
def lista(item):
logger.info()
itemlist = list()
itemlist.append(item.clone(title="Novedades", action="entradas", url="%s/peliculas" % host))
itemlist.append(item.clone(title="Estrenos", action="entradas", url="%s/peliculas/estrenos" % host))
itemlist.append(item.clone(title="Dvdrip", action="entradas", url="%s/peliculas/dvdrip" % host))
itemlist.append(item.clone(title="HD (720p/1080p)", action="entradas", url="%s/peliculas/hd" % host))
itemlist.append(item.clone(title="4K", action="entradas", url="%s/peliculas/4k" % host))
itemlist.append(item.clone(title="HDRIP", action="entradas", url="%s/peliculas/hdrip" % host))
itemlist.append(item.clone(title="Latino", action="entradas",
url="%s/peliculas/latino-peliculas" % host))
itemlist.append(item.clone(title="VOSE", action="entradas", url="%s/peliculas/subtituladas" % host))
itemlist.append(item.clone(title="3D", action="entradas", url="%s/peliculas/3d" % host))
return itemlist
def lista_series(item):
logger.info()
itemlist = list()
itemlist.append(item.clone(title="Novedades", action="entradas", url="%s/series/" % host))
itemlist.append(item.clone(title="Miniseries", action="entradas", url="%s/series/miniseries" % host))
return itemlist
def entradas(item):
logger.info()
itemlist = []
item.text_color = color2
data = get_data(item.url)
bloque = scrapertools.find_single_match(data, '<div id="content" role="main">(.*?)<div id="sidebar" '
'role="complementary">')
contenido = ["series", "deportes", "anime", 'miniseries', 'programas']
c_match = [True for match in contenido if match in item.url]
# Pattern depends on the content type
if True in c_match:
patron = '<a class="clip-link".*?href="([^"]+)".*?<img alt="([^"]+)" src="([^"]+)"' \
'.*?<span class="overlay(|[^"]+)">'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedinfo in matches:
scrapedurl = urllib.unquote(re.sub(r'&amp;b=4|/go\.php\?u=', '', scrapedurl))
if scrapedinfo != "":
scrapedinfo = scrapedinfo.replace(" ", "").replace("-", " ")
scrapedinfo = " [%s]" % unicode(scrapedinfo, "utf-8").capitalize().encode("utf-8")
titulo = scrapedtitle + scrapedinfo
titulo = scrapertools.decodeHtmlentities(titulo)
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedthumbnail = urllib.unquote(re.sub(r'&amp;b=4|/go\.php\?u=', '', scrapedthumbnail))
if not scrapedthumbnail.startswith("http"):
scrapedthumbnail = "http:" + scrapedthumbnail
scrapedthumbnail = scrapedthumbnail.replace("-129x180", "")
scrapedthumbnail = scrapedthumbnail.rsplit("/", 1)[0] + "/" + \
urllib.quote(scrapedthumbnail.rsplit("/", 1)[1])
if "series" in item.url or "anime" in item.url:
item.show = scrapedtitle
itemlist.append(item.clone(action="episodios", title=titulo, url=scrapedurl, thumbnail=scrapedthumbnail,
fulltitle=scrapedtitle, contentTitle=scrapedtitle, contentType="tvshow"))
else:
patron = '<a class="clip-link".*?href="([^"]+)".*?<img alt="([^"]+)" src="([^"]+)"' \
'.*?<span class="overlay.*?>(.*?)<.*?<p class="stats">(.*?)</p>'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, info, categoria in matches:
scrapedurl = urllib.unquote(re.sub(r'&amp;b=4|/go\.php\?u=', '', scrapedurl))
titulo = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.split("[")[0])
action = "findvideos"
show = ""
if "Series" in categoria:
action = "episodios"
show = scrapedtitle
elif categoria and categoria != "Películas" and categoria != "Documentales":
try:
titulo += " [%s]" % categoria.rsplit(", ", 1)[1]
except:
titulo += " [%s]" % categoria
if 'l-espmini' in info:
titulo += " [ESP]"
if 'l-latmini' in info:
titulo += " [LAT]"
if 'l-vosemini' in info:
titulo += " [VOSE]"
if info:
titulo += " [%s]" % unicode(info, "utf-8").capitalize().encode("utf-8")
year = scrapertools.find_single_match(titulo,'\[\d{4}\]')
scrapedthumbnail = urllib.unquote(re.sub(r'&amp;b=4|/go\.php\?u=', '', scrapedthumbnail))
if not scrapedthumbnail.startswith("http"):
scrapedthumbnail = "http:" + scrapedthumbnail
scrapedthumbnail = scrapedthumbnail.replace("-129x180", "")
scrapedthumbnail = scrapedthumbnail.rsplit("/", 1)[0] + "/" + \
urllib.quote(scrapedthumbnail.rsplit("/", 1)[1])
itemlist.append(item.clone(action=action, title=titulo, url=scrapedurl, thumbnail=scrapedthumbnail,
fulltitle=scrapedtitle, contentTitle=scrapedtitle, viewmode="movie_with_plot",
show=show, contentType="movie", infoLabels={'year':year}))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)"')
if next_page:
next_page = urllib.unquote(re.sub(r'&amp;b=4|/go\.php\?u=', '', next_page))
itemlist.append(item.clone(title=">> Siguiente", url=next_page, text_color=color3))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = get_data(item.url)
patron = '(<ul class="menu ses" id="seasons-list">.*?<div class="section-box related-posts">)'
bloque = scrapertools.find_single_match(data, patron)
matches = scrapertools.find_multiple_matches(bloque, '<div class="polo".*?>(.*?)</div>')
for scrapedtitle in matches:
scrapedtitle = scrapedtitle.strip()
new_item = item.clone()
new_item.infoLabels['season'] = scrapedtitle.split(" ", 1)[0].split("x")[0]
new_item.infoLabels['episode'] = scrapedtitle.split(" ", 1)[0].split("x")[1]
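# e.g. a scrapedtitle like "2x05 Nombre del episodio" yields
# season "2" and episode "05" from the two splits above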
if item.fulltitle != "Añadir esta serie a la videoteca":
title = item.fulltitle + " " + scrapedtitle.strip()
else:
title = scrapedtitle.strip()
itemlist.append(new_item.clone(action="findvideos", title=title, extra=scrapedtitle, fulltitle=title,
contentType="episode"))
itemlist.sort(key=lambda it: it.title, reverse=True)
item.plot = scrapertools.find_single_match(data, '<strong>SINOPSIS</strong>:(.*?)</p>')
if item.show != "" and item.extra == "":
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta"))
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show,
text_color="green"))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist[:-2], __modo_grafico__)
except:
pass
return itemlist
def episode_links(item):
logger.info()
itemlist = []
item.text_color = color3
data = get_data(item.url)
data = data.replace("\n", "").replace("\t", "")
# Links block
patron = '<div class="polo".*?>%s(.*?)(?:<div class="polo"|</li>)' % item.extra.strip()
bloque = scrapertools.find_single_match(data, patron)
patron = '<div class="episode-server">.*?data-sourcelk="([^"]+)"' \
'.*?data-server="([^"]+)"' \
'.*?<div class="caliycola">(.*?)</div>'
matches = scrapertools.find_multiple_matches(bloque, patron)
itemlist.append(item.clone(action="", title="Enlaces Online/Descarga", text_color=color1))
lista_enlaces = []
for scrapedurl, scrapedserver, scrapedcalidad in matches:
if scrapedserver == "ul":
scrapedserver = "uploadedto"
if scrapedserver == "streamin":
scrapedserver = "streaminto"
titulo = " %s [%s]" % (unicode(scrapedserver, "utf-8").capitalize().encode("utf-8"), scrapedcalidad)
# Download links
if scrapedserver == "magnet":
itemlist.insert(0,
item.clone(action="play", title=titulo, server="torrent", url=scrapedurl, extra=item.url))
else:
if servertools.is_server_enabled(scrapedserver):
try:
# servers_module = __import__("servers." + scrapedserver)
lista_enlaces.append(item.clone(action="play", title=titulo, server=scrapedserver, url=scrapedurl,
extra=item.url))
except:
pass
lista_enlaces.reverse()
itemlist.extend(lista_enlaces)
if itemlist[0].server == "torrent":
itemlist.insert(0, item.clone(action="", title="Enlaces Torrent", text_color=color1))
return itemlist
def findvideos(item):
logger.info()
if item.contentSeason != '':
return episode_links(item)
itemlist = []
item.text_color = color3
data = get_data(item.url)
item.plot = scrapertools.find_single_match(data, 'SINOPSIS(?:</span>|</strong>):(.*?)</p>')
year = scrapertools.find_single_match(data, '(?:<span class="bold">|<strong>)AÑO(?:</span>|</strong>):\s*(\d+)')
if year:
try:
from core import tmdb
item.infoLabels['year'] = year
tmdb.set_infoLabels_item(item, __modo_grafico__)
except:
pass
old_format = False
# Old-format torrent pattern
if "Enlaces de descarga</div>" in data:
old_format = True
matches = scrapertools.find_multiple_matches(data, 'class="separate3 magnet".*?href="([^"]+)"')
for scrapedurl in matches:
scrapedurl = scrapertools.find_single_match(scrapedurl, '(magnet.*)')
scrapedurl = urllib.unquote(re.sub(r'&amp;b=4', '', scrapedurl))
title = "[Torrent] "
title += urllib.unquote(scrapertools.find_single_match(scrapedurl, 'dn=(.*?)(?i)WWW.DescargasMix'))
itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl,
text_color="green"))
# Online pattern
data_online = scrapertools.find_single_match(data, 'Ver online</div>(.*?)<div class="section-box related-posts">')
if data_online:
title = "Enlaces Online"
if '"l-latino2"' in data_online:
title += " [LAT]"
elif '"l-esp2"' in data_online:
title += " [ESP]"
elif '"l-vose2"' in data_online:
title += " [VOSE]"
patron = 'make_links.*?,[\'"]([^"\']+)["\']'
matches = scrapertools.find_multiple_matches(data_online, patron)
for i, code in enumerate(matches):
enlace = show_links(code)
links = servertools.findvideos(data=enlace[0])
if links and "peliculas.nu" not in links:
if i == 0:
extra_info = scrapertools.find_single_match(data_online, '<span class="tooltiptext">(.*?)</span>')
size = scrapertools.find_single_match(data_online, '(?i)TAMAÑO:\s*(.*?)<').strip()
if size:
title += " [%s]" % size
new_item = item.clone(title=title, action="", text_color=color1)
if extra_info:
extra_info = scrapertools.htmlclean(extra_info)
new_item.infoLabels["plot"] = extra_info
new_item.title += " +INFO"
itemlist.append(new_item)
title = " Ver vídeo en " + links[0][2]
itemlist.append(item.clone(action="play", server=links[0][2], title=title, url=links[0][1]))
scriptg = scrapertools.find_single_match(data, "<script type='text/javascript'>str='([^']+)'")
if scriptg:
gvideo = urllib.unquote_plus(scriptg.replace("@", "%"))
url = scrapertools.find_single_match(gvideo, 'src="([^"]+)"')
if url:
itemlist.append(item.clone(action="play", server="directo", url=url, extra=item.url,
title=" Ver vídeo en Googlevideo (Máxima calidad)"))
# Download pattern
patron = '<div class="(?:floatLeft |)double(?:nuevo|)">(.*?)</div>(.*?)' \
'(?:<div(?: id="mirrors"|) class="(?:contentModuleSmall |)mirrors">|<div class="section-box related-' \
'posts">)'
bloques_descarga = scrapertools.find_multiple_matches(data, patron)
for title_bloque, bloque in bloques_descarga:
if title_bloque == "Ver online":
continue
if '"l-latino2"' in bloque:
title_bloque += " [LAT]"
elif '"l-esp2"' in bloque:
title_bloque += " [ESP]"
elif '"l-vose2"' in bloque:
title_bloque += " [VOSE]"
extra_info = scrapertools.find_single_match(bloque, '<span class="tooltiptext">(.*?)</span>')
size = scrapertools.find_single_match(bloque, '(?i)TAMAÑO:\s*(.*?)<').strip()
if size:
title_bloque += " [%s]" % size
new_item = item.clone(title=title_bloque, action="", text_color=color1)
if extra_info:
extra_info = scrapertools.htmlclean(extra_info)
new_item.infoLabels["plot"] = extra_info
new_item.title += " +INFO"
itemlist.append(new_item)
if '<div class="subiendo">' in bloque:
itemlist.append(item.clone(title=" Los enlaces se están subiendo", action=""))
continue
patron = 'class="separate.*? ([^"]+)".*?(?:make_links.*?,|href=)[\'"]([^"\']+)["\']'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedserver, scrapedurl in matches:
if (scrapedserver == "ul") | (scrapedserver == "uploaded"):
scrapedserver = "uploadedto"
titulo = unicode(scrapedserver, "utf-8").capitalize().encode("utf-8")
if titulo == "Magnet" and old_format:
continue
elif titulo == "Magnet" and not old_format:
title = " Enlace Torrent"
scrapedurl = scrapertools.find_single_match(scrapedurl, '(magnet.*)')
scrapedurl = urllib.unquote(re.sub(r'&amp;b=4', '', scrapedurl))
itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl,
text_color="green"))
continue
if servertools.is_server_enabled(scrapedserver):
try:
# servers_module = __import__("servers." + scrapedserver)
# Gets the number of links
urls = show_links(scrapedurl)
numero = str(len(urls))
titulo = " %s - Nº enlaces: %s" % (titulo, numero)
itemlist.append(item.clone(action="enlaces", title=titulo, extra=scrapedurl, server=scrapedserver))
except:
pass
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta"))
if item.extra != "findvideos" and config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", action="add_pelicula_to_library",
extra="findvideos", url=item.url, infoLabels={'title': item.fulltitle},
fulltitle=item.fulltitle, text_color="green"))
return itemlist
def play(item):
logger.info()
itemlist = []
if not item.url.startswith("http") and not item.url.startswith("magnet"):
post = "source=%s&action=obtenerurl" % urllib.quote(item.url)
headers = {'X-Requested-With': 'XMLHttpRequest'}
data = httptools.downloadpage("%s/wp-admin/admin-ajax.php" % host.replace("https", "http"), post=post,
headers=headers, follow_redirects=False).data
url = scrapertools.find_single_match(data, 'url":"([^"]+)"').replace("\\", "")
if "enlacesmix" in url or "enlacesws.com" in url:
data = httptools.downloadpage(url, headers={'Referer': item.extra}, follow_redirects=False).data
url = scrapertools.find_single_match(data, '<iframe.*?src="([^"]+)"')
links = servertools.findvideosbyserver(url, item.server)
if links:
itemlist.append(item.clone(action="play", server=links[0][2], url=links[0][1]))
else:
itemlist.append(item.clone())
return itemlist
def enlaces(item):
logger.info()
itemlist = []
urls = show_links(item.extra)
numero = len(urls)
for url in urls:
links = servertools.findvideos(data=url)
if links:
for link in links:
if "/folder/" in url:
titulo = link[0]
else:
titulo = "%s - Enlace %s" % (item.title.split("-")[0], str(numero))
numero -= 1
itemlist.append(item.clone(action="play", server=link[2], title=titulo, url=link[1]))
itemlist.sort(key=lambda it: it.title)
return itemlist
def show_links(data):
import base64
data = data.split(",")
len_data = len(data)
urls = []
for i in range(0, len_data):
url = []
value1 = base64.b64decode(data[i])
value2 = value1.split("-")
for j in range(0, len(value2)):
url.append(chr(int(value2[j])))
urls.append("".join(url))
return urls
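# Worked example (illustrative values, not site data): each comma-separated
# part is base64 of "-"-joined character codes, so
#   base64.b64decode("MTA0LTExNi0xMTYtMTEy") == "104-116-116-112"
# and show_links("MTA0LTExNi0xMTYtMTEy") returns ["http"]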
def get_data(url_orig, get_host=False):
try:
if config.get_setting("url_error", "descargasmix"):
raise Exception
response = httptools.downloadpage(url_orig)
if not response.data or "urlopen error [Errno 1]" in str(response.code):
raise Exception
if get_host:
if response.url.endswith("/"):
response.url = response.url[:-1]
return response.url
except:
config.set_setting("url_error", True, "descargasmix")
import random
server_random = ['nl', 'de', 'us']
server = server_random[random.randint(0, 2)]
url = "https://%s.hideproxy.me/includes/process.php?action=update" % server
post = "u=%s&proxy_formdata_server=%s&allowCookies=1&encodeURL=0&encodePage=0&stripObjects=0&stripJS=0&go=" \
% (url_orig, server)
while True:
response = httptools.downloadpage(url, post, follow_redirects=False)
if response.headers.get("location"):
url = response.headers["location"]
post = ""
else:
if get_host:
target = urllib.unquote(scrapertools.find_single_match(url, 'u=([^&]+)&'))
if target.endswith("/"):
target = target[:-1]
if target and target != host:
return target
else:
return ""
break
return response.data
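# Note on the fallback above: when the direct request fails, the page is
# re-fetched through a random *.hideproxy.me server; the while-loop re-POSTs
# and follows "location" headers manually until the proxy stops redirecting,
# and with get_host=True the real host is unpacked from the u= parameter of
# the proxy URL.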
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'torrent':
item.url = host+'/peliculas'
itemlist = entradas(item)
if categoria == 'series':
item.url = host + '/series'
itemlist.extend(entradas(item))
if categoria == '4k':
item.url = host + '/peliculas/4k'
itemlist.extend(entradas(item))
if categoria == 'anime':
item.url = host + '/anime'
itemlist.extend(entradas(item))
if itemlist[-1].title == ">> Siguiente":
itemlist.pop()
# The exception is caught so that a failing channel does not break the 'newest' channel
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return itemlist

View File

@@ -1,71 +0,0 @@
{
"id": "peliculasrey",
"name": "peliculasrey",
"active": false,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "peliculasrey.png",
"banner": "peliculasrey.png",
"categories": [
"direct",
"movie"
],
"settings":[
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Películas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,188 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import logger, config
host = "http://www.peliculasrey.com/"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="peliculas", title="Recientes", url=host))
itemlist.append(Item(channel = item.channel,
action = "filtro",
title = "Año de Lanzamiento",
category = "lanzamiento"
))
itemlist.append(Item(channel = item.channel,
action = "filtro",
title = "Idiomas",
category = "idioma"
))
itemlist.append(Item(channel = item.channel,
action = "filtro",
title = "Por calidad",
category = "calidades"
))
itemlist.append(Item(channel = item.channel,
action = "filtro",
title = "Por género",
category = "generos"
))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar...", url=host))
return itemlist
def filtro(item):
logger.info(item.category)
itemlist = []
patron1 = '<section class="%s">(.*?)</section>' %item.category
patron2 = '<a href="([^"]+).*?title="([^"]+)'
data = httptools.downloadpage(host).data
data = scrapertools.find_single_match(data, patron1)
matches = scrapertools.find_multiple_matches(data, patron2)
for scrapedurl, scrapedtitle in matches:
if "Adulto" in scrapedtitle and config.get_setting("adult_mode") == 0:
continue
itemlist.append(
Item(channel=item.channel, action="peliculas", title=scrapedtitle.strip(), url=scrapedurl,
viewmode="movie"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "?s=" + texto
try:
return peliculas(item)
# The exception is caught so that a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def peliculas(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
tabla_pelis = scrapertools.find_single_match(data,
'class="section col-17 col-main grid-125 overflow clearfix">(.*?)</div></section>')
patron = '<img src="([^"]+)" alt="([^"]+).*?href="([^"]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
year = scrapertools.find_single_match(scrapedtitle, "[0-9]{4}")
fulltitle = scrapedtitle.replace(scrapertools.find_single_match(scrapedtitle, '\([0-9]+\)' ), "")
item.infoLabels['year'] = year
itemlist.append(item.clone(channel = item.channel,
action = "findvideos",
title = scrapedtitle,
url = scrapedurl,
thumbnail = scrapedthumbnail,
plot = "",
fulltitle = fulltitle
))
tmdb.set_infoLabels(itemlist, True)
next_page = scrapertools.find_single_match(data, 'rel="next" href="([^"]+)')
if next_page != "":
itemlist.append(
Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=next_page, folder=True,
viewmode="movie"))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
encontrados = []
data = httptools.downloadpage(item.url).data
patron = 'hand" rel="([^"]+).*?title="(.*?)".*?<span>([^<]+)</span>.*?</span><span class="q">(.*?)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, server_name, language, quality in matches:
if scrapedurl in encontrados:
continue
encontrados.append(scrapedurl)
language = language.strip()
quality = quality.strip()
mq = "(" + quality + ")"
if "http" in quality:
quality = mq = ""
titulo = "%s (" + language + ") " + mq
itemlist.append(item.clone(channel=item.channel,
action = "play",
title = titulo,
url = scrapedurl,
folder = False,
language = language,
quality = quality
))
tmdb.set_infoLabels(itemlist, True)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if itemlist:
itemlist.append(Item(channel=item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer",
text_color="magenta"))
# Opción "Añadir esta película a la biblioteca de KODI"
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir pelicula a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail=item.thumbnail,
fulltitle=item.fulltitle))
return itemlist
def play(item):
item.thumbnail = item.contentThumbnail
return [item]
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host
elif categoria == 'documentales':
item.url = host + "genero/documental/"
elif categoria == 'infantiles':
item.url = host + "genero/animacion-e-infantil/"
elif categoria == 'terror':
item.url = host + "genero/terror/"
elif categoria == 'castellano':
item.url = host + "idioma/castellano/"
elif categoria == 'latino':
item.url = host + "idioma/latino/"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
# The exception is caught so that a failing channel does not break the 'newest' channel
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -28,34 +28,26 @@ list_servers = ['powvideo', 'streamplay', 'filebebo', 'flashx', 'gamovideo', 'no
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
thumb_series = get_thumb("channels_tvshow.png")
thumb_series_az = get_thumb("channels_tvshow_az.png")
thumb_buscar = get_thumb("search.png")
itemlist = []
itemlist.append(
Item(action="listado_alfabetico", title="Listado Alfabetico", channel=item.channel, thumbnail=thumb_series_az))
itemlist.append(
Item(action="novedades", title="Capítulos de estreno", channel=item.channel, thumbnail=thumb_series))
itemlist.append(Item(action="search", title="Buscar", channel=item.channel, thumbnail=thumb_buscar))
itemlist = filtertools.show_option(itemlist, item.channel, list_idiomas, list_quality)
autoplay.show_option(item.channel, itemlist)
return itemlist
def listado_alfabetico(item):
logger.info()
itemlist = [item.clone(action="series_por_letra", title="0-9")]
for letra in string.ascii_uppercase:
itemlist.append(item.clone(action="series_por_letra", title=letra))
return itemlist
@@ -70,7 +62,6 @@ def series_por_letra_y_grupo(item):
logger.info("letra: %s - grupo: %s" % (item.letter, item.extra))
itemlist = []
url = urlparse.urljoin(HOST, "autoload_process.php")
post_request = {
"group_no": item.extra,
"letra": item.letter.lower()
@@ -80,10 +71,6 @@ def series_por_letra_y_grupo(item):
patron = '<div class=list_imagen><img src=(.*?) \/>.*?<div class=list_titulo><a href=(.*?) style=.*?inherit;>(.*?)'
patron +='<.*?justify>(.*?)<.*?Año:<\/b>.*?(\d{4})<'
matches = re.compile(patron, re.DOTALL).findall(data)
#series = re.findall(
# 'list_imagen.+?src="(?P<img>[^"]+).+?<div class="list_titulo"><a[^>]+href="(?P<url>[^"]+)[^>]+>(.*?)</a>', data,
# re.MULTILINE | re.DOTALL)
for img, url, name, plot, year in matches:
new_item= Item(
channel = item.channel,
@@ -99,13 +86,10 @@ def series_por_letra_y_grupo(item):
if year:
tmdb.set_infoLabels_item(new_item)
itemlist.append(new_item)
if len(matches) == 8:
itemlist.append(item.clone(title="Siguiente >>", action="series_por_letra_y_grupo", extra=item.extra + 1))
if item.extra > 0:
itemlist.append(item.clone(title="<< Anterior", action="series_por_letra_y_grupo", extra=item.extra - 1))
return itemlist
@@ -115,77 +99,76 @@ def novedades(item):
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = 'sidebarestdiv><a title=(.*?\d+X\d+) (.*?) href=(.*?)>.*?src=(.*?)>'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for title, language,url, img in matches:
language = IDIOMAS[language]
itemlist.append(item.clone(action="findvideos", title=title, url=urlparse.urljoin(HOST, url), thumbnail=img,
language=language))
return itemlist
def newest(categoria):
logger.info("categoria: %s" % categoria)
if categoria != 'series':
return []
return novedades(Item())
def episodios(item):
logger.info("url: %s" % item.url)
infoLabels = {}
data = httptools.downloadpage(item.url).data
episodes = re.findall('visco.*?href="(?P<url>[^"]+).+?nbsp; (?P<title>.*?)</a>.+?ucapaudio.?>(?P<langs>.*?)</div>',
data, re.MULTILINE | re.DOTALL)
itemlist = []
for url, title, langs in episodes:
s_e = scrapertools.get_season_and_episode(title)
infoLabels = item.infoLabels
infoLabels["season"] = s_e.split("x")[0]
infoLabels["episode"] = s_e.split("x")[1]
languages = " ".join(
["[%s]" % IDIOMAS.get(lang, lang) for lang in re.findall('images/s-([^\.]+)', langs)])
filter_lang = languages.replace("[", "").replace("]", "").split(" ")
itemlist.append(item.clone(action="findvideos",
infoLabels = infoLabels,
language=filter_lang,
title="%s %s %s" % (item.title, title, languages),
url=urlparse.urljoin(HOST, url),
))
itemlist = filtertools.get_links(itemlist, item, list_idiomas, list_quality)
# Opción "Añadir esta serie a la videoteca de XBMC"
tmdb.set_infoLabels(itemlist, True)
# Opción "Añadir esta serie a la videoteca de KODI"
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios"))
return itemlist
def search(item, texto):
logger.info("texto: %s" % texto)
itemlist = []
infoLabels = ()
data = httptools.downloadpage(urlparse.urljoin(HOST, "/buscar.php?term=%s" % texto)).data
data_dict = jsontools.load(data)
try:
tvshows = data_dict["myData"]
except:
return []
return [item.clone(action="episodios",
title=show["titulo"],
show=show["titulo"],
url=urlparse.urljoin(HOST, show["urla"]),
for show in tvshows:
itemlist.append(item.clone(action="episodios",
context=filtertools.context(item, list_idiomas, list_quality),
contentSerieName=show["titulo"],
thumbnail=urlparse.urljoin(HOST, show["img"]),
context=filtertools.context(item, list_idiomas, list_quality)
) for show in tvshows]
title=show["titulo"],
url=urlparse.urljoin(HOST, show["urla"])
))
tmdb.set_infoLabels(itemlist)
return itemlist
def findvideos(item):
logger.info("url: %s" % item.url)
data = httptools.downloadpage(item.url).data
expr = 'mtos' + '.+?' + \
'<div.+?images/(?P<lang>[^\.]+)' + '.+?' + \
'<div[^>]+>\s+(?P<date>[^\s<]+)' + '.+?' + \
@@ -193,52 +176,49 @@ def findvideos(item):
'<div.+?href="(?P<url>[^"]+).+?images/(?P<type>[^\.]+)' + '.+?' + \
'<div[^>]+>\s*(?P<quality>.*?)</div>' + '.+?' + \
'<div.+?<a.+?>(?P<uploader>.*?)</a>'
links = re.findall(expr, data, re.MULTILINE | re.DOTALL)
itemlist = []
try:
filtro_enlaces = config.get_setting("filterlinks", item.channel)
except:
filtro_enlaces = 2
typeListStr = ["Descargar", "Ver"]
for lang, date, server, url, linkType, quality, uploader in links:
linkTypeNum = 0 if linkType == "descargar" else 1
if filtro_enlaces != 2 and filtro_enlaces != linkTypeNum:
continue
if server == "Thevideo": server = "thevideome"
if server == "1fichier": server = "onefichier"
if server == "Uploaded": server = "uploadedto"
itemlist.append(item.clone(
action="play",
title="{linkType} en {server} [{lang}] [{quality}] ({uploader}: {date})".format(
linkType=typeListStr[linkTypeNum],
lang=IDIOMAS.get(lang, lang),
date=date,
server=server.rstrip().capitalize(),
quality=quality,
uploader=uploader),
server=server.lower().rstrip(),
url=urlparse.urljoin(HOST, url),
language=IDIOMAS.get(lang,lang),
quality=quality
)
)
# Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_idiomas, list_quality)
# Required for AutoPlay
autoplay.start(itemlist, item)
return itemlist
def play(item):
logger.info("play: %s" % item.url)
itemlist = []
data = httptools.downloadpage(item.url).data
item.url = scrapertools.find_single_match(data, "location.href='([^']+)")
item.server = ""
itemlist.append(item.clone())
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist[0].thumbnail=item.contentThumbnail
return itemlist

View File

@@ -1,7 +1,7 @@
{
"id": "seriesyonkis",
"name": "Seriesyonkis",
"active": true,
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "seriesyonkis.png",

View File

@@ -52,7 +52,7 @@ def getmainlist(view="thumb_"):
thumbnail=get_thumb(thumb_setting, view),
category=config.get_localized_string(30100), viewmode="list"))
itemlist.append(Item(title=config.get_localized_string(30104) + " (" + config.get_localized_string(20000) +" " + config.get_addon_version(with_fix=False) + ")", channel="help", action="mainlist",
thumbnail=get_thumb("help.png", view),
category=config.get_localized_string(30104), viewmode="list"))
return itemlist

View File

@@ -846,7 +846,7 @@ msgid "Enter URL"
msgstr ""
msgctxt "#60089"
msgid "Enter the URL [Link to server / download]"
msgid "Enter the URL [Link to server/download]"
msgstr ""
msgctxt "#60090"
@@ -3033,10 +3033,6 @@ msgctxt "#70079"
msgid "Remove only links of "
msgstr ""
msgctxt "#70080"
msgid "Do you want Alfa to auto-configure Kodi's video library?"
msgstr ""
msgctxt "#70082"
msgid "Global Search"
msgstr ""

View File

@@ -834,8 +834,8 @@ msgid "Enter URL"
msgstr "Inserisci URL"
msgctxt "#60089"
msgid "Enter the URL [Link to server / download]"
msgstr "Inserire l'URL [Link a server / download]"
msgid "Enter the URL [Link to server/download]"
msgstr "Inserire l'URL [Link a server/download]"
msgctxt "#60090"
msgid "Enter the URL [Direct link to video]."
@@ -3021,14 +3021,6 @@ msgctxt "#70079"
msgid "Remove only links of "
msgstr "Rimuovere solo i collegamenti di "
msgctxt "#70080"
msgid "Do you want Alfa to auto-configure Kodi's video library?"
msgstr "Vuoi che Alfa auto-configuri la videoteca di Kodi?"
msgctxt "#70081"
msgid "If you choose 'No' you can do it later from 'Configuration > Preferences > Paths'."
msgstr "Se scegli 'No' potrai farlo in seguito da 'Configurazione > Preferenze > Percorsi'."
msgctxt "#70082"
msgid "Global Search"
msgstr "Ricerca Globale"

View File

@@ -679,7 +679,7 @@ msgstr "Sincronizacion con Trakt iniciada"
msgctxt "#60046"
msgid "TheMovieDB not present.\nInstall it now?"
msgstr "TheMovieDB\nNo se ha encontrado el Scraper de películas de TheMovieDB.\n¿Desea instalarlo ahora?"
msgstr "TheMovieDB\nNo se ha encontrado el proveedor de información de películas de TheMovieDB.\n¿Desea instalarlo ahora?"
msgctxt "#60047"
msgid "The Movie Database is not installed."
@@ -687,7 +687,7 @@ msgstr "The Movie Database no instalado."
msgctxt "#60048"
msgid "The TVDB not present.\nInstall it now?"
msgstr "The TVDB\nNo se ha encontrado el Scraper de series de The TVDB.\n¿Desea instalarlo ahora?"
msgstr "The TVDB\nNo se ha encontrado el proveedor de información de series de The TVDB.\n¿Desea instalarlo ahora?"
msgctxt "#60049"
msgid "The TVDB is not installed."
@@ -707,15 +707,15 @@ msgstr "Errore di impostazione LibraryPath in BD"
msgctxt "#60053"
msgid "Do you want to configure this scraper in italian as default option for the movies ?"
msgstr "¿Desea configurar este Scraper en español como opción por defecto para películas?"
msgstr "¿Desea configurar este proveedor de información en español como opción por defecto para películas?"
msgctxt "#60054"
msgid "Do you want to configure this scraper in italian as default option for the tv series ?"
msgstr "¿Desea configurar este Scraper en español como opción por defecto para series?"
msgstr "¿Desea configurar este proveedor de información en español como opción por defecto para series?"
msgctxt "#60055"
msgid "Error of provider configuration in BD."
msgstr "Error al configurar el scraper en la BD."
msgstr "Error al configurar el proveedor de información en la BD."
msgctxt "#60056"
msgid "Videolibrary %s not configured"
@@ -3033,14 +3033,6 @@ msgctxt "#70079"
msgid "Remove only links of "
msgstr "Eliminar solo los enlaces de "
msgctxt "#70080"
msgid "Do you want Alfa to auto-configure Kodi's video library?"
msgstr "¿Desea que Alfa auto-configure la videoteca de Kodi?"
msgctxt "#70081"
msgid "If you choose 'No' you can do it later from 'Configuration > Preferences > Paths'."
msgstr "Si pulsa 'No' podra hacerlo desde 'Configuración > Preferencia > Rutas'."
msgctxt "#70082"
msgid "Global Search"
msgstr "Buscador global"
@@ -3091,7 +3083,7 @@ msgstr "The Movie Database"
msgctxt "#70094"
msgid "Select scraper for movies"
msgstr "Seleccione el scraper para las películas"
msgstr "Seleccione el proveedor de información para las películas"
msgctxt "#70095"
msgid "Universal Movie Scraper not present.\nInstall it now?"
@@ -3143,7 +3135,7 @@ msgstr "Si pulsa 'No' podrá hacerlo desde 'Configuración > Preferencias > Ruta
msgctxt "#70107"
msgid "Select scraper for Tv Shows"
msgstr "Seleccione el scraper para las series"
msgstr "Seleccione el proveedor de información para las series"
msgctxt "#70108"
msgid "Icons Set"

View File

@@ -679,7 +679,7 @@ msgstr "Sincronizacion con Trakt iniciada"
msgctxt "#60046"
msgid "TheMovieDB not present.\nInstall it now?"
msgstr "TheMovieDB\nNo se ha encontrado el Scraper de películas de TheMovieDB.\n¿Desea instalarlo ahora?"
msgstr "TheMovieDB\nNo se ha encontrado el proveedor de información de películas de TheMovieDB.\n¿Desea instalarlo ahora?"
msgctxt "#60047"
msgid "The Movie Database is not installed."
@@ -687,7 +687,7 @@ msgstr "The Movie Database no instalado."
msgctxt "#60048"
msgid "The TVDB not present.\nInstall it now?"
msgstr "The TVDB\nNo se ha encontrado el Scraper de series de The TVDB.\n¿Desea instalarlo ahora?"
msgstr "The TVDB\nNo se ha encontrado el proveedor de información de series de The TVDB.\n¿Desea instalarlo ahora?"
msgctxt "#60049"
msgid "The TVDB is not installed."
@@ -707,15 +707,15 @@ msgstr "Errore di impostazione LibraryPath in BD"
msgctxt "#60053"
msgid "Do you want to configure this scraper in italian as default option for the movies ?"
msgstr "¿Desea configurar este Scraper en español como opción por defecto para películas?"
msgstr "¿Desea configurar este proveedor de información en español como opción por defecto para películas?"
msgctxt "#60054"
msgid "Do you want to configure this scraper in italian as default option for the tv series ?"
msgstr "¿Desea configurar este Scraper en español como opción por defecto para series?"
msgstr "¿Desea configurar este proveedor de información en español como opción por defecto para series?"
msgctxt "#60055"
msgid "Error of provider configuration in BD."
msgstr "Error al configurar el scraper en la BD."
msgstr "Error al configurar el proveedor de información en la BD."
msgctxt "#60056"
msgid "Videolibrary %s not configured"
@@ -3033,14 +3033,6 @@ msgctxt "#70079"
msgid "Remove only links of "
msgstr "Eliminar solo los enlaces de "
msgctxt "#70080"
msgid "Do you want Alfa to auto-configure Kodi's video library?"
msgstr "¿Desea que Alfa auto-configure la videoteca de Kodi?"
msgctxt "#70081"
msgid "If you choose 'No' you can do it later from 'Configuration > Preferences > Paths'."
msgstr "Si pulsa 'No' podra hacerlo desde 'Configuración > Preferencia > Rutas'."
msgctxt "#70082"
msgid "Global Search"
msgstr "Buscador global"
@@ -3091,7 +3083,7 @@ msgstr "The Movie Database"
msgctxt "#70094"
msgid "Select scraper for movies"
msgstr "Seleccione el scraper para las películas"
msgstr "Seleccione el proveedor de información para las películas"
msgctxt "#70095"
msgid "Universal Movie Scraper not present.\nInstall it now?"
@@ -3143,7 +3135,7 @@ msgstr "Si pulsa 'No' podrá hacerlo desde 'Configuración > Preferencias > Ruta
msgctxt "#70107"
msgid "Select scraper for Tv Shows"
msgstr "Seleccione el scraper para las series"
msgstr "Seleccione el proveedor de información para las series"
msgctxt "#70108"
msgid "Icons Set"

View File

@@ -679,7 +679,7 @@ msgstr "Sincronizacion con Trakt iniciada"
msgctxt "#60046"
msgid "TheMovieDB not present.\nInstall it now?"
msgstr "TheMovieDB\nNo se ha encontrado el Scraper de películas de TheMovieDB.\n¿Desea instalarlo ahora?"
msgstr "TheMovieDB\nNo se ha encontrado el proveedor de información de películas de TheMovieDB.\n¿Desea instalarlo ahora?"
msgctxt "#60047"
msgid "The Movie Database is not installed."
@@ -687,7 +687,7 @@ msgstr "The Movie Database no instalado."
msgctxt "#60048"
msgid "The TVDB not present.\nInstall it now?"
msgstr "The TVDB\nNo se ha encontrado el Scraper de series de The TVDB.\n¿Desea instalarlo ahora?"
msgstr "The TVDB\nNo se ha encontrado el proveedor de información de series de The TVDB.\n¿Desea instalarlo ahora?"
msgctxt "#60049"
msgid "The TVDB is not installed."
@@ -707,15 +707,15 @@ msgstr "Errore di impostazione LibraryPath in BD"
msgctxt "#60053"
msgid "Do you want to configure this scraper in italian as default option for the movies ?"
msgstr "¿Desea configurar este Scraper en español como opción por defecto para películas?"
msgstr "¿Desea configurar este proveedor de información en español como opción por defecto para películas?"
msgctxt "#60054"
msgid "Do you want to configure this scraper in italian as default option for the tv series ?"
msgstr "¿Desea configurar este Scraper en español como opción por defecto para series?"
msgstr "¿Desea configurar este proveedor de información en español como opción por defecto para series?"
msgctxt "#60055"
msgid "Error of provider configuration in BD."
msgstr "Error al configurar el scraper en la BD."
msgstr "Error al configurar el proveedor de información en la BD."
msgctxt "#60056"
msgid "Videolibrary %s not configured"
@@ -3033,14 +3033,6 @@ msgctxt "#70079"
msgid "Remove only links of "
msgstr "Eliminar solo los enlaces de "
msgctxt "#70080"
msgid "Do you want Alfa to auto-configure Kodi's video library?"
msgstr "¿Desea que Alfa auto-configure la videoteca de Kodi?"
msgctxt "#70081"
msgid "If you choose 'No' you can do it later from 'Configuration > Preferences > Paths'."
msgstr "Si pulsa 'No' podra hacerlo desde 'Configuración > Preferencia > Rutas'."
msgctxt "#70082"
msgid "Global Search"
msgstr "Buscador global"
@@ -3091,7 +3083,7 @@ msgstr "The Movie Database"
msgctxt "#70094"
msgid "Select scraper for movies"
msgstr "Seleccione el scraper para las películas"
msgstr "Seleccione el proveedor de información para las películas"
msgctxt "#70095"
msgid "Universal Movie Scraper not present.\nInstall it now?"
@@ -3143,7 +3135,7 @@ msgstr "Si pulsa 'No' podrá hacerlo desde 'Configuración > Preferencias > Ruta
msgctxt "#70107"
msgid "Select scraper for Tv Shows"
msgstr "Seleccione el scraper para las series"
msgstr "Seleccione el proveedor de información para las series"
msgctxt "#70108"
msgid "Icons Set"

[55 deleted binary image files (2.8 KiB to 579 KiB each); previews not shown.]

@@ -1,41 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(?:embed.|)auroravid.to/(?:video/|embed/\\?v=)([A-z0-9]{13})",
                "url": "http://www.auroravid.to/embed/?v=\\1"
            }
        ]
    },
    "free": true,
    "id": "auroravid",
    "name": "auroravid",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ]
}
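
Each connector's find_videos block is declarative: pattern is a regex run over scraped page data, and url rebuilds the canonical embed link from the captured id. A minimal sketch of that resolution using plain re (core.servertools performs this for real):

# Hedged sketch: expand a find_videos pattern into canonical embed URLs.
import re

pattern = r"(?:embed.|)auroravid.to/(?:video/|embed/\?v=)([A-z0-9]{13})"
template = "http://www.auroravid.to/embed/?v=\\1"

def resolve(text):
    # Rewrite every matched link to the canonical embed form.
    return [re.sub(pattern, template, m.group(0)) for m in re.finditer(pattern, text)]

print(resolve("<a href='http://auroravid.to/video/abcdefgh12345'>x</a>"))
# ['http://www.auroravid.to/embed/?v=abcdefgh12345']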

@@ -1,43 +0,0 @@
# -*- coding: utf-8 -*-
import re

from core import httptools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "This file no longer exists on our servers" in data:
        return False, "[Auroravid] El fichero ha sido borrado"
    elif "is being converted" in data:
        return False, "[Auroravid] El fichero está en proceso todavía"
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    video_urls = []
    videourls = scrapertools.find_multiple_matches(data, 'src\s*:\s*[\'"]([^\'"]+)[\'"]')
    if not videourls:
        videourls = scrapertools.find_multiple_matches(data, '<source src=[\'"]([^\'"]+)[\'"]')
    for videourl in videourls:
        if videourl.endswith(".mpd"):
            # DASH manifests are rewritten to the host's direct .mp4 download URL
            id = scrapertools.find_single_match(videourl, '/dash/(.*?)/')
            videourl = "http://www.auroravid.to/download.php%3Ffile=mm" + "%s.mp4" % id
            videourl = re.sub(r'/dl(\d)*/', '/dl/', videourl)
        ext = scrapertools.get_filename_from_url(videourl)[-4:]
        videourl = videourl.replace("%3F", "?") + \
            "|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0"
        video_urls.append([ext + " [auroravid]", videourl])
    return video_urls
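
Every server connector exposes the same two entry points, test_video_exists and get_video_url, so callers can drive any host uniformly. A hedged usage sketch (import path and video id are assumed for illustration):

# Hedged sketch: driving the connector contract above.
from servers import auroravid  # assumed import path

page_url = "http://www.auroravid.to/embed/?v=abcdefgh12345"  # made-up id
exists, message = auroravid.test_video_exists(page_url)
if exists:
    for label, media_url in auroravid.get_video_url(page_url):
        print(label, media_url)  # e.g. ".mp4 [auroravid]" plus the direct URL
else:
    print(message)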

@@ -1,41 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(?:backin).net/([A-Z0-9]+)",
                "url": "http://backin.net/s/generating.php?code=\\1"
            }
        ]
    },
    "free": true,
    "id": "backin",
    "name": "backin",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ]
}

@@ -1,39 +0,0 @@
# -*- coding: utf-8 -*-
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = scrapertools.cache_page(page_url)
    # if '<meta property="og:title" content=""/>' in data:
    #     return False, "The video has been cancelled from Backin.net"
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)
    video_urls = []
    headers = []
    headers.append(["User-Agent",
                    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_5) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.52 Safari/537.17"])
    # First access
    data = scrapertools.cache_page(page_url, headers=headers)
    logger.info("data=" + data)
    # Direct .mp4 URL from the page
    url = scrapertools.find_single_match(data, 'type="video/mp4" src="([^"]+)"')
    logger.info("url=" + url)
    # Video URL
    video_urls.append([".mp4" + " [backin]", url])
    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))
    return video_urls

@@ -1,44 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "bigfile.to/((?:list|file)/[\\w]+)",
                "url": "https://www.bigfile.to/\\1"
            }
        ]
    },
    "free": false,
    "id": "bigfile",
    "name": "bigfile",
    "premium": [
        "realdebrid"
    ],
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ]
}

@@ -1,15 +0,0 @@
# -*- coding: utf-8 -*-
from platformcode import logger


# bigfile is premium-only ("free": false above); this free connector
# resolves nothing, presumably leaving the link to the configured debrid
# service (realdebrid, per the JSON above).
def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    return video_urls

@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "powvideo.(?:net|xyz)/(?:embed-|iframe-|preview-|)([a-z0-9]+)",
"pattern": "powvideo.(?:net|xyz|cc)/(?:embed-|iframe-|preview-|)([a-z0-9]+)",
"url": "http://powvideo.net/iframe-\\1-954x562.html"
}
]
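
The only functional change here is the extra cc TLD in the alternation; a quick hedged check that the widened pattern matches the new domain:

# Hedged check of the widened powvideo pattern.
import re

pattern = r"powvideo.(?:net|xyz|cc)/(?:embed-|iframe-|preview-|)([a-z0-9]+)"
match = re.search(pattern, "http://powvideo.cc/abc123xyz")
print(match.group(1))  # abc123xyz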

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from core import httptools, scrapertools
from platformcode import logger
from platformcode import config, logger
def test_video_exists(page_url):

@@ -2,7 +2,7 @@
from core import httptools
from core import scrapertools
from platformcode import logger
from platformcode import config, logger
def test_video_exists(page_url):

@@ -1,58 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [
            "http://streamin.to/embed-theme.html",
            "http://streamin.to/embed-jquery.html",
            "http://streamin.to/embed-s.html",
            "http://streamin.to/embed-images.html",
            "http://streamin.to/embed-faq.html",
            "http://streamin.to/embed-embed.html",
            "http://streamin.to/embed-ri.html",
            "http://streamin.to/embed-d.html",
            "http://streamin.to/embed-css.html",
            "http://streamin.to/embed-js.html",
            "http://streamin.to/embed-player.html",
            "http://streamin.to/embed-cgi.html"
        ],
        "patterns": [
            {
                "pattern": "streamin.to/(?:embed-)?([a-z0-9A-Z]+)",
                "url": "http://streamin.to/embed-\\1.html"
            }
        ]
    },
    "free": true,
    "id": [
        "streaminto",
        "streamin"
    ],
    "name": "streamin",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "server_streaminto.png"
}
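
ignore_urls acts as a pre-filter: the streamin.to template assets listed above would otherwise satisfy the id pattern (e.g. embed-theme). A hedged sketch of that filtering step (core.servertools does this for real):

# Hedged sketch: drop known non-video URLs before pattern matching.
import re

def find_ids(links, ignore_urls, pattern):
    for link in links:
        if link in ignore_urls:
            continue  # template asset, not a video id
        m = re.search(pattern, link)
        if m:
            yield m.group(1)

ids = find_ids(["http://streamin.to/embed-theme.html", "http://streamin.to/embed-a1b2c3d4.html"],
               ["http://streamin.to/embed-theme.html"],
               r"streamin.to/(?:embed-)?([a-z0-9A-Z]+)")
print(list(ids))  # ['a1b2c3d4']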

@@ -1,77 +0,0 @@
# -*- coding: utf-8 -*-
import re

from core import httptools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "File was deleted" in data:
        return False, "El archivo no existe<br/>en streaminto o ha sido borrado."
    elif "Video is processing now" in data:
        return False, "El archivo está siendo procesado<br/>Prueba dentro de un rato."
    else:
        return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)
    data = re.sub(r'\n|\t|\s+', '', httptools.downloadpage(page_url).data)
    video_urls = []
    try:
        media_url = scrapertools.get_match(data, """.setup\({file:"([^"]+)",image""")
    except:
        # The player config is inside packed JS; unpack it first
        js_data = scrapertools.find_single_match(data, "(eval.function.p,a,c,k,e.*?)</script>")
        js_data = unPack(js_data)
        media_url = scrapertools.get_match(js_data, """.setup\({file:"([^"]+)",image""")
    # Offer both container variants: the host serves the same stream as v.mp4 and v.flv
    if media_url.endswith("v.mp4"):
        media_url_mp42flv = re.sub(r'/v.mp4$', '/v.flv', media_url)
        video_urls.append(
            [scrapertools.get_filename_from_url(media_url_mp42flv)[-4:] + " [streaminto]", media_url_mp42flv])
    if media_url.endswith("v.flv"):
        media_url_flv2mp4 = re.sub(r'/v.flv$', '/v.mp4', media_url)
        video_urls.append(
            [scrapertools.get_filename_from_url(media_url_flv2mp4)[-4:] + " [streaminto]", media_url_flv2mp4])
    video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [streaminto]", media_url])
    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))
    return video_urls


def unPack(packed):
    # Rebuild the keyword table and substitute each base-N token back
    # into the payload.
    pattern = "}\('(.*)', *(\d+), *(\d+), *'(.*)'\.split\('([^']+)'\)"
    d = [d for d in re.search(pattern, packed, re.DOTALL).groups()]
    p = d[0]
    a = int(d[1])
    c = int(d[2])
    k = d[3].split(d[4])
    if a <= 62:
        toString = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    else:
        toString = """ !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[]^_`abcdefghijklmnopqrstuvwxyz{|}~"""

    def e(c):
        return toString[c] if c < a else toString[c // a] + toString[c % a]

    while c > 0:
        c -= 1
        if k[c]:
            x = e(c)
        else:
            x = k[c]
        y = k[c]
        p = re.sub(r"(\b%s\b)" % x, y, p)
    return p
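
unPack reverses Dean Edwards' p.a.c.k.e.r. obfuscation used by the host's player page. A hedged worked example, reusing unPack as defined above on a toy payload:

# Hedged check of the unpacker on a minimal packed payload.
packed = "eval(function(p,a,c,k,e,d){return p}('0 1', 62, 2, 'hello|world'.split('|')))"
print(unPack(packed))  # hello world  (token 0 -> hello, token 1 -> world)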

@@ -1,41 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "tune.pk/player/embed_player.php\\?vid\\=(\\d+)",
                "url": "http://embed.tune.pk/play/\\1?autoplay=no"
            }
        ]
    },
    "free": true,
    "id": "tunepk",
    "name": "tunepk",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ]
}

@@ -1,32 +0,0 @@
# -*- coding: utf-8 -*-
import re

from core import scrapertools
from platformcode import logger


# Returns an array of possible video url's from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    data = scrapertools.cache_page(page_url)
    logger.info(data)
    patron = 'file: "([^"]+)",\s+'
    patron += 'width: "[^"]+",\s+'
    patron += 'height: "[^"]+",\s+'
    patron += 'label : "([^"]+)",\s+'
    patron += 'type : "([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for url, calidad, formato in matches:
        video_url = ["%s %s [tune.pk]" % (calidad, formato), url]
        video_urls.append(video_url)
    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))
    return video_urls
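
The patron string is assembled piecewise to mirror the jwplayer sources block that tune.pk emitted, capturing file, label and type in one pass. A hedged demo against a synthetic snippet of that markup:

# Hedged demo of the sources regex on synthetic player markup.
import re

patron = (r'file: "([^"]+)",\s+'
          r'width: "[^"]+",\s+'
          r'height: "[^"]+",\s+'
          r'label : "([^"]+)",\s+'
          r'type : "([^"]+)"')
sample = 'file: "http://example.invalid/v.mp4", width: "1280", height: "720", label : "720p", type : "video/mp4"'
print(re.findall(patron, sample))
# [('http://example.invalid/v.mp4', '720p', 'video/mp4')]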

@@ -1,45 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(http://(?:www.)?tu.tv[^\"]+)",
                "url": "\\1"
            },
            {
                "pattern": "tu.tv/(iframe/\\d+)",
                "url": "http://tu.tv/\\1"
            }
        ]
    },
    "free": true,
    "id": "tutv",
    "name": "tutv",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ]
}

@@ -1,56 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urllib

from core import scrapertools
from platformcode import logger


# Returns an array of possible video url's from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    # Look for the ID in the URL
    id = extract_id(page_url)
    # If it is not there, extract it from the page
    if id == "":
        # Download the page
        data = scrapertools.cache_page(page_url)
        patron = '<link rel="video_src" href="([^"]+)"/>'
        matches = re.compile(patron, re.DOTALL).findall(data)
        if len(matches) > 0:
            id = extract_id(matches[0])
        else:
            id = ""
    if id == "":
        id = scrapertools.get_match(page_url, "tu.tv/iframe/(\d+)")
    # Download the descriptor
    url = "http://tu.tv/visualizacionExterna2.php?web=undefined&codVideo=" + id
    data = scrapertools.cache_page(url)
    # Get the video link
    patronvideos = 'urlVideo0=([^\&]+)\&'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    # scrapertools.printMatches(matches)
    url = urllib.unquote_plus(matches[0])
    video_urls = [["[tu.tv]", url]]
    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))
    return video_urls


def extract_id(text):
    patron = "xtp\=([a-zA-Z0-9]+)"
    matches = re.compile(patron, re.DOTALL).findall(text)
    if len(matches) > 0:
        devuelve = matches[0]
    else:
        devuelve = ""
    return devuelve
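
extract_id pulls the xtp token that tu.tv embedded in its external player URLs. A hedged example, reusing extract_id as defined above on a made-up link:

# Hedged demo of extract_id on a made-up player URL.
print(extract_id("http://tu.tv/flash/play.swf?xtp=a1b2c3"))  # a1b2c3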