Delete Spanish channels

This commit is contained in:
mac12m99
2019-04-17 20:02:20 +02:00
parent b29b861ee6
commit 7a0500ae97
264 changed files with 0 additions and 56451 deletions

View File

@@ -1,12 +0,0 @@
{
"id": "abtoon",
"name": "abtoon",
"active": true,
"adult": false,
"language": ["esp", "lat"],
"thumbnail": "http://i.imgur.com/EpNUqsD.png",
"banner": "http://i.imgur.com/c1YTgNT.png",
"categories": [
"tvshow"
]
}

View File

@@ -1,263 +0,0 @@
# -*- coding: utf-8 -*-
import re
from channels import renumbertools
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import filtertools
from channels import autoplay
from lib import gktools
IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
list_servers = ['openload']
list_quality = ['default']
host = "https://abtoon.net"
def mainlist(item):
logger.info()
thumb_series = get_thumb("channels_tvshow.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(
Item(channel=item.channel, action="lista", title="Series Actuales", url=host+'/p/actuales',
thumbnail=thumb_series))
itemlist.append(
Item(channel=item.channel, action="lista", title="Series Clasicas", url=host+'/p/clasicas',
thumbnail=thumb_series))
itemlist.append(
Item(channel=item.channel, action="lista", title="Series Anime", url=host + '/p/anime',
thumbnail=thumb_series))
itemlist.append(
Item(channel=item.channel, action="lista", title="Series Live Action", url=host + '/p/live-action',
thumbnail=thumb_series))
itemlist.append(
Item(channel=item.channel, action="search", title="Buscar", thumbnail=''))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
def lista(item):
logger.info()
itemlist = []
full_data = httptools.downloadpage(item.url).data
full_data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", full_data)
data = scrapertools.find_single_match(full_data, 'class="sl">(.*?)<div class="pag">')
patron = '<a href="([^"]+)".*?<img src="([^"]+)".*?title="([^"]+)">'
matches = scrapertools.find_multiple_matches(data, patron)
for link, img, name in matches:
if " y " in name:
title=name.replace(" y "," & ")
else:
title = name
url = host + link
scrapedthumbnail = host + img
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
itemlist.append(Item(channel=item.channel, title=title, url=url, action="episodios", thumbnail=scrapedthumbnail,
contentSerieName=title, context=context))
# Pagination
next_page = scrapertools.find_single_match(full_data, '<a class="sel">\d+</a><a href="([^"]+)">\d+</a>')
if next_page != '':
itemlist.append(Item(channel=item.channel, contentSerieName=item.contentSerieName,
title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=host+next_page, action="lista"))
tmdb.set_infoLabels(itemlist)
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<div class="pel play" dt="(.+?)" .+?><img src="(.+?)" .+? title="(.*?)"><span class=".+?">(.+?)<\/span><a href="(.+?)" class.+?>'
matches = scrapertools.find_multiple_matches(data, patron)
# Pagination
num_items_x_pagina = 30
first = item.page * num_items_x_pagina - item.page
last = first + num_items_x_pagina - 1
b = 0
for scrapedplot, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedurl in matches[first:last]:
b = b + 1
url = host + scrapedurl
thumbnail = host +scrapedthumbnail
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
itemlist.append(item.clone(title=scrapedtitle+"-"+scrapedyear, url=url, action="findvideos", thumbnail=thumbnail, plot=scrapedplot,
show=scrapedtitle,contentSerieName=scrapedtitle,context=context))
if b >= 29:
itemlist.append(
Item(channel=item.channel, contentSerieName=item.contentSerieName, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=item.url, action="peliculas", page=item.page + 1))
tmdb.set_infoLabels(itemlist)
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
# get the total episode count
total_episode = 0
patron_caps = '<li><a href="(.*?)">(.*?) - (.*?)<\/a><\/li>'
matches = scrapertools.find_multiple_matches(data, patron_caps)
patron_info = '<img src="([^"]+)" .+?>.+?<h1>([^"]+)<\/h1><p .+?>(.+?)<\/p>'
scrapedthumbnail, show, scrapedplot = scrapertools.find_single_match(data, patron_info)
scrapedthumbnail = host + scrapedthumbnail
for link, cap, name in matches:
title = ""
pat = "$%&"
# several episodes behind a single link
if len(name.split(pat)) > 1:
i = 0
for pos in name.split(pat):
i = i + 1
total_episode += 1
season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, total_episode)
if len(name.split(pat)) == i:
title += "%sx%s " % (season, str(episode).zfill(2))
else:
title += "%sx%s_" % (season, str(episode).zfill(2))
else:
total_episode += 1
season, episode = renumbertools.numbered_for_tratk(item.channel,item.contentSerieName, 1, total_episode)
title += "%sx%s " % (season, str(episode).zfill(2))
url = host + "/" + link
if "DISPONIBLE" in name:
title += "No Disponible aún"
else:
title += name
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, show=show, plot=scrapedplot,
thumbnail=scrapedthumbnail))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
def findvideos(item):
import base64
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
_sl = scrapertools.find_single_match(data, 'var abi = ([^;]+);')
sl = eval(_sl)
buttons = scrapertools.find_multiple_matches(data,'class="bsel" sl="(.+?)"')#[0,1,2,3,4]
for ids in buttons:
id = int(ids)
url_end = golink(id,sl)
new_url = "https://abtoon.net/" + "embed/" + sl[0] + "/" + sl[1] + "/" + str(id) + "/" + sl[2] + url_end
data_new = httptools.downloadpage(new_url).data
data_new = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data_new)
logger.info("asdasdasdcc"+data_new)
valor1, valor2 = scrapertools.find_single_match(data_new, 'var x0x = \["[^"]*", "([^"]+)", "[^"]*", "[^"]*", "([^"]+)"\];')
try:
url = base64.b64decode(gktools.transforma_gsv(valor2, base64.b64decode(valor1)))
if 'download' in url:
url = url.replace('download', 'preview')
title = '%s'
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language='latino',
infoLabels=item.infoLabels))
except Exception as e:
logger.info(e)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
return itemlist
def search_results(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url, post=item.post).data
if len(data) > 0:
results = eval(data)
else:
return itemlist
for result in results:
try:
thumbnail = host + "/tb/%s.jpg" % result[0]
title = u'%s' % result[1]
logger.debug(title)
url = host + "/s/%s" % result[2]
itemlist.append(Item(channel=item.channel, thumbnail=thumbnail, title=title, url=url, contentSerieName=title,
action='episodios'))
except:
pass
tmdb.set_infoLabels(itemlist, seekTmdb=True)
return itemlist
def search(item, texto):
logger.info()
import urllib
if texto != "":
texto = texto.replace(" ", "+")
item.url = host+"/b.php"
post = {'k':texto, "pe":"", "te":""}
item.post = urllib.urlencode(post)
try:
return search_results(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def golink(ida,sl):
a=ida
b=[3,10,5,22,31]
c=1
d=""
e=sl[2]
for i in range(len(b)):
d=d+substr(e,b[i]+a,c)
return d
def substr(st,a,b):
return st[a:a+b]
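
The golink()/substr() pair above builds the short URL suffix for a player: for server id `ida` it samples the token sl[2] at five fixed offsets, each shifted by the id. A minimal, self-contained sketch of that scheme; the token value here is hypothetical, not taken from the site:

def substr(st, a, b):
    return st[a:a + b]

def golink(ida, token):
    # one character from the token at each fixed offset, shifted by the player id
    offsets = [3, 10, 5, 22, 31]
    return "".join(substr(token, off + ida, 1) for off in offsets)

token = "0123456789abcdefghijklmnopqrstuvwxyzABCDEF"  # hypothetical sl[2] value
print(golink(0, token))  # -> 3a5mv
print(golink(1, token))  # -> 4b6nw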

View File

@@ -1,75 +0,0 @@
{
"id": "allcalidad",
"name": "Allcalidad",
"active": true,
"adult": false,
"language": ["esp", "lat"],
"thumbnail": "https://s22.postimg.cc/irnlwuizh/allcalidad1.png",
"banner": "https://s22.postimg.cc/9y1athlep/allcalidad2.png",
"categories": [
"movie",
"direct"
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino"
]
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
}
]
}
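
The settings above are rendered in the add-on's configuration dialog and read back by the channel module through config.get_setting(id, channel), as the allcalidad code below does for modo_grafico. For a "list"-type setting the stored value is the index into lvalues. A small plain-Python illustration of that convention, with get_setting_sketch standing in for the real API:

settings_store = {"filter_languages": 0, "modo_grafico": True}  # sample stored values
lvalues = ["No filtrar", "Latino"]

def get_setting_sketch(name, channel="allcalidad"):
    # stand-in for platformcode.config.get_setting(name, channel)
    return settings_store[name]

if get_setting_sketch("modo_grafico"):
    print("extra info lookups enabled")
print(lvalues[get_setting_sketch("filter_languages")])  # -> No filtrar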

View File

@@ -1,222 +0,0 @@
# -*- coding: utf-8 -*-
from channelselector import get_thumb
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
IDIOMAS = {'Latino': 'Latino'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'fastplay', 'flashx', 'openload', 'vimeo', 'netutv']
__channel__='allcalidad'
host = "https://allcalidad.io/"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
__modo_grafico__ = True
def mainlist(item):
logger.info()
import ast
from core import jsontools
# unused geolocation stub with a hardcoded sample response
data = '{"country_code":"PE","country_name":"Peru","city":null,"postal":null,"latitude":-12.0433,"longitude":-77.0283,"IPv4":"190.41.210.15","state":null}'
data = data.replace("null", '"null"')
user_loc = ast.literal_eval(data)
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel = item.channel, title = "Novedades", action = "peliculas", url = host, thumbnail = get_thumb("newest", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Por género", action = "generos_years", url = host, extra = "Genero", thumbnail = get_thumb("genres", auto = True) ))
itemlist.append(Item(channel = item.channel, title = "Por año", action = "generos_years", url = host, extra = ">Año<", thumbnail = get_thumb("year", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Favoritas", action = "favorites", url = host + "/favorites", thumbnail = get_thumb("favorites", auto = True) ))
itemlist.append(Item(channel = item.channel, title = ""))
itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "?s=", thumbnail = get_thumb("search", auto = True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def favorites(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?s)short_overlay.*?<a href="([^"]+)'
patron += '.*?img.*?src="([^"]+)'
patron += '.*?title="([^"]+).*?'
matches = scrapertools.find_multiple_matches(data, patron)
for url, thumbnail, titulo in matches:
idioma = "Latino"
mtitulo = titulo + " (" + idioma + ")"
itemlist.append(item.clone(channel = item.channel,
action = "findvideos",
title = mtitulo,
fulltitle = titulo,
thumbnail = thumbnail,
url = url,
contentType="movie",
language = idioma
))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
url_pagina = scrapertools.find_single_match(data, 'next" href="([^"]+)')
if url_pagina != "":
pagina = "Pagina: " + scrapertools.find_single_match(url_pagina, "page/([0-9]+)")
itemlist.append(Item(channel = item.channel, action = "peliculas", title = pagina, url = url_pagina))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas','latino']:
item.url = host
elif categoria == 'infantiles':
item.url = host + 'category/animacion/'
elif categoria == 'terror':
item.url = host + 'category/terror/'
itemlist = peliculas(item)
if "Pagina" in itemlist[-1].title:
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
item.extra = "busca"
if texto != '':
return peliculas(item)
else:
return []
def generos_years(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?s)%s(.*?)</ul></div>' %item.extra
bloque = scrapertools.find_single_match(data, patron)
patron = 'href="([^"]+)'
patron += '">([^<]+)'
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, titulo in matches:
itemlist.append(Item(channel = item.channel,
action = "peliculas",
title = titulo,
url = url
))
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
matches = scrapertools.find_multiple_matches(data, '(?s)shortstory cf(.*?)rate_post')
for datos in matches:
url = scrapertools.find_single_match(datos, 'href="([^"]+)')
titulo = scrapertools.find_single_match(datos, 'short_header">([^<]+)').strip()
datapostid = scrapertools.find_single_match(datos, 'data-postid="([^"]+)')
thumbnail = scrapertools.find_single_match(datos, 'img w.*?src="([^"]+)')
post = 'action=get_movie_details&postID=%s' %datapostid
data1 = httptools.downloadpage(host + "wp-admin/admin-ajax.php", post=post).data
idioma = "Latino"
mtitulo = titulo + " (" + idioma + ")"
year = scrapertools.find_single_match(data1, "Año:.*?(\d{4})")
if year:
mtitulo += " (" + year + ")"
item.infoLabels['year'] = int(year)
itemlist.append(item.clone(channel = item.channel,
action = "findvideos",
title = mtitulo,
fulltitle = titulo,
thumbnail = thumbnail,
url = url,
contentType="movie",
language = idioma
))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
url_pagina = scrapertools.find_single_match(data, 'next" href="([^"]+)')
if url_pagina != "":
pagina = "Pagina: " + scrapertools.find_single_match(url_pagina, "page/([0-9]+)")
itemlist.append(Item(channel = item.channel, action = "peliculas", title = pagina, url = url_pagina))
return itemlist
def findvideos(item):
itemlist = []
data = httptools.downloadpage(item.url).data
if not item.infoLabels["year"]:
item.infoLabels["year"] = scrapertools.find_single_match(data, 'dateCreated.*?(\d{4})')
if "orig_title" in data:
contentTitle = scrapertools.find_single_match(data, 'orig_title.*?>([^<]+)<').strip()
if contentTitle != "":
item.contentTitle = contentTitle
bloque = scrapertools.find_single_match(data, '(?s)<div class="bottomPlayer">(.*?)<script>')
match = scrapertools.find_multiple_matches(bloque, '(?is)data-Url="([^"]+).*?data-postId="([^"]*)')
for dataurl, datapostid in match:
page_url = host + "wp-admin/admin-ajax.php"
post = "action=get_more_top_news&postID=%s&dataurl=%s" %(datapostid, dataurl)
data = httptools.downloadpage(page_url, post=post).data
url = scrapertools.find_single_match(data, '(?i)src="([^"]+)')
titulo = "Ver en: %s"
text_color = "white"
if "goo.gl" in url:
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
if "youtube" in url:
titulo = "Ver trailer: %s"
text_color = "yellow"
if "ad.js" in url or "script" in url or "jstags.js" in url or not datapostid:
continue
elif "vimeo" in url:
url += "|" + "http://www.allcalidad.com"
itemlist.append(
item.clone(channel = item.channel,
action = "play",
text_color = text_color,
title = titulo,
url = url
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
tmdb.set_infoLabels(itemlist, __modo_grafico__)
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
if itemlist and item.contentChannel != "videolibrary":
itemlist.append(Item(channel = item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta"))
# "Add this movie to the KODI library" option
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
contentTitle = item.contentTitle
))
return itemlist
def play(item):
item.thumbnail = item.contentThumbnail
return [item]
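
findvideos() above stores each link title as a template such as "Ver en: %s" and lets servertools.get_servers_itemlist() fill in the detected server name through the lambda it receives. A stripped-down sketch of that convention; FakeItem and fill_titles are stand-ins, not the real core.item.Item or servertools API:

class FakeItem(object):
    # stand-in for core.item.Item; only the fields used here
    def __init__(self, title, server):
        self.title = title
        self.server = server

def fill_titles(itemlist, title_fn):
    # what get_servers_itemlist does with the lambda, minus server detection
    return [title_fn(i) for i in itemlist]

items = [FakeItem("Ver en: %s", "openload"), FakeItem("Ver trailer: %s", "youtube")]
print(fill_titles(items, lambda i: i.title % i.server.capitalize()))
# -> ['Ver en: Openload', 'Ver trailer: Youtube']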

View File

@@ -1,72 +0,0 @@
{
"id": "allpeliculas",
"name": "Allpeliculas",
"language": ["esp", "lat"],
"active": true,
"adult": false,
"thumbnail": "http://i.imgur.com/aWCDWtn.png",
"banner": "allpeliculas.png",
"categories": [
"movie",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT"
]
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Películas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}

View File

@@ -1,242 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from channelselector import get_thumb
from platformcode import config, logger
from channels import autoplay
from channels import filtertools
__modo_grafico__ = config.get_setting('modo_grafico', "allpeliculas")
__perfil__ = int(config.get_setting('perfil', "allpeliculas"))
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
color1, color2, color3 = perfil[__perfil__]
IDIOMAS = {"Latino": "LAT"}
list_language = IDIOMAS.values()
list_quality = []
SERVERS = {"26": "powvideo", "45": "okru", "75": "openload", "12": "netutv", "65": "thevideos",
"67": "spruto", "71": "stormo", "73": "idowatch", "48": "okru", "55": "openload",
"20": "nowvideo", "84": "fastplay", "96": "raptu", "94": "tusfiles"}
list_servers = ['powvideo', 'okru', 'openload', 'netutv', 'thevideos', 'spruto', 'stormo', 'idowatch', 'nowvideo',
'fastplay', 'raptu', 'tusfiles']
host = "https://allpeliculas.io/"
def mainlist(item):
logger.info()
itemlist = []
item.text_color = color1
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(item.clone(title="Películas", action="lista", fanart="http://i.imgur.com/c3HS8kj.png",
url= host + "movies/newmovies?page=1", extra1 = 0,
thumbnail=get_thumb('movies', auto=True)))
itemlist.append(item.clone(title="Por genero", action="generos", fanart="http://i.imgur.com/c3HS8kj.png",
url= host + "movies/getGanres", thumbnail=get_thumb('genres', auto=True)))
itemlist.append(item.clone(title="Colecciones", action="colecciones", fanart="http://i.imgur.com/c3HS8kj.png",
url= host, thumbnail=get_thumb('colections', auto=True)))
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(title="Buscar...", action="search", thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def colecciones(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'href="(/peliculas[^"]+).*?'
patron += 'title_geo"><span>([^<]+).*?'
patron += 'title_eng"><span>([^<]+).*?'
patron += 'src="([^"]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedcantidad, scrapedthumbnail in matches:
if scrapedtitle == "LGTB" and config.get_setting("adult_mode") == 0:
continue
title = scrapedtitle.capitalize() + " (" + scrapedcantidad + ")"
itemlist.append(Item(channel = item.channel,
action = "listado_colecciones",
page = 1,
thumbnail = host + scrapedthumbnail,
title = title,
url = host + scrapedurl
))
return itemlist
def listado_colecciones(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data_url = scrapertools.find_single_match(data, "data_url: '([^']+)")
post = "page=%s" %item.page
data = httptools.downloadpage(host + data_url, post=post).data
patron = 'a href="(/peli[^"]+).*?'
patron += 'src="([^"]+).*?'
patron += 'class="c_fichas_title">([^<]+).*?'
patron += 'Año:.*?href="">([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
item.infoLabels['year'] = scrapedyear
itemlist.append(item.clone(channel = item.channel,
action = "findvideos",
contentTitle = scrapedtitle,
thumbnail = scrapedthumbnail,
title = scrapedtitle,
url = host + scrapedurl
))
tmdb.set_infoLabels(itemlist)
item.page += 1
post = "page=%s" %item.page
data = httptools.downloadpage(host + data_url, post=post).data
if len(data) > 50:
itemlist.append(Item(channel = item.channel,
action = "listado_colecciones",
title = "Pagina siguiente>>",
page = item.page,
url = item.url
))
return itemlist
def generos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
dict_data = jsontools.load(data)
for it in dict_data:
itemlist.append(Item(
channel = item.channel,
action = "lista",
title = it['label'],
url = host + "movies/newmovies?page=1",
extra1 = it['id']
))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if "Próximamente" in data:
itemlist.append(Item(channel = item.channel, title = "Próximamente"))
return itemlist
patron = 'data-link="([^"]+).*?'
patron += '>([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for url, calidad in matches:
calidad = scrapertools.find_single_match(calidad, "\d+") + scrapertools.find_single_match(calidad, "\..+")
itemlist.append(item.clone(
channel = item.channel,
action = "play",
title = calidad,
thumbnail = item.thumbnail,
contentThumbnail = item.thumbnail,
url = url,
language = IDIOMAS['Latino']
))
itemlist = servertools.get_servers_itemlist(itemlist)
tmdb.set_infoLabels(itemlist, seekTmdb = True)
itemlist.append(Item(channel=item.channel))
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
contentTitle = item.contentTitle
))
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
return itemlist
def play(item):
logger.info()
item.thumbnail = item.contentThumbnail
return [item]
def lista(item):
logger.info()
itemlist = []
dict_param = dict()
item.infoLabels = {}
item.text_color = color2
params = '{}'
if item.extra1 != 0:
dict_param["genero"] = [item.extra1]
params = jsontools.dump(dict_param)
data = httptools.downloadpage(item.url, post=params).data
data = data.replace("<mark>","").replace("<\/mark>","")
dict_data = jsontools.load(data)
for it in dict_data["items"]:
year = it["year"]
url = host + "pelicula/" + it["slug"]
title = it["title"] + " (%s)" %year
thumb = host + it["image"]
item.infoLabels['year'] = year
itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumb,
context=["buscar_trailer"], contentTitle=it["title"], contentType="movie"))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
pagina = scrapertools.find_single_match(item.url, 'page=([0-9]+)')
item.url = item.url.replace(pagina, "")
if pagina == "":
pagina = "0"
pagina = int(pagina) + 1
item.url = item.url + "%s" %pagina
if item.extra != "busqueda":
itemlist.append(Item(channel = item.channel, action="lista", title="Pagina %s" %pagina, url=item.url, extra1 = item.extra1
))
return itemlist
def search(item, texto):
logger.info()
if texto != "":
texto = texto.replace(" ", "%20")
item.url = host + "/movies/search/" + texto
item.extra = "busqueda"
try:
return lista(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas','latino']:
item.url = host + "movies/newmovies?page=1"
item.action = "lista"
itemlist = lista(item)
if itemlist[-1].action == "lista":
itemlist.pop()
# Catch the exception so the "newest" channel is not interrupted if one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
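
lista() above advances pagination by cutting the current page number out of item.url and appending page + 1. A self-contained sketch of that logic; note the replace() is as blunt as in the channel code, removing every occurrence of those digits in the URL:

import re

def next_page_url(url):
    # cut the current page number out of the URL, then append page + 1
    pagina = re.search(r"page=([0-9]+)", url)
    num = int(pagina.group(1)) if pagina else 0
    base = url.replace(pagina.group(1), "") if pagina else url
    return base + str(num + 1)

print(next_page_url("https://allpeliculas.io/movies/newmovies?page=1"))
# -> https://allpeliculas.io/movies/newmovies?page=2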

View File

@@ -1,53 +0,0 @@
{
"id": "animeboom",
"name": "AnimeBoom",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "https://animeboom.net/images/logo.png",
"banner": "",
"categories": [
"anime",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"VOSE"
]
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Incluir en Novedades - Episodios de anime",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,293 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel AnimeBoom -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from core import httptools
from core import scrapertools
from core import servertools
from channelselector import get_thumb
from core import tmdb
from core.item import Item
from platformcode import logger, config
from channels import autoplay
from channels import filtertools
from channels import renumbertools
host = "https://animeboom.net/"
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'animeboom')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'animeboom')
IDIOMAS = {'Latino':'LAT', 'VOSE': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['directo', 'openload', 'streamango']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title="Nuevos Episodios",
action="new_episodes",
thumbnail=get_thumb('new_episodes', auto=True),
url=host))
itemlist.append(Item(channel=item.channel, title="Ultimas",
action="list_all",
thumbnail=get_thumb('last', auto=True),
url=host + 'emision'))
itemlist.append(Item(channel=item.channel, title="Todas",
action="list_all",
thumbnail=get_thumb('all', auto=True),
url=host + 'series'))
itemlist.append(Item(channel=item.channel, title="Series",
action="list_all",
thumbnail=get_thumb('tvshows', auto=True),
url=host + 'tv'))
itemlist.append(Item(channel=item.channel, title="Películas",
action="list_all",
thumbnail=get_thumb('movies', auto=True),
url=host + 'peliculas'))
itemlist.append(Item(channel=item.channel, title="OVAs",
action="list_all",
thumbnail='',
url=host + 'ova'))
itemlist.append(Item(channel=item.channel, title="ONAs",
action="list_all",
thumbnail='',
url=host + 'ona'))
itemlist.append(Item(channel=item.channel, title="Especiales",
action="list_all",
thumbnail='',
url=host + 'specials'))
itemlist.append(Item(channel=item.channel, title="Buscar",
action="search",
url=host + 'search?s=',
thumbnail=get_thumb('search', auto=True),
fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
))
autoplay.show_option(item.channel, itemlist)
itemlist = renumbertools.show_option(item.channel, itemlist)
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<article class="([^"]+)"><figure class="image"><a href="([^"]+)" title=".*?">'
patron += '<img src="([^"]+)" alt="([^"]+)">.*?class="year">(\d{4})<'
matches = re.compile(patron, re.DOTALL).findall(data)
for type, scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
url = scrapedurl
thumbnail = scrapedthumbnail
if 'latino' in scrapedtitle.lower():
lang = 'Latino'
else:
lang = 'VOSE'
title = re.sub('Audio Latino', '', scrapedtitle)
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
itemlist.append(Item(channel=item.channel, action='episodios',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=title,
language = lang,
context = context,
infoLabels={'year':year}
))
# Pagination
next_page = scrapertools.find_single_match(data,
'<a href="([^"]+)" rel="next">&raquo;</a>')
next_page_url = scrapertools.decodeHtmlentities(next_page)
if next_page_url != "":
itemlist.append(Item(channel=item.channel,
action="list_all",
title=">> Página siguiente",
url=next_page_url,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
return itemlist
def search_results(item):
logger.info()
itemlist=[]
full_data = get_source(item.url)
data = scrapertools.find_single_match(full_data, '<div class="search-results">(.*?)<h4')
patron = '<a href="([^"]+)".*?<img src="([^"]+)".*?alt="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
title = re.sub('online|Audio|Latino', '', scrapedtitle)
title = title.strip()
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
itemlist.append(Item(channel=item.channel,
action="episodios",
title=title,
contentSerieName=title,
url=url,
context = context,
thumbnail=scrapedthumbnail))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
try:
if texto != '':
return search_results(item)
else:
return []
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def new_episodes(item):
logger.info()
itemlist = []
full_data = get_source(item.url)
data = scrapertools.find_single_match(full_data, '>Episodios Estreno</h2>(.*?)</section>')
patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
if 'latino' in scrapedtitle.lower():
lang = 'Latino'
else:
lang = 'VOSE'
title = re.sub('sub|Sub|Español|español|Audio|Latino|audio|latino','', scrapedtitle)
itemlist.append(Item(channel=item.channel, title=title, url=url, thumbnail=scrapedthumbnail,
action='findvideos', language=lang))
return itemlist
def episodios(item):
logger.info()
itemlist = []
full_data = get_source(item.url)
data = scrapertools.find_single_match(full_data, '<ul class="list-episodies scrolling">(.*?)</ul>')
patron = '<a href="([^"]+)".*?title="([^"]+)".*?Episodio (\d+)'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, title, episode in matches:
if 'latino' in title.lower():
lang='Latino'
else:
lang = 'VOSE'
season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, int(episode))
title = "%sx%s - %s" % (season, str(episode).zfill(2),item.contentSerieName)
url = scrapedurl
infoLabels['season'] = season
infoLabels['episode'] = episode
itemlist.append(Item(channel=item.channel, title=title, contentSerieName=item.contentSerieName, url=url,
action='findvideos', language=lang, infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
itemlist = itemlist[::-1]
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName,
extra1='library'))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'video\[\d+\] = \'<iframe src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl in matches:
if 'animeboom' in scrapedurl:
new_data = get_source(scrapedurl)
scrapedurl = scrapertools.find_single_match(new_data, 'src:"([^,]+)",')
if scrapedurl != '':
itemlist.append(Item(channel=item.channel, title='%s', url=scrapedurl, action='play', language = item.language,
infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
return itemlist
def newest(categoria):
itemlist = []
item = Item()
if categoria == 'anime':
item.url=host
itemlist = new_episodes(item)
return itemlist
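
episodios() above converts the absolute episode number into a (season, episode) pair via renumbertools.numbered_for_tratk(). A hedged sketch of what such a renumbering does, using fabricated season lengths; the real module reads per-show data that the user configures through the context menu:

SEASON_LENGTHS = {"Example Show": [12, 24, 13]}  # fabricated per-show data

def numbered_for_tratk_sketch(show, absolute_episode):
    remaining = absolute_episode
    for season, length in enumerate(SEASON_LENGTHS.get(show, []), 1):
        if remaining <= length:
            return season, remaining
        remaining -= length
    return 1, absolute_episode  # no renumbering data: pass the number through

print(numbered_for_tratk_sketch("Example Show", 30))  # -> (2, 18)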

View File

@@ -1,43 +0,0 @@
{
"id": "animeflv",
"name": "AnimeFLV",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "animeflv.png",
"banner": "animeflv.png",
"categories": [
"anime"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Incluir en Novedades - Episodios de anime",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"VOSE",
"LAT"
]
}
]
}

View File

@@ -1,250 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from channels import renumbertools
from core import httptools
from core import jsontools
from core import servertools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools
IDIOMAS = {'LAT': 'LAT','SUB': 'VOSE'}
list_language = IDIOMAS.values()
list_servers = ['directo', 'rapidvideo', 'streamango', 'yourupload', 'mailru', 'netutv', 'okru']
list_quality = ['default']
HOST = "https://animeflv.net/"
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos episodios", url=HOST))
itemlist.append(Item(channel=item.channel, action="novedades_anime", title="Últimos animes", url=HOST))
itemlist.append(Item(channel=item.channel, action="listado", title="Animes", url=HOST + "browse?order=title"))
itemlist.append(Item(channel=item.channel, title="Buscar por:"))
itemlist.append(Item(channel=item.channel, action="search", title=" Título"))
itemlist.append(Item(channel=item.channel, action="search_section", title=" Género", url=HOST + "browse",
extra="genre"))
itemlist.append(Item(channel=item.channel, action="search_section", title=" Tipo", url=HOST + "browse",
extra="type"))
itemlist.append(Item(channel=item.channel, action="search_section", title=" Año", url=HOST + "browse",
extra="year"))
itemlist.append(Item(channel=item.channel, action="search_section", title=" Estado", url=HOST + "browse",
extra="status"))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
def search(item, texto):
logger.info()
itemlist = []
item.url = urlparse.urljoin(HOST, "api/animes/search")
texto = texto.replace(" ", "+")
post = "value=%s" % texto
data = httptools.downloadpage(item.url, post=post).data
try:
dict_data = jsontools.load(data)
for e in dict_data:
if e["id"] != e["last_id"]:
_id = e["last_id"]
else:
_id = e["id"]
url = "%sanime/%s/%s" % (HOST, _id, e["slug"])
title = e["title"]
#if "&#039;" in title:
# title = title.replace("&#039;","")
#if "&deg;" in title:
# title = title.replace("&deg;","")
thumbnail = "%suploads/animes/covers/%s.jpg" % (HOST, e["id"])
new_item = item.clone(action="episodios", title=title, url=url, thumbnail=thumbnail)
if e["type"] != "movie":
new_item.show = title
new_item.context = renumbertools.context(item)
else:
new_item.contentType = "movie"
new_item.contentTitle = title
itemlist.append(new_item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return itemlist
def search_section(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
patron = 'id="%s_select"[^>]+>(.*?)</select>' % item.extra
data = scrapertools.find_single_match(data, patron)
matches = re.compile('<option value="([^"]+)">(.*?)</option>', re.DOTALL).findall(data)
for _id, title in matches:
url = "%s?%s=%s&order=title" % (item.url, item.extra, _id)
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url,
context=renumbertools.context(item)))
return itemlist
def newest(categoria):
itemlist = []
if categoria == 'anime':
itemlist = novedades_episodios(Item(url=HOST))
return itemlist
def novedades_episodios(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
data = scrapertools.find_single_match(data, '<h2>Últimos episodios</h2>.+?<ul class="ListEpisodios[^>]+>(.*?)</ul>')
matches = re.compile('<a href="([^"]+)"[^>]+>.+?<img src="([^"]+)".+?"Capi">(.*?)</span>'
'<strong class="Title">(.*?)</strong>', re.DOTALL).findall(data)
itemlist = []
for url, thumbnail, str_episode, show in matches:
try:
episode = int(str_episode.replace("Episodio ", ""))
except ValueError:
season = 1
episode = 1
else:
season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode)
title = "%s: %sx%s" % (show, season, str(episode).zfill(2))
url = urlparse.urljoin(HOST, url)
thumbnail = urlparse.urljoin(HOST, thumbnail)
new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, show=show, thumbnail=thumbnail,
fulltitle=title)
itemlist.append(new_item)
return itemlist
def novedades_anime(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
data = scrapertools.find_single_match(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
matches = re.compile('href="([^"]+)".+?<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<h3.+?>(.*?)</h3>.+?'
'(?:</p><p>(.*?)</p>.+?)?</article></li>', re.DOTALL).findall(data)
itemlist = []
for url, thumbnail, _type, title, plot in matches:
url = urlparse.urljoin(HOST, url)
thumbnail = urlparse.urljoin(HOST, thumbnail)
new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
fulltitle=title, plot=plot)
if _type != "Película":
new_item.show = title
new_item.context = renumbertools.context(item)
else:
new_item.contentType = "movie"
new_item.contentTitle = title
itemlist.append(new_item)
return itemlist
def listado(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
url_pagination = scrapertools.find_single_match(data, '<li class="active">.*?</li><li><a href="([^"]+)">')
data = scrapertools.find_multiple_matches(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
data = "".join(data)
matches = re.compile('<a href="([^"]+)">.+?<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<h3.*?>(.*?)</h3>'
'.*?</p><p>(.*?)</p>', re.DOTALL).findall(data)
itemlist = []
for url, thumbnail, _type, title, plot in matches:
url = urlparse.urljoin(HOST, url)
thumbnail = urlparse.urljoin(HOST, thumbnail)
new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
fulltitle=title, plot=plot)
if _type == "Anime":
new_item.show = title
new_item.context = renumbertools.context(item)
else:
new_item.contentType = "movie"
new_item.contentTitle = title
itemlist.append(new_item)
if url_pagination:
url = urlparse.urljoin(HOST, url_pagination)
title = ">> Pagina Siguiente"
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
info = scrapertools.find_single_match(data, "anime_info = \[(.*?)\];")
info = eval(info)
episodes = eval(scrapertools.find_single_match(data, "var episodes = (.*?);"))
for episode in episodes:
url = '%s/ver/%s/%s-%s' % (HOST, episode[1], info[2], episode[0])
season = 1
season, episodeRenumber = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, season, int(episode[0]))
#title = '1x%s Episodio %s' % (episode[0], episode[0])
title = '%sx%s Episodio %s' % (season, str(episodeRenumber).zfill(2), episodeRenumber)
itemlist.append(item.clone(title=title, url=url, action='findvideos', contentSerieName=item.contentSerieName))
itemlist = itemlist[::-1]
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.contentSerieName))
return itemlist
def findvideos(item):
logger.info()
from core import jsontools
itemlist = []
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", httptools.downloadpage(item.url).data)
videos = scrapertools.find_single_match(data, 'var videos = (.*?);')
videos_json = jsontools.load(videos)
for video_lang in videos_json.items():
language = video_lang[0]
matches = scrapertools.find_multiple_matches(str(video_lang[1]), "code': '(.*?)'")
for source in matches:
url = source
if 'redirector' in source:
new_data = httptools.downloadpage(source).data
url = scrapertools.find_single_match(new_data, 'window.location.href = "([^"]+)"')
elif 'animeflv.net/embed' in source:
source = source.replace('embed', 'check')
new_data = httptools.downloadpage(source).data
json_data = jsontools.load(new_data)
try:
url = json_data['file']
except:
continue
itemlist.append(Item(channel=item.channel, url=url, title='%s', action='play', language=language))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server)
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
return itemlist
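
findvideos() above extracts a JSON object keyed by language from the page's `var videos = ...` script block, then walks each source list for its 'code' embed URLs. A compact sketch of that extraction with fabricated page data; plain key access stands in for the channel's regex over str(video_lang[1]):

import json
import re

page = 'var videos = {"SUB": [{"code": "https://example.org/embed/abc"}]};'
videos_json = json.loads(re.search(r"var videos = (.*?);", page).group(1))
for language, sources in videos_json.items():
    # the channel scans the repr with "code': '(.*?)'"; key access is equivalent here
    print("%s -> %s" % (language, [str(s["code"]) for s in sources]))
# SUB -> ['https://example.org/embed/abc']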

View File

@@ -1,45 +0,0 @@
{
"id": "animeflv_ru",
"name": "AnimeFLV.RU",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "http://i.imgur.com/5nRR9qq.png",
"banner": "animeflv_ru.png",
"compatible": {
"python": "2.7.9"
},
"categories": [
"anime"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Incluir en Novedades - Episodios de anime",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"VOSE"
]
}
]
}

View File

@@ -1,227 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from channels import renumbertools
from core import httptools
from core import servertools
from core import jsontools
from core import scrapertools
from core.item import Item
from platformcode import logger
from channels import autoplay
IDIOMAS = {'VOSE': 'VOSE'}
list_language = IDIOMAS.values()
list_servers = ['directo']
list_quality = ['default']
HOST = "https://animeflv.ru/"
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos episodios", url=HOST))
itemlist.append(Item(channel=item.channel, action="novedades_anime", title="Últimos animes", url=HOST))
itemlist.append(Item(channel=item.channel, action="listado", title="Animes", url=HOST + "animes/nombre/lista"))
itemlist.append(Item(channel=item.channel, title="Buscar por:"))
itemlist.append(Item(channel=item.channel, action="search", title=" Título"))
itemlist.append(Item(channel=item.channel, action="search_section", title=" Género", url=HOST + "animes",
extra="genre"))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
def clean_title(title):
year_pattern = r'\([\d -]+?\)'
return re.sub(year_pattern, '', title).strip()
def search(item, texto):
logger.info()
itemlist = []
item.url = urlparse.urljoin(HOST, "search_suggest")
texto = texto.replace(" ", "+")
post = "value=%s" % texto
data = httptools.downloadpage(item.url, post=post).data
try:
dict_data = jsontools.load(data)
for e in dict_data:
title = clean_title(scrapertools.htmlclean(e["name"]))
url = e["url"]
plot = e["description"]
thumbnail = e["thumb"]
new_item = item.clone(action="episodios", title=title, url=url, plot=plot, thumbnail=thumbnail)
if "Pelicula" in e["genre"]:
new_item.contentType = "movie"
new_item.contentTitle = title
else:
new_item.show = title
new_item.context = renumbertools.context(item)
itemlist.append(new_item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return itemlist
def search_section(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
patron = 'id="%s_filter"[^>]+><div class="inner">(.*?)</div></div>' % item.extra
data = scrapertools.find_single_match(data, patron)
matches = re.compile('<a href="([^"]+)"[^>]+>(.*?)</a>', re.DOTALL).findall(data)
for url, title in matches:
url = "%s/nombre/lista" % url
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url,
context=renumbertools.context(item)))
return itemlist
def newest(categoria):
itemlist = []
if categoria == 'anime':
itemlist = novedades_episodios(Item(url=HOST))
return itemlist
def novedades_episodios(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
data = scrapertools.find_single_match(data, '<ul class="ListEpisodios[^>]+>(.*?)</ul>')
matches = re.compile('href="([^"]+)"[^>]+>.+?<img src="([^"]+)".+?"Capi">(.*?)</span>'
'<strong class="Title">(.*?)</strong>', re.DOTALL).findall(data)
itemlist = []
for url, thumbnail, str_episode, show in matches:
try:
episode = int(str_episode.replace("Ep. ", ""))
except ValueError:
season = 1
episode = 1
else:
season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode)
title = "%s: %sx%s" % (show, season, str(episode).zfill(2))
url = urlparse.urljoin(HOST, url)
thumbnail = urlparse.urljoin(HOST, thumbnail)
new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, show=show, thumbnail=thumbnail,
fulltitle=title)
itemlist.append(new_item)
return itemlist
def novedades_anime(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
data = scrapertools.find_single_match(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
matches = re.compile('<img src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>', re.DOTALL).findall(data)
itemlist = []
for thumbnail, url, title in matches:
url = urlparse.urljoin(HOST, url)
thumbnail = urlparse.urljoin(HOST, thumbnail)
title = clean_title(title)
new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
fulltitle=title)
new_item.show = title
new_item.context = renumbertools.context(item)
itemlist.append(new_item)
return itemlist
def listado(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
url_pagination = scrapertools.find_single_match(data, '<li class="current">.*?</li>[\s]<li><a href="([^"]+)">')
data = scrapertools.find_single_match(data, '</div><div class="full">(.*?)<div class="pagination')
matches = re.compile('<img.+?src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>.+?'
'<div class="full item_info genres_info">(.*?)</div>.+?class="full">(.*?)</p>',
re.DOTALL).findall(data)
itemlist = []
for thumbnail, url, title, genres, plot in matches:
title = clean_title(title)
url = urlparse.urljoin(HOST, url)
thumbnail = urlparse.urljoin(HOST, thumbnail)
new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
fulltitle=title, plot=plot)
if "Pelicula Anime" in genres:
new_item.contentType = "movie"
new_item.contentTitle = title
else:
new_item.show = title
new_item.context = renumbertools.context(item)
itemlist.append(new_item)
if url_pagination:
url = urlparse.urljoin(HOST, url_pagination)
title = ">> Pagina Siguiente"
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
if item.plot == "":
item.plot = scrapertools.find_single_match(data, 'Description[^>]+><p>(.*?)</p>')
data = scrapertools.find_single_match(data, '<div class="Sect Episodes full">(.*?)</div>')
matches = re.compile('<a href="([^"]+)"[^>]+>(.+?)</a', re.DOTALL).findall(data)
for url, title in matches:
title = title.strip()
url = urlparse.urljoin(item.url, url)
thumbnail = item.thumbnail
try:
episode = int(scrapertools.find_single_match(title, "Episodio (\d+)"))
except ValueError:
season = 1
episode = 1
else:
season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode)
title = "%s: %sx%s" % (item.title, season, str(episode).zfill(2))
itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumbnail, fulltitle=title,
fanart=thumbnail, contentType="episode"))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
_id = scrapertools.find_single_match(item.url, HOST + 'ver/([^/]+)/')
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, 'atrl(.*?)choose_quality')
matches = scrapertools.find_multiple_matches(bloque, '<option value="([^"]+)')
headers = {"Referer" : item.url}
for url in matches:
post = "embed_id=%s" % _id
xserver = scrapertools.find_single_match(url, 's=(\w+)')
data = httptools.downloadpage(HOST + "get_video_info_v2?s=%s" %xserver, post=post).data
dict_data = jsontools.load(data)
data = httptools.downloadpage(dict_data["value"], headers=headers).data
matches = scrapertools.find_multiple_matches(data, '"file":"([^"]+)"')
for url in matches:
url = url.replace("\\","")
itemlist.append(item.clone(action="play", url=url, title='%s',
fulltitle=item.title, language='VOSE'
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
# Required by AutoPlay
autoplay.start(itemlist, item)
return itemlist
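
clean_title() at the top of this module strips the trailing "(year)" or "(year - year)" tag that animeflv.ru appends to listing titles. A quick demonstration with a made-up title:

import re

def clean_title(title):
    # strips the "(year)" / "(year - year)" tag the site appends to titles
    year_pattern = r'\([\d -]+?\)'
    return re.sub(year_pattern, '', title).strip()

print(clean_title("Shingeki Example (2013 - 2017)"))  # -> Shingeki Example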

View File

@@ -1,30 +0,0 @@
{
"id": "animeid",
"name": "Animeid",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "animeid.png",
"banner": "animeid.png",
"categories": [
"anime"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Incluir en Novedades - Episodios de anime",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,282 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from channels import renumbertools, autoplay
CHANNEL_HOST = "https://www.animeid.tv/"
IDIOMAS = {'Latino': 'LAT', 'VOSE': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['animeid']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(
Item(channel=item.channel, action="novedades_series", title="Últimas series", url=CHANNEL_HOST))
itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos episodios",
url=CHANNEL_HOST, viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, action="generos", title="Listado por genero", url=CHANNEL_HOST))
itemlist.append(
Item(channel=item.channel, action="letras", title="Listado alfabetico", url=CHANNEL_HOST))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar..."))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
def newest(categoria):
itemlist = []
item = Item()
try:
if categoria == 'anime':
item.url = CHANNEL_HOST
itemlist = novedades_episodios(item)
# Catch the exception so the "newest" channel is not interrupted if one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return itemlist
# TODO: fix
def search(item, texto):
logger.info()
itemlist = []
if item.url == "":
item.url = CHANNEL_HOST + "ajax/search?q="
texto = texto.replace(" ", "+")
item.url = item.url + texto
try:
headers = []
headers.append(
["User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:19.0) Gecko/20100101 Firefox/19.0"])
headers.append(["Referer", CHANNEL_HOST])
headers.append(["X-Requested-With", "XMLHttpRequest"])
data = httptools.downloadpage(item.url, headers=headers).data
data = data.replace("\\", "")
patron = '{"id":"([^"]+)","text":"([^"]+)","date":"[^"]*","image":"([^"]+)","link":"([^"]+)"}'
matches = re.compile(patron, re.DOTALL).findall(data)
for id, scrapedtitle, scrapedthumbnail, scrapedurl in matches:
title = scrapedtitle
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = scrapedthumbnail
plot = ""
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
itemlist.append(
Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot,
context=context, show=title, viewmode="movie_with_plot"))
return itemlist
# Catch the exception so the global search is not interrupted if one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def novedades_series(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<section class="series">(.*?)</section>')
patronvideos = '(?s)<a href="([^"]+)">.*?tipo\d+">([^<]+)</span>.*?<strong>([^<]+)</strong>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, tipo, title in matches:
scrapedtitle = title + " (" + tipo + ")"
scrapedurl = urlparse.urljoin(item.url, url)
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=scrapedurl,
context=context, show=title, viewmode="movie_with_plot"))
return itemlist
def novedades_episodios(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<section class="lastcap">(.*?)</section>')
patronvideos = '(?s)<a href="([^"]+)">[^<]+<header>([^<]+).*?src="([^"]+)"[\s\S]+?<p>(.+?)</p>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
itemlist = []
for url, title, thumbnail, plot in matches:
scrapedtitle = scrapertools.entityunescape(title)
scrapedurl = urlparse.urljoin(item.url, url)
scrapedthumbnail = thumbnail
scrapedplot = plot
episodio = scrapertools.find_single_match(scrapedtitle, '\s+#(.*?)$')
contentTitle = scrapedtitle.replace('#' + episodio, '')
itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, contentSeason=1, contentTitle=contentTitle))
return itemlist
def generos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<div class="generos">(.*?)</div>')
patronvideos = '(?s)<a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for url, title in matches:
scrapedtitle = title
scrapedurl = urlparse.urljoin(item.url, url)
scrapedthumbnail = ""
scrapedplot = ""
itemlist.append(
Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
plot=scrapedplot, show=title, viewmode="movie_with_plot"))
return itemlist
def letras(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<ul id="letras">(.*?)</ul>')
patronvideos = '<li> <a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
itemlist = []
for url, title in matches:
scrapedtitle = title
scrapedurl = urlparse.urljoin(item.url, url)
itemlist.append(
Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl,
show=title, viewmode="movie_with_plot"))
return itemlist
def series(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?s)<article class="item"[^<]+'
patron += '<a href="([^"]+)"[^<]+<header>([^<]+)</header[^<]+.*?'
patron += 'src="([^"]+)".*?<p>(.*?)<'
matches = scrapertools.find_multiple_matches(data, patron)
for url, title, thumbnail, plot in matches:
scrapedtitle = title
scrapedurl = urlparse.urljoin(item.url, url)
scrapedthumbnail = thumbnail
scrapedplot = plot
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, show=scrapedtitle, context=context,
viewmode="movie_with_plot"))
itemlist = sorted(itemlist, key=lambda it: it.title)
try:
page_url = scrapertools.find_single_match(data, '<li><a href="([^"]+)">&gt;</a></li>')
itemlist.append(Item(channel=item.channel, action="series", title="[COLOR cyan]>> Página siguiente[/COLOR]",
url=urlparse.urljoin(item.url, page_url), viewmode="movie_with_plot", thumbnail="",
plot=""))
except:
pass
return itemlist
def episodios(item, final=True):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data_id = scrapertools.find_single_match(data, 'data-id="([^"]+)')
CHANNEL_HEADERS = [
["Host", "m.animeid.tv"],
["X-Requested-With", "XMLHttpRequest"]
]
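# Page through the mobile AJAX episode endpoint until it returns an empty list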
page = 0
while True:
page += 1
u = "https://m.animeid.tv/ajax/caps?id=%s&ord=DESC&pag=%s" % (data_id, page)
data = httptools.downloadpage(u, headers=CHANNEL_HEADERS).data
# When there is no more data the endpoint returns: "list":[]
if '"list":[]' in data:
break
dict_data = jsontools.load(data)
episodes = dict_data['list'][::-1]
for ep in episodes:
season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1,
int(ep["numero"]))
title = "%sx%s - %s" % (season, str(episode).zfill(2), ep["date"])
itemlist.append(Item(action="findvideos",
channel=item.channel,
title=title,
url=CHANNEL_HOST + ep['href'],
thumbnail=item.thumbnail,
show=item.show,
viewmode="movie_with_plot"
))
if config.get_videolibrary_support():
itemlist.append(
Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show))
itemlist.append(
Item(channel=item.channel, title="[COLOR white]Descargar todos los episodios de la serie[/COLOR]",
url=item.url,
action="download_all_episodes", extra="episodios", show=item.show))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
url_anterior = scrapertools.find_single_match(data, '<li class="b"><a href="([^"]+)">« Capítulo anterior')
url_siguiente = scrapertools.find_single_match(data, '<li class="b"><a href="([^"]+)">Siguiente capítulo »')
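# The "partes" block is JavaScript-escaped; decode it and normalise the URLs before matching direct streams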
data = scrapertools.find_single_match(data, '<ul id="partes">(.*?)</ul>').decode("unicode-escape")
data = data.replace("\\/", "/").replace("%3A", ":").replace("%2F", "/")
patron = '(https://www.animeid.tv/stream/[^/]+/\d+.[a-z0-9]+)'
matches = scrapertools.find_multiple_matches(data, patron)
encontrados = set()
for url in matches:
if url not in encontrados:
itemlist.append(
Item(channel=item.channel, action="play", title="[directo]", server="directo", url=url, thumbnail="",
plot="", show=item.show, folder=False))
encontrados.add(url)
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.action = "play"
videoitem.folder = False
videoitem.title = "[" + videoitem.server + "]"
if url_anterior:
title_anterior = url_anterior.replace("/v/", "").replace(".html", "").replace('-', ' ')
itemlist.append(Item(channel=item.channel, action="findvideos", title="Anterior: " + title_anterior,
url=CHANNEL_HOST + url_anterior, thumbnail=item.thumbnail, plot=item.plot, show=item.show,
fanart=item.thumbnail, folder=True))
if url_siguiente:
title_siguiente = url_siguiente.replace("/v/", "").replace(".html", "").replace('-', ' ')
itemlist.append(Item(channel=item.channel, action="findvideos", title="Siguiente: " + title_siguiente,
url=CHANNEL_HOST + url_siguiente, thumbnail=item.thumbnail, plot=item.plot, show=item.show,
fanart=item.thumbnail, folder=True))
# Required by AutoPlay
autoplay.start(itemlist, item)
return itemlist

View File

@@ -1,30 +0,0 @@
{
"id": "animejl",
"name": "AnimeJL",
"active": true,
"adult": false,
"language": ["esp", "cast", "lat"],
"thumbnail": "https://i.imgur.com/S6foTE9.png",
"banner": "",
"categories": [
"anime"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Incluir en Novedades - Episodios de anime",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,199 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel AnimeJL -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
from channelselector import get_thumb
host = 'https://www.animejl.net/'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Nuevos Episodios", action="new_episodes",
thumbnail=get_thumb('new_episodes', auto=True), url=host))
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", thumbnail=get_thumb('all', auto=True),
url=host + 'animes'))
itemlist.append(Item(channel=item.channel, title="Series", action="list_all",
thumbnail=get_thumb('tvshows', auto=True), url=host+'animes?type%5B%5D=1&order=default'))
itemlist.append(Item(channel=item.channel, title="Películas", action="list_all",
thumbnail=get_thumb('movies',auto=True), url=host + 'animes?type%5B%5D=2&order=default'))
itemlist.append(
Item(channel=item.channel, title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
url=host + 'animes?q='))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}|"|\(|\)', "", data)
return data
def new_episodes(item):
logger.info()
itemlist = []
data = get_source(item.url)
data = scrapertools.find_single_match(data, "<h2>Últimos episodios</h2>.*?</ul>")
patron = "<li><a href='(.*?)' class.*?<img src='(.*?)' alt='(.*?)'></span><span class='Capi'>(.*?)</span>"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedepi in matches:
url = host+scrapedurl
thumbnail = host+scrapedthumbnail
title = '%s %s' % (scrapedtitle, scrapedepi)
itemlist.append(Item(channel=item.channel, action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=scrapedtitle,
))
return itemlist
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = "<article class='Anime alt B'><a href='(.*?)'>.*?class=.*?<img src='(.*?)' alt='(.*?)'>"
patron +="</figure><span class='Type .*?'>(.*?)</span>.*?star.*?<p>(.*?)</p>"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, type, plot in matches:
url = host + scrapedurl
thumbnail = host+scrapedthumbnail
title = scrapedtitle
season = ''
if 'season' in scrapedtitle.lower():
season = scrapertools.find_single_match(scrapedtitle, 'season (\d+)')
scrapedtitle = scrapertools.find_single_match(scrapedtitle, '(.*?) season')
new_item = Item(channel=item.channel, action='episodios',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=scrapedtitle,
plot=plot,
type=item.type,
infoLabels={}
)
if type.lower() == 'anime':
new_item.contentSerieName = scrapedtitle
new_item.contentSeasonNumber = season
else:
new_item.contentTitle = scrapedtitle
itemlist.append(new_item)
# Pagination
next_page = scrapertools.find_single_match(data,
"<li><a href='([^']+)'><span>&raquo;</span></a></li></ul>")
if next_page != '':
itemlist.append(Item(channel=item.channel,
action="list_all",
title=">> Página siguiente",
url=host+next_page,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
))
return itemlist
def episodios(item):
logger.info()
itemlist = []
base_data = get_source(item.url)
data = scrapertools.find_single_match(base_data, '<div class=Title>Lista de episodios</div>.*?</ul>')
if data == '':
data = scrapertools.find_single_match(base_data, '<div class=Title>Formatos disponibles</div>.*?</ul>')
if 'listepisodes' in data.lower():
patron = "<li><a href='(.*?)' class.*?>(.*?)<i class='fa-eye-slash'></i></a></li>"
elif 'listcaps' in data.lower():
patron = "<a href=(.*?)>.*?alt=(.*?)>"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.strip()
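# Walk the title backwards to the last space to isolate the trailing episode number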
n=0
for char in title[::-1]:
n += 1
if char == ' ':
break
episode = title[-n:]
episode = scrapertools.find_single_match(episode, r' (\d+)')
url = host + scrapedurl
itemlist.append(Item(channel=item.channel, title='Episodio %s' % episode, thumbnail=item.thumbnail, url=url,
action='findvideos'))
if item.type.lower() != 'anime' and len(itemlist) == 1:
return findvideos(itemlist[0])
else:
return itemlist[::-1]
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.title = '[%s]' % videoitem.server.capitalize()
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
try:
if texto != '':
return list_all(item)
else:
return []
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'anime':
item.url = host
itemlist = new_episodes(item)
if itemlist[-1].title == '>> Página siguiente':
itemlist.pop()
return itemlist
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []

View File

@@ -1,46 +0,0 @@
{
"id": "animeshd",
"name": "AnimesHD",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "https://s21.postimg.cc/b43i3ljav/animeshd.png",
"banner": "https://s4.postimg.cc/lulxulmql/animeshd-banner.png",
"categories": [
"anime",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE"
]
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -1,256 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urllib
from core import httptools
from core import scrapertools
from core import servertools
from channelselector import get_thumb
from core import tmdb
from core.item import Item
from platformcode import logger, config
from channels import autoplay, renumbertools
from channels import filtertools
tgenero = {"Comedia": "https://s7.postimg.cc/ne9g9zgwb/comedia.png",
"Drama": "https://s16.postimg.cc/94sia332d/drama.png",
"Acción": "https://s3.postimg.cc/y6o9puflv/accion.png",
"Aventura": "https://s10.postimg.cc/6su40czih/aventura.png",
"Romance": "https://s15.postimg.cc/fb5j8cl63/romance.png",
"Ciencia ficción": "https://s9.postimg.cc/diu70s7j3/cienciaficcion.png",
"Terror": "https://s7.postimg.cc/yi0gij3gb/terror.png",
"Fantasía": "https://s13.postimg.cc/65ylohgvb/fantasia.png",
"Misterio": "https://s1.postimg.cc/w7fdgf2vj/misterio.png",
"Crimen": "https://s4.postimg.cc/6z27zhirx/crimen.png",
"Hentai": "https://s29.postimg.cc/aamrngu2f/hentai.png",
"Magia": "https://s9.postimg.cc/nhkfzqffj/magia.png",
"Psicológico": "https://s13.postimg.cc/m9ghzr86f/psicologico.png",
"Sobrenatural": "https://s9.postimg.cc/6hxbvd4ov/sobrenatural.png",
"Torneo": "https://s2.postimg.cc/ajoxkk9ih/torneo.png",
"Thriller": "https://s22.postimg.cc/5y9g0jsu9/thriller.png",
"Otros": "https://s30.postimg.cc/uj5tslenl/otros.png"}
host = "http://www.animeshd.tv"
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'animeshd')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'animeshd')
IDIOMAS = {'Castellano':'CAST','Latino': 'LAT', 'Subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'openload', 'gvideo', 'streamango']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(item.clone(title="Castellano",
action="lista",
thumbnail=get_thumb('channels_spanish.png'),
fanart='https://s18.postimg.cc/fwvaeo6qh/todas.png',
url=host + '/castellano'))
itemlist.append(item.clone(title="Latino",
action="lista",
thumbnail=get_thumb('channels_latino.png'),
fanart='https://s18.postimg.cc/fwvaeo6qh/todas.png',
url=host + '/latino'))
itemlist.append(item.clone(title="Todas",
action="lista",
thumbnail=get_thumb('all', auto=True),
fanart='https://s18.postimg.cc/fwvaeo6qh/todas.png',
url=host + '/buscar?t=todo&q='))
itemlist.append(item.clone(title="Generos",
action="generos",
url=host,
thumbnail=get_thumb('genres', auto=True),
fanart='https://s3.postimg.cc/5s9jg2wtf/generos.png'))
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + '/buscar?t=todo&q=',
thumbnail=get_thumb('search', auto=True),
fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
))
itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality)
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def lista(item):
logger.info()
itemlist = []
post = ''
if item.extra in ['episodios']:
post = {'tipo': 'episodios', '_token': 'rAqVX74O9HVHFFigST3M9lMa5VL7seIO7fT8PBkl'}
post = urllib.urlencode(post)
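# Note: this POST payload, including what appears to be a hardcoded CSRF '_token', is built but never sent; get_source() below performs a plain GET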
data = get_source(item.url)
patron = 'class="anime"><a href="([^"]+)">'
patron +='<div class="cover" style="background-image: url\((.*?)\)">.*?<h2>([^<]+)<\/h2>'
matches = re.compile(patron, re.DOTALL).findall(data)
if item.extra != "next":
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
thumbnail = host + scrapedthumbnail
title = scrapedtitle
itemlist.append(item.clone(action='episodios',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=title,
context=filtertools.context(item, list_language, list_quality)
))
# Pagination
next_page = scrapertools.find_single_match(data,
'<a href="([^"]+)" data-ci-pagination-page="\d+" rel="next"')
next_page_url = scrapertools.decodeHtmlentities(next_page)
if next_page_url != "":
itemlist.append(item.clone(action="lista",
title=">> Página siguiente",
url=next_page_url,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
extra="next"
))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
try:
if texto != '':
return lista(item)
else:
return []
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def generos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<a href="https:\/\/www\.animeshd\.tv\/genero\/([^"]+)">([^<]+)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapertools.decodeHtmlentities(scrapedtitle)
if title == 'Recuentos de la vida':
title = 'Otros'
genero = scrapertools.decodeHtmlentities(scrapedurl)
thumbnail = ''
if title in tgenero:
thumbnail = tgenero[title]
url = 'http://www.animeshd.tv/genero/%s' % genero
itemlist.append(item.clone(action='lista', title=title, url=url, thumbnail=thumbnail))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<li id="epi-.*? class="list-group-item.*?"><a href="([^"]+)".*?'
patron += 'class="badge".*?width="25" title="([^"]+)">.*?<\/span>(.*?) (\d+)<\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, scrapedlang, scrapedtitle, episode in matches:
language = scrapedlang
season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, int(episode))
title = scrapedtitle + " " + str(season) +"x" + str(episode)
url = scrapedurl
infoLabels['season'] = str(season)
infoLabels['episode'] = str(episode)
itemlist.append(Item(channel=item.channel, title=title, contentSerieName=item.contentSerieName, url=url,
action='findvideos', language=IDIOMAS[language], infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName,
extra1='library'))
return itemlist
def findvideos(item):
logger.info()
from channels.pelisplus import add_vip
itemlist = []
data = get_source(item.url)
patron = "<option value=\"([^\"]+)\" data-content=.*?width='16'> (.*?) <span class='text-muted'>"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, language in matches:
vip = False
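# pelisplus.net links go through the VIP resolver; generic 'server' links hide the real iframe one request deeper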
if not config.get_setting('unify'):
title = ' [%s]' % IDIOMAS[language]
else:
title = ''
if 'pelisplus.net' in scrapedurl:
itemlist += add_vip(item, scrapedurl, IDIOMAS[language])
vip = True
elif 'server' in scrapedurl:
new_data = get_source(scrapedurl)
scrapedurl = scrapertools.find_single_match(new_data, '<iframe src="([^"]+)"')
if not vip:
itemlist.append(item.clone(title='%s'+title, url=scrapedurl.strip(), action='play',
language=IDIOMAS[language]))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
return itemlist

View File

@@ -1,23 +0,0 @@
{
"id": "anitoonstv",
"name": "AniToons TV",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "http://i.imgur.com/9Zu5NBc.png",
"banner": "http://i.imgur.com/JQSXCaB.png",
"categories": [
"tvshow",
"anime"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,218 +0,0 @@
# -*- coding: utf-8 -*-
import re
from channels import renumbertools
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
list_servers = ['openload',
'okru',
'netutv',
'rapidvideo'
]
list_quality = ['default']
host = "https://www.anitoonstv.com"
def mainlist(item):
logger.info()
thumb_series = get_thumb("tvshows", auto=True)
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="lista", title="Series", contentTitle="Series", url=host+"/catalogo.php?g=&t=series&o=0",
thumbnail=thumb_series, range=[0,19]))
itemlist.append(Item(channel=item.channel, action="lista", title="Películas", contentTitle="Películas", url=host+"/catalogo.php?g=&t=peliculas&o=0",
thumbnail=thumb_series, range=[0,19] ))
itemlist.append(Item(channel=item.channel, action="lista", title="Especiales", contentTitle="Especiales", url=host+"/catalogo.php?g=&t=especiales&o=0",
thumbnail=thumb_series, range=[0,19]))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
thumbnail=thumb_series, range=[0,19]))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host +"/php/buscar.php"
item.texto = texto
if texto != '':
return sub_search(item)
else:
return []
def sub_search(item):
logger.info()
itemlist = []
post = "b=" + item.texto
headers = {"X-Requested-With":"XMLHttpRequest"}
data = httptools.downloadpage(item.url, post=post, headers=headers).data
patron = "href='([^']+).*?"
patron += ">([^<]+)"
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(action = "episodios",
title = scrapedtitle,
url = scrapedurl
))
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info("Page fed to the regex: " + data)
patron = '<div class="serie">' # regex header
patron +="<a href='(.+?)'>" #scrapedurl
patron +="<img src='(.+?)'.+?" #scrapedthumbnail
patron +="<p class='.+?'>(.+?)<\/p>" #scrapedtitle
patron +=".+?<span .+?>(.+?)<\/span>" #scrapedplot
matches = scrapertools.find_multiple_matches(data, patron)
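# Client-side pagination: the site returns the full catalogue in one page, so slice it in fixed windows tracked in item.range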
next_page = [item.range[0]+19, item.range[1]+20]
for scrapedurl, scrapedthumbnail,scrapedtitle,scrapedplot in matches[item.range[0] : item.range[1]]:
if ":" in scrapedtitle:
cad = scrapedtitle.split(":")
show = cad[0]
else:
if "(" in scrapedtitle:
cad = scrapedtitle.split("(")
if "Super" in scrapedtitle:
show = cad[1]
show = show.replace(")", "")
else:
show = cad[0]
else:
show = scrapedtitle
if "&" in show:
cad = scrapedtitle.split("xy")
show = cad[0]
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
scrapedurl=host+scrapedurl
if item.contentTitle!="Series":
itemlist.append(item.clone(title=scrapedtitle, contentTitle=show,url=scrapedurl,
thumbnail=scrapedthumbnail, action="findvideos", context=context))
else:
itemlist.append(item.clone(title=scrapedtitle, contentSerieName=show,url=scrapedurl, plot=scrapedplot,
thumbnail=scrapedthumbnail, action="episodios", context=context))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
itemlist.append(Item(channel=item.channel, url=item.url, range=next_page, title='Pagina Siguente >>>', contentTitle=item.contentTitle, action='lista'))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<div class="pagina">(.*?)cajaSocial'
data = scrapertools.find_single_match(data, patron)
patron_caps = "<li><a href='(.+?)'>Cap(?:i|í)tulo: (.+?) - (.+?)<\/a>"
matches = scrapertools.find_multiple_matches(data, patron_caps)
#show = scrapertools.find_single_match(data, '<span>Titulo.+?<\/span>(.+?)<br><span>')
scrapedthumbnail = scrapertools.find_single_match(data, "<img src='(.+?)'.+?>")
scrapedplot = scrapertools.find_single_match(data, '<span>Descripcion.+?<\/span>(.+?)<br>')
i = 0
temp = 0
infoLabels = item.infoLabels
for link, cap, name in matches:
if int(cap) == 1:
temp = temp + 1
if int(cap) < 10:
cap = "0" + cap
season = temp
episode = int(cap)
season, episode = renumbertools.numbered_for_tratk(
item.channel, item.show, season, episode)
infoLabels['season'] = season
infoLabels['episode'] = episode
date = name
title = "%sx%s %s (%s)" % (season, str(episode).zfill(2), "Episodio %s" % episode, date)
# title = str(temp)+"x"+cap+" "+name
url = host + "/" + link
if "NO DISPONIBLE" not in name:
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, thumbnail=scrapedthumbnail,
plot=scrapedplot, url=url, contentSeasonNumber=season, contentEpisodeNumber=episode,
contentSerieName=item.contentSerieName, infoLabels=infoLabels))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
def googl(url):
logger.info()
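# Resolve a goo.gl short link to its destination through the trueurl.net lookup page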
a=url.split("/")
link=a[3]
link="http://www.trueurl.net/?q=http%3A%2F%2Fgoo.gl%2F"+link+"&lucky=on&Uncloak=Find+True+URL"
data_other = httptools.downloadpage(link).data
data_other = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data_other)
patron='<td class="withbg">Destination URL<\/td><td><A title="(.+?)"'
trueurl = scrapertools.find_single_match(data_other, patron)
return trueurl
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data1 = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data_vid = scrapertools.find_single_match(data1, 'var q = \[ \[(.+?)\] \]')
# name = scrapertools.find_single_match(data,'<span>Titulo.+?<\/span>([^<]+)<br>')
scrapedplot = scrapertools.find_single_match(data, '<br><span>Descrip.+?<\/span>([^<]+)<br>')
scrapedthumbnail = scrapertools.find_single_match(data, '<div class="caracteristicas"><img src="([^<]+)">')
itemla = scrapertools.find_multiple_matches(data_vid, '"(.+?)"')
for url in itemla:
url=url.replace('\/', '/')
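# Derive the server id from the URL hostname (e.g. openload.co -> openload)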
server1=url.split('/')
server=server1[2]
if "." in server:
server1=server.split('.')
if len(server1)==3:
server=server1[1]
else:
server=server1[0]
if "goo" in url:
url = googl(url)
server='netutv'
if "hqq" in url:
server='netutv'
if "ok" in url:
url = "https:"+url
server='okru'
quality="360p"
itemlist.append(item.clone(url=url, action="play",
thumbnail=scrapedthumbnail, server=server, plot=scrapedplot,
title="Enlace encontrado en: %s [%s]" % (server.capitalize(), quality)))
if item.contentTitle!="" and config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta película a la videoteca[/COLOR]", url=item.url,
action="add_pelicula_to_library", extra="episodios", show=item.contentTitle))
autoplay.start(itemlist, item)
return itemlist

View File

@@ -1,44 +0,0 @@
{
"id": "areadocumental",
"name": "Area-Documental",
"language": ["esp", "lat", "cast"],
"adult": false,
"active": true,
"banner": "areadocumental.png",
"thumbnail": "areadocumental.png",
"categories": [
"documentary"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}

View File

@@ -1,243 +0,0 @@
# -*- coding: utf-8 -*-
import urllib
import re
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
host = "http://www.area-documental.com"
__perfil__ = int(config.get_setting('perfil', "areadocumental"))
# Set the color profile
perfil = [['', '', ''],
['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
color1, color2, color3 = perfil[__perfil__]
def mainlist(item):
logger.info()
itemlist = []
item.text_color = color1
itemlist.append(item.clone(title="Novedades", action="entradas",
url= host + "/resultados-reciente.php?buscar=&genero=",
fanart="http://i.imgur.com/Q7fsFI6.png"))
itemlist.append(item.clone(title="Destacados", action="destacados",
url= host + "/resultados-destacados.php?buscar=&genero=",
fanart="http://i.imgur.com/Q7fsFI6.png"))
itemlist.append(item.clone(title="Categorías", action="cat", url= host + "/index.php",
fanart="http://i.imgur.com/Q7fsFI6.png"))
itemlist.append(item.clone(title="Ordenados por...", action="indice", fanart="http://i.imgur.com/Q7fsFI6.png"))
itemlist.append(item.clone(title="Buscar...", action="search"))
itemlist.append(item.clone(title="Configurar canal", action="configuracion", text_color="gold"))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}|"|\(|\)', "", data)
return data
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
item.url = host + "/resultados.php?buscar=%s&genero=&x=0&y=0" % texto
item.action = "entradas"
try:
itemlist = entradas(item)
return itemlist
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == "documentales":
item.url = host + "/resultados-reciente.php?buscar=&genero="
item.action = "entradas"
itemlist = entradas(item)
if itemlist[-1].action == "entradas":
itemlist.pop()
# Catch the exception so a single failing channel does not break the "newest" listing
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def indice(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Título", action="entradas",
url= host + "/resultados-titulo.php?buscar=&genero="))
itemlist.append(item.clone(title="Año", action="entradas",
url= host + "/resultados-anio.php?buscar=&genero="))
return itemlist
def cat(item):
logger.info()
itemlist = []
data = get_source(item.url)
bloques = scrapertools.find_multiple_matches(data, '</li><li class=dropdown>.*?</ul>')
for bloque in bloques:
matches = scrapertools.find_multiple_matches(bloque, "<li><a href=(.*?)>(.*?)<")
for scrapedurl, scrapedtitle in matches:
scrapedurl = host + "/" + scrapedurl
if not "TODO" in scrapedtitle:
itemlist.append(item.clone(action="entradas", title=scrapedtitle, url=scrapedurl))
return itemlist
def destacados(item):
logger.info()
itemlist = []
item.text_color = color2
data = httptools.downloadpage(item.url).data
data = scrapertools.unescape(data)
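# Prefetch the following page and append its markup so two pages are rendered in a single listing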
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"> ></a>')
if next_page != "":
data2 = scrapertools.unescape(httptools.downloadpage(host + next_page).data)
data += data2
else:
data2 = ""
data = data.replace("\n", "").replace("\t", "")
patron = '(?s)<div id="peliculas">.*?a href="([^"]+)".*?'
patron += '<img src="([^"]+)".*?'
patron += 'target="_blank">(.*?)</a></span>'
patron += '(.*?)<p>'
patron += '(.*?)</p>.*?'
patron += '</strong>:(.*?)<strong>.*?'
patron += '</strong>(.*?)</div>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, scrapedplot, genero, extra in matches:
infolab = {'plot': scrapedplot, 'genre': genero}
scrapedurl = host + "/" + scrapedurl
scrapedthumbnail = host + urllib.quote(scrapedthumbnail)
title = scrapedtitle
if "full_hd" in extra:
quality = "3D"
elif "720" in extra:
quality ='720'
else:
quality = 'SD'
year = year.replace("\xc2\xa0", "").replace(" ", "")
if not year.isspace() and year != "":
infolab['year'] = int(year)
itemlist.append(item.clone(action="findvideos", title=title, fulltitle=title,
url=scrapedurl, thumbnail=scrapedthumbnail, infoLabels=infolab, contentTitle =
title, quality = quality))
next_page = scrapertools.find_single_match(data2, '<a href="([^"]+)"> ></a>')
if next_page:
itemlist.append(item.clone(action="entradas", title=">> Página Siguiente", url=host + next_page,
text_color=color3))
return itemlist
def entradas(item):
logger.info()
itemlist = []
item.text_color = color2
data = get_source(item.url)
patron = 'class=imagen.*?href=(.*?)><img.*?src=(.*?) alt=.*?title=(.*?)/>.*?</h2>(\d{4}) (.*?)<.*?space>(.*?)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, genero, scrapedplot in matches:
infolab = {'plot': scrapedplot, 'genre': genero}
scrapedurl = host + "/" + scrapedurl
scrapedthumbnail = host +'/'+ scrapedthumbnail
title = scrapedtitle
if not year.isspace() and year != "":
infolab['year'] = int(year)
itemlist.append(item.clone(action="findvideos", title=title, fulltitle=title,
url=scrapedurl, thumbnail=scrapedthumbnail, infoLabels=infolab, contentTitle =
title))
next_page = scrapertools.find_single_match(data, '<a class=last>.*?</a></li><li><a href=(.*?)>.*?</a>')
next_page = scrapertools.htmlclean(next_page)
if next_page:
itemlist.append(item.clone(action="entradas", title=">> Página Siguiente", url=host + next_page,
text_color=color3))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
subs = scrapertools.find_multiple_matches(data, 'file: "(/webvtt[^"]+)".*?label: "([^"]+)"')
patron = 'file:\s*"(http://[^/]*/Videos/[^"]+)",\s*label:\s*"([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for url, quality in matches:
url += "|User-Agent=%s&Referer=%s" \
% ("Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0", item.url)
for url_sub, label in subs:
url_sub = host + urllib.quote(url_sub)
title = "Ver video en [[COLOR %s]%s[/COLOR]] Sub %s" % (color3, quality, label)
itemlist.append(item.clone(action="play", server="directo", title=title,
url=url, subtitle=url_sub, extra=item.url, quality=quality, language = label))
return itemlist
def play(item):
logger.info()
itemlist = []
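# Download the subtitle into the add-on data folder so the player can load it from a local file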
try:
from core import filetools
ficherosubtitulo = filetools.join(config.get_data_path(), 'subtitulo_areadocu.srt')
if filetools.exists(ficherosubtitulo):
try:
filetools.remove(ficherosubtitulo)
except IOError:
logger.error("Error al eliminar el archivo " + ficherosubtitulo)
raise
data = httptools.downloadpage(item.subtitle, headers={'Referer': item.extra}).data
filetools.write(ficherosubtitulo, data)
subtitle = ficherosubtitulo
except:
subtitle = ""
logger.error("Error al descargar el subtítulo")
extension = item.url.rsplit("|", 1)[0][-4:]
itemlist.append(['%s %s [directo]' % (extension, item.quality), item.url, 0, subtitle])
return itemlist

View File

@@ -1,14 +0,0 @@
{
"id": "asialiveaction",
"name": "Asialiveaction",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "asialiveaction.png",
"banner": "https://imgur.com/B1IOAu4.png",
"categories": [
"movie",
"tvshow",
"vos"
]
}

View File

@@ -1,222 +0,0 @@
# -*- coding: UTF-8 -*-
import re
import urlparse
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from lib import jsunpack
from platformcode import config, logger
host = "https://asialiveaction.com"
IDIOMAS = {'Japones': 'Japones'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['gvideo', 'openload','streamango']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas",
url=urlparse.urljoin(host, "/pelicula"), type='pl'))
itemlist.append(Item(channel=item.channel, action="lista", title="Series",
url=urlparse.urljoin(host, "/serie"), type='sr'))
itemlist.append(Item(channel=item.channel, action="category", title="Géneros", url=host, cat='genre'))
itemlist.append(Item(channel=item.channel, action="category", title="Calidad", url=host, cat='quality'))
itemlist.append(Item(channel=item.channel, action="category", title="Orden Alfabético", url=host, cat='abc'))
itemlist.append(Item(channel=item.channel, action="category", title="Año de Estreno", url=host, cat='year'))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+"/?s="))
autoplay.show_option(item.channel, itemlist)
return itemlist
def category(item):
logger.info()
itemlist = list()
data = httptools.downloadpage(host).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
action = "lista"
if item.cat == 'abc':
data = scrapertools.find_single_match(data, '<div class="Body Container">(.+?)<main>')
action = "lista_a"
elif item.cat == 'genre':
data = scrapertools.find_single_match(data, '<a>Géneros<\/a><ul class="sub.menu">(.+?)<a>Año<\/a>')
elif item.cat == 'year':
data = scrapertools.find_single_match(data, '<a>Año<\/a><ul class="sub.menu">(.+?)<a>Idioma<\/a>')
elif item.cat == 'quality':
data = scrapertools.find_single_match(data, '<a>Calidad<\/a><ul class="sub-menu">(.+?)<a>Géneros<\/a>')
patron = '<li.*?><a href="(.*?)">(.*?)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
if scrapedtitle != 'Próximas Películas':
if not scrapedurl.startswith("http"): scrapedurl = host + scrapedurl
itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, type='cat'))
return itemlist
def search_results(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<span class=.post-labels.>([^<]+)</span>.*?class="poster-bg" src="([^"]+)"/>.*?<h4>.*?'
patron +=">(\d{4})</a>.*?<h6>([^<]+)<a href='([^']+)"
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedtype, scrapedthumbnail, scrapedyear, scrapedtitle ,scrapedurl in matches:
title="%s [%s]" % (scrapedtitle, scrapedyear)
new_item= Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail)
if scrapedtype.strip() == 'Serie':
new_item.contentSerieName = scrapedtitle
new_item.action = 'episodios'
new_item.type = 'sr'
else:
new_item.contentTitle = scrapedtitle
new_item.action = 'findvideos'
new_item.type = 'pl'
itemlist.append(new_item)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return lista(item)
def episodios(item):
logger.info()
itemlist = list()
data = httptools.downloadpage(item.url).data
data = data.replace('"ep0','"epp"')
patron = '(?is)MvTbImg B.*?href="([^"]+)".*?'
patron += 'src="([^"]+)".*?'
patron += 'span>Episodio ([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedepi in matches:
title="1x%s - %s" % (scrapedepi, item.contentSerieName)
#urls = scrapertools.find_multiple_matches(scrapedurls, 'href="([^"]+)')
itemlist.append(item.clone(action='findvideos', title=title, url=scrapedurl, thumbnail=scrapedthumbnail, type=item.type,
infoLabels=item.infoLabels))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]",
url=item.url, action="add_serie_to_library", extra="episodios",
contentSerieName=item.contentSerieName))
return itemlist
def lista_a(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?is)Num">.*?href="([^"]+)".*?'
patron += 'data-src="([^"]+)".*?>.*?'
patron += '<strong>([^<]+)<.*?'
patron += '<td>([^<]+)<.*?'
patron += 'href.*?>([^"]+)<\/a>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedtype in matches:
if not scrapedthumbnail.startswith("http"): scrapedthumbnail = "https:" + scrapedthumbnail
action = "findvideos"
if "Serie" in scrapedtype: action = "episodios"
itemlist.append(item.clone(action=action, title=scrapedtitle, contentTitle=scrapedtitle, contentSerieName=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
infoLabels={'year':scrapedyear}))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '(?is)class="TPost C">.*?href="([^"]+)".*?' #scrapedurl
patron += 'lazy-src="([^"]+)".*?>.*?' #scrapedthumbnail
patron += 'title">([^<]+)<.*?' #scrapedtitle
patron += 'year">([^<]+)<.*?' #scrapedyear
patron += 'href.*?>([^"]+)<\/a>' #scrapedtype
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedtype in matches:
if not scrapedthumbnail.startswith("http"): scrapedthumbnail = "https:" + scrapedthumbnail
title="%s - %s" % (scrapedtitle,scrapedyear)
new_item = Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
type=scrapedtype, infoLabels={'year':scrapedyear})
if scrapedtype == 'Serie':
new_item.contentSerieName = scrapedtitle
new_item.action = 'episodios'
else:
new_item.contentTitle = scrapedtitle
new_item.action = 'findvideos'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
#pagination
url_next_page = scrapertools.find_single_match(data, 'rel="next" href="([^"]+)"')
if len(itemlist)>0 and url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista'))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data.replace("&quot;",'"').replace("amp;","").replace("#038;","")
matches = scrapertools.find_multiple_matches(data, 'TPlayerTb.*?id="([^"]+)".*?src="([^"]+)"')
matches_del = scrapertools.find_multiple_matches(data, '(?is)<!--<td>.*?-->')
# Remove the HTML comments, which contain duplicated links
for del_m in matches_del:
data = data.replace(del_m, "")
# First group of links
for id, url1 in matches:
language = scrapertools.find_single_match(data, '(?is)data-tplayernv="%s".*?span><span>([^<]+)' %id)
data1 = httptools.downloadpage(url1).data
url = scrapertools.find_single_match(data1, 'src="([^"]+)')
if "a-x" in url:
data1 = httptools.downloadpage(url, headers={"Referer":url1}).data
url = scrapertools.find_single_match(data1, 'src: "([^"]+)"')
if "embed.php" not in url:
if url:
itemlist.append(item.clone(action = "play", title = "Ver en %s (" + language + ")", language = language, url = url))
continue
data1 = httptools.downloadpage(url).data
packed = scrapertools.find_single_match(data1, "(?is)eval\(function\(p,a,c,k,e.*?</script>")
unpack = jsunpack.unpack(packed)
urls = scrapertools.find_multiple_matches(unpack, '"file":"([^"]+).*?label":"([^"]+)')
for url2, quality in urls:
if url2:
itemlist.append(item.clone(action = "play", title = "Ver en %s (" + quality + ") (" + language + ")", language = language, url = url2))
# Second group of links
matches = scrapertools.find_multiple_matches(data, '<span><a rel="nofollow" target="_blank" href="([^"]+)"')
for url in matches:
data1 = httptools.downloadpage(url).data
matches1 = scrapertools.find_multiple_matches(data1, '"ser".*?</tr>')
for ser in matches1:
ser = ser.replace("&#215;","x")
aud = scrapertools.find_single_match(ser, 'aud"><i class="([^"]+)')
sub = scrapertools.find_single_match(ser, 'sub"><i class="([^"]+)')
quality = scrapertools.find_single_match(ser, 'res">.*?x([^<]+)')
language = "Versión RAW"
if aud == "jp" and sub == "si":
language = "Sub. Español"
matches2 = scrapertools.find_multiple_matches(ser, 'href="([^"]+)')
for url2 in matches2:
if url2:
itemlist.append(item.clone(action = "play", title = "Ver en %s (" + quality + ") (" + language + ")", language = language, url = url2))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
return itemlist

View File

@@ -1,81 +0,0 @@
{
"id": "canalpelis",
"name": "CanalPelis",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast", "vose"],
"fanart": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/canalpelisbg.jpg",
"thumbnail": "http://www.canalpelis.com/wp-content/uploads/2016/11/logo_web.gif",
"banner": "",
"categories": [
"movie",
"tvshow",
"vos"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 5",
"Perfil 4",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
},
{
"id": "orden_episodios",
"type": "bool",
"label": "Mostrar los episodios de las series en orden descendente",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,424 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel CanalPelis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import sys
import urllib
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import channeltools
from core import tmdb
from platformcode import config, logger
from channelselector import get_thumb
__channel__ = "canalpelis"
host = "http://www.canalpelis.com/"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
__perfil__ = int(config.get_setting('perfil', __channel__))
except:
__modo_grafico__ = True
__perfil__ = 0
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
color1 = color2 = color3 = color4 = color5 = ""
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
thumbnail = "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/%s.png"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Peliculas", action="peliculas", thumbnail=get_thumb('movies', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'movies/', viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Géneros", action="generos", thumbnail=get_thumb('genres', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'genre/', viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Año de Estreno", action="year_release", thumbnail=get_thumb('year', auto=True),
text_blod=True, page=0, viewcontent='movies', url=host + 'release/',
viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
text_blod=True, url=host, page=0))
itemlist.append(item.clone(title="Series", action="series", extra='serie', url=host + 'tvshows/',
viewmode="movie_with_plot", text_blod=True, viewcontent='movies',
thumbnail=get_thumb('tvshows', auto=True), page=0))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))
try:
return sub_search(item)
# Catch the exception so a single failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
def sub_search(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(data)
patron = '<div class="thumbnail animation-2"><a href="([^"]+)">.*?' # url
patron += '<img src="([^"]+)" alt="([^"]+)" />.*?' # img and title
patron += '<span class="([^"]+)".*?' # tipo
patron += '<span class="year">([^<]+)</span>' # year
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, tipo, year in matches:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, contentTitle=scrapedtitle,
action="findvideos", infoLabels={"year": year},
thumbnail=scrapedthumbnail, text_color=color3, page=0))
paginacion = scrapertools.find_single_match(
data, '<a class="page larger" href="([^"]+)">\d+</a>')
if paginacion:
itemlist.append(Item(channel=item.channel, action="sub_search",
title="» Siguiente »", url=paginacion))
tmdb.set_infoLabels(itemlist)
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host + 'movies/'
elif categoria == 'infantiles':
item.url = host + "genre/cine-animacion/"
elif categoria == 'terror':
item.url = host + "genre/cine-terror/"
else:
return []
itemlist = peliculas(item)
if itemlist[-1].title == "» Siguiente »":
itemlist.pop()
# Catch the exception so a single failing channel does not break the "newest" listing
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?' # img, title.strip()
patron += '<span class="icon-star2"></span>(.*?)/div>.*?' # rating
patron += '<span class="quality">([^<]+)</span>.*?' # calidad
patron += '<a href="([^"]+)"><div class="see"></div>.*?' # url
patron += '<span>(\d+)</span>' # year
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year in matches[item.page:item.page + 30]:
if 'Próximamente' not in quality and '-XXX.jpg' not in scrapedthumbnail:
scrapedtitle = scrapedtitle.replace('Ver ', '').strip()
contentTitle = scrapedtitle.partition(':')[0].partition(',')[0]
title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (
scrapedtitle, year, quality)
itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3,
url=scrapedurl, infoLabels={'year': year},
contentTitle=contentTitle, thumbnail=scrapedthumbnail,
title=title, context="buscar_trailer", quality=quality))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
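# Paginate locally over the scraped matches in blocks of 30; only request the site's next page once the local block is exhausted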
if item.page + 30 < len(matches):
itemlist.append(item.clone(page=item.page + 30,
title="» Siguiente »", text_color=color3))
else:
next_page = scrapertools.find_single_match(
data, "<span class=\"current\">\d+</span><a href='([^']+)'")
if next_page:
itemlist.append(item.clone(url=next_page, page=0,
title="» Siguiente »", text_color=color3))
for item in itemlist:
if item.infoLabels['plot'] == '':
datas = httptools.downloadpage(item.url).data
datas = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", datas)
item.fanart = scrapertools.find_single_match(
datas, "<meta property='og:image' content='([^']+)' />")
item.fanart = item.fanart.replace('w780', 'original')
item.plot = scrapertools.find_single_match(datas, '</h4><p>(.*?)</p>')
item.plot = scrapertools.htmlclean(item.plot)
item.infoLabels['director'] = scrapertools.find_single_match(
datas, '<div class="name"><a href="[^"]+">([^<]+)</a>')
item.infoLabels['genre'] = scrapertools.find_single_match(
datas, 'rel="tag">[^<]+</a><a href="[^"]+" rel="tag">([^<]+)</a>')
return itemlist
def generos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<li class="cat-item cat-item-[^"]+"><a href="([^"]+)" title="[^"]+">([^<]+)</a> <i>([^<]+)</i></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, cantidad in matches:
if cantidad != '0' and scrapedtitle != '# Próximamente':
title = "%s (%s)" % (scrapedtitle, cantidad)
itemlist.append(item.clone(channel=item.channel, action="peliculas", title=title, page=0,
url=scrapedurl, text_color=color3, viewmode="movie_with_plot"))
return itemlist
def year_release(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info(data)
patron = '<li><a href="([^"]+)">([^<]+)</a></li>' # url, title
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0,
url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))
return itemlist
def series(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?<a href="([^"]+)">.*?'
patron += '<div class="texto">([^<]+)</div>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, scrapedurl, plot in matches[item.page:item.page + 30]:
if plot == '':
plot = scrapertools.find_single_match(data, '<div class="texto">([^<]+)</div>')
scrapedtitle = scrapedtitle.replace('Ver ', '').replace(
' Online HD', '').replace('ver ', '').replace(' Online', '').replace(' (Serie TV)', '').strip()
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas",
contentSerieName=scrapedtitle, show=scrapedtitle, plot=plot,
thumbnail=scrapedthumbnail, contentType='tvshow'))
# url_next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
tmdb.set_infoLabels(itemlist, __modo_grafico__)
if item.page + 30 < len(matches):
itemlist.append(item.clone(page=item.page + 30,
title="» Siguiente »", text_color=color3))
else:
next_page = scrapertools.find_single_match(
data, '<link rel="next" href="([^"]+)" />')
if next_page:
itemlist.append(item.clone(url=next_page, page=0,
title="» Siguiente »", text_color=color3))
return itemlist
def temporadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
datas = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = "<span class='title'>([^<]+)<i>.*?" # numeros de temporadas
patron += "<img src='([^']+)'>" # capitulos
# logger.info(datas)
matches = scrapertools.find_multiple_matches(datas, patron)
if len(matches) > 1:
for scrapedseason, scrapedthumbnail in matches:
scrapedseason = " ".join(scrapedseason.split())
temporada = scrapertools.find_single_match(scrapedseason, '(\d+)')
new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail, extra='temporadas')
new_item.infoLabels['season'] = temporada
new_item.extra = ""
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
for i in itemlist:
i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
if i.infoLabels['title']:
# If the season has its own name, append it to the item title
i.title += " - %s" % (i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
# If the season has its own poster, use it instead of the show's one
i.thumbnail = i.infoLabels['poster_path']
itemlist.sort(key=lambda it: it.title)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))
return itemlist
else:
return episodios(item)
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
datas = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = "<div class='imagen'>.*?"
patron += "<div class='numerando'>(.*?)</div>.*?"
patron += "<a href='([^']+)'>([^<]+)</a>"
matches = scrapertools.find_multiple_matches(datas, patron)
for scrapedtitle, scrapedurl, scrapedname in matches:
scrapedtitle = scrapedtitle.replace('--', '0')
patron = '(\d+) - (\d+)'
match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
season, episode = match[0]
if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
continue
title = "%sx%s: %s" % (season, episode.zfill(2), scrapertools.unescape(scrapedname))
new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
contentType="episode")
if 'infoLabels' not in new_item:
new_item.infoLabels = {}
new_item.infoLabels['season'] = season
new_item.infoLabels['episode'] = episode.zfill(2)
itemlist.append(new_item)
    # TODO: skip this step when adding to the video library
    if not item.extra:
        # Fetch the details of every episode in the season via multithreading
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
        for i in itemlist:
            if i.infoLabels['title']:
                # If the episode has its own title, append it to the item title
                i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[
                    'episode'], i.infoLabels['title'])
            if 'poster_path' in i.infoLabels:
                # If the episode has its own image, use it as the thumbnail
                i.thumbnail = i.infoLabels['poster_path']
itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
reverse=config.get_setting('orden_episodios', __channel__))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
# Opción "Añadir esta serie a la videoteca"
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))
return itemlist
def findvideos(item):
logger.info()
import base64
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
patron = "data-post='(\d+)' data-nume='(\d+)'.*?img src='([^']+)'>"
matches = re.compile(patron, re.DOTALL).findall(data)
for id, option, lang in matches:
lang = scrapertools.find_single_match(lang, '.*?/flags/(.*?).png')
lang = lang.lower().strip()
idioma = {'mx': '[COLOR cornflowerblue](LAT)[/COLOR]',
'es': '[COLOR green](CAST)[/COLOR]',
'en': '[COLOR red](VOSE)[/COLOR]',
'gb': '[COLOR red](VOSE)[/COLOR]'}
if lang in idioma:
lang = idioma[lang]
post = {'action': 'doo_player_ajax', 'post': id, 'nume': option, 'type': 'movie'}
post = urllib.urlencode(post)
test_url = '%swp-admin/admin-ajax.php' % host
new_data = httptools.downloadpage(test_url, post=post, headers={'Referer': item.url}).data
hidden_url = scrapertools.find_single_match(new_data, "src='([^']+)'")
new_data = httptools.downloadpage(hidden_url, follow_redirects=False)
try:
b64_url = scrapertools.find_single_match(new_data.headers['location'], "y=(.*)")
url = base64.b64decode(b64_url)
except:
url = hidden_url
if url != '':
itemlist.append(
Item(channel=item.channel, action='play', language=lang, infoLabels=item.infoLabels,
url=url, title='Ver en: ' + '[COLOR yellowgreen]%s [/COLOR]' + lang))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
itemlist.sort(key=lambda it: it.language, reverse=False)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
thumbnail=thumbnail_host, contentTitle=item.contentTitle))
return itemlist
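# A minimal, self-contained sketch of the link-unhiding step above: the
# intermediate page answers with a redirect whose "y=" query parameter
# carries the final URL in base64. The Location value here is hypothetical.
def _demo_decode_hidden_url():
    import base64
    location = 'https://redirector.example/out?y=aHR0cHM6Ly9leGFtcGxlLmNvbS92aWRlbw=='
    b64_url = scrapertools.find_single_match(location, "y=(.*)")
    return base64.b64decode(b64_url)  # -> 'https://example.com/video'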


@@ -1,12 +0,0 @@
{
"id": "cartoonlatino",
"name": "Cartoon-Latino",
"active": true,
"adult": false,
"language": ["esp", "lat"],
"thumbnail": "http://i.imgur.com/wk6fRDZ.png",
"banner": "http://i.imgur.com/115c59F.png",
"categories": [
"tvshow"
]
}


@@ -1,174 +0,0 @@
# -*- coding: utf-8 -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
host = "http://www.cartoon-latino.com/"
IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
list_servers = ['openload',
'vimple',
'gvideo',
'rapidvideo'
]
list_quality = ['default']
def mainlist(item):
logger.info()
thumb_series = get_thumb('tvshows', auto=True)
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=host,
thumbnail=thumb_series))
autoplay.show_option(item.channel, itemlist)
return itemlist
def lista_gen(item):
logger.info()
itemlist = []
data1 = httptools.downloadpage(item.url).data
data1 = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data1)
patron_sec = '<section class="content">.+?<\/section>'
data = scrapertools.find_single_match(data1, patron_sec)
patron = '<article id=.+? class=.+?><div.+?>'
patron += '<a href="([^"]+)" title="([^"]+)' # scrapedurl, # scrapedtitle
patron += ' Capítulos Completos ([^"]+)">' # scrapedlang
patron += '<img.+? data-src=.+? data-lazy-src="([^"]+)"' # scrapedthumbnail
matches = scrapertools.find_multiple_matches(data, patron)
i = 0
for scrapedurl, scrapedtitle, scrapedlang, scrapedthumbnail in matches:
i = i + 1
if 'HD' in scrapedlang:
scrapedlang = scrapedlang.replace('HD', '')
title = scrapedtitle + " [ " + scrapedlang + "]"
itemlist.append(
Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail, action="episodios",
show=scrapedtitle))
tmdb.set_infoLabels(itemlist)
    # Pagination
patron_pag = '<a class="nextpostslink" rel="next" href="([^"]+)">'
next_page_url = scrapertools.find_single_match(data, patron_pag)
if next_page_url != "" and i != 1:
item.url = next_page_url
itemlist.append(Item(channel=item.channel, action="lista_gen", title=">> Página siguiente", url=next_page_url,
thumbnail='https://s32.postimg.cc/4zppxf5j9/siguiente.png'))
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data_lista = scrapertools.find_single_match(data, '<div class="su-list su-list-style-"><ul>(.+?)<\/ul><\/div>')
patron = "<a href='(.+?)'>(.+?)<\/a>"
matches = scrapertools.find_multiple_matches(data_lista, patron)
for link, name in matches:
title = name + " [Latino]"
url = link
        context1 = autoplay.context
itemlist.append(
item.clone(title=title, url=url, plot=title, action="episodios", show=title,
context=context1))
tmdb.set_infoLabels(itemlist)
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data_lista = scrapertools.find_single_match(data,
'<div class="su-list su-list-style-"><ulclass="lista-capitulos">.+?<\/div><\/p>')
if '&#215;' in data_lista:
data_lista = data_lista.replace('&#215;', 'x')
show = item.title
if "[Latino]" in show:
show = show.replace("[Latino]", "")
if "Ranma" in show:
patron_caps = '<\/i> <strong>.+?Capitulo ([^"]+)\: <a .+? href="([^"]+)">([^"]+)<\/a>'
else:
patron_caps = '<\/i> <strong>Capitulo ([^"]+)x.+?\: <a .+? href="([^"]+)">([^"]+)<\/a>'
matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
scrapedplot = scrapertools.find_single_match(data, '<strong>Sinopsis<\/strong><strong>([^"]+)<\/strong><\/pre>')
    number = 0
    A = 1
    tempo = 1
for temp, link, name in matches:
if A != temp and "Ranma" not in show:
number = 0
number = number + 1
if "Ranma" in show:
number,tempo=renumerar_ranma(number,tempo,18+1,1)
number,tempo=renumerar_ranma(number,tempo,22+1,2)
number,tempo=renumerar_ranma(number,tempo,24+1,3)
number,tempo=renumerar_ranma(number,tempo,24+1,4)
number,tempo=renumerar_ranma(number,tempo,24+1,5)
number,tempo=renumerar_ranma(number,tempo,24+1,6)
capi=str(number).zfill(2)
if "Ranma" in show:
title = "{0}x{1} - ({2})".format(str(tempo), capi, name)
else:
title = "{0}x{1} - ({2})".format(str(temp), capi, name)
url = link
A = temp
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, show=show))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir " + show + " a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
def renumerar_ranma(number,tempo,final,actual):
if number==final and tempo==actual:
tempo=tempo+1
number=1
return number, tempo
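# A quick, self-contained check of the rollover above: episodios() feeds the
# hard-coded season lengths (18+1, 22+1, 24+1, ...), so once the running
# counter reaches the last episode of a season it resets to 1 and the season
# index advances. Example values are hypothetical.
def _demo_renumerar_ranma():
    number, tempo = 19, 1                               # 19th episode overall
    number, tempo = renumerar_ranma(number, tempo, 18 + 1, 1)
    return number, tempo                                # -> (1, 2)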
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data_function = scrapertools.find_single_match(data, '<!\[CDATA\[function (.+?)\]\]')
data_id = scrapertools.find_single_match(data,
"<script>\(adsbygoogle = window\.adsbygoogle \|\| \[\]\)\.push\({}\);<\/script><\/div><br \/>(.+?)<\/ins>")
if data_id == "":
data_id = scrapertools.find_single_match(data, "<p><center><br />.*?</center>")
itemla = scrapertools.find_multiple_matches(data_function, "src='(.+?)'")
serverid = scrapertools.find_multiple_matches(data_id, '<script>([^"]+)\("([^"]+)"\)')
for server, id in serverid:
for link in itemla:
if server in link:
url = link.replace('" + ID' + server + ' + "', str(id))
itemlist.append(item.clone(url=url, action="play",
title="Enlace encontrado en %s "
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
scrapertools.printMatches(itemlist)
autoplay.start(itemlist, item)
return itemlist
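# A minimal sketch of the placeholder splice above: the page script builds
# the player URL as '... " + IDserver + " ...' and a separate call supplies
# the id, so the scraper rebuilds the real link by textual replacement.
# All values here are hypothetical.
def _demo_splice_player_url():
    link = 'https://player.example/embed/" + IDopenload + "/'
    server, video_id = 'openload', 'abc123'
    return link.replace('" + ID' + server + ' + "', video_id)
    # -> 'https://player.example/embed/abc123/'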


@@ -1,30 +0,0 @@
{
"id": "ciberdocumentales",
"name": "CiberDocumentales",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "https://s9.postimg.cc/secdb5s8v/ciberdocumentales.png",
"banner": "https://s1.postimg.cc/sa486z0of/ciberdocumentales_banner.png",
"categories": [
"documentary"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"default": true,
"enabled": true,
"visible": true
}
]
}


@@ -1,122 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import tmdb
from core.item import Item
from platformcode import logger
host = 'http://www.ciberdocumentales.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Todas", action="lista", thumbnail='https://s18.postimg.cc/fwvaeo6qh/todas.png',
fanart='https://s18.postimg.cc/fwvaeo6qh/todas.png', url=host))
itemlist.append(Item(channel=item.channel, title="Generos", action="generos", url=host,
thumbnail='https://s3.postimg.cc/5s9jg2wtf/generos.png',
fanart='https://s3.postimg.cc/5s9jg2wtf/generos.png'))
itemlist.append(Item(channel=item.channel, title="Mas Vistas", action="lista", url=host,
thumbnail='https://s9.postimg.cc/wmhzu9d7z/vistas.png',
fanart='https://s9.postimg.cc/wmhzu9d7z/vistas.png', extra='masvistas'))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host,
thumbnail='https://s30.postimg.cc/pei7txpa9/buscar.png',
fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'))
return itemlist
def lista(item):
logger.info()
itemlist = []
if item.extra == 'buscar':
data = httptools.downloadpage(host + '/index.php?' + 'categoria=0&keysrc=' + item.text).data
else:
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
if item.extra == 'masvistas':
patron = '<div class=bloquecenmarcado><a title=.*? target=_blank href=(.*?) class=game><img src=(.*?) alt=(.*?) title= class=bloquecenimg \/>.*?<strong>(.*?)<\/strong>'
else:
patron = '<div class=fotonoticia><a.*?target=_blank href=(.*?)><img src=(.*?) alt=(.*?) \/>.*?class=textonoticia>.*?\/><br \/>(.*?)<\/div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in matches:
url = host + scrapedurl
thumbnail = host + scrapedthumbnail
plot = scrapertools.htmlclean(scrapedplot)
plot = plot.decode('iso8859-1').encode('utf-8')
contentTitle = scrapedtitle
title = contentTitle
title = title.decode('iso8859-1').encode('utf-8')
fanart = ''
itemlist.append(
Item(channel=item.channel, action='findvideos', title=title, url=url, thumbnail=thumbnail, plot=plot,
fanart=fanart, contentTitle=contentTitle))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination
    if itemlist:
next_page = scrapertools.find_single_match(data, 'class=current>.*?<\/span><a href=(.*?)>.*?<\/a>')
if next_page != '' and item.extra != 'masvistas':
itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=host + next_page,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'))
return itemlist
def generos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = '<a style=text-transform:capitalize; href=(.*?)\/>(.*?)<\/a><\/span><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
thumbnail = ''
fanart = ''
title = scrapedtitle
url = host + scrapedurl
itemlist.append(
Item(channel=item.channel, action="lista", title=title, fulltitle=item.title, url=url, thumbnail=thumbnail,
fanart=fanart))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.text = texto
item.extra = 'buscar'
if texto != '':
return lista(item)
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'documentales':
item.url = host
itemlist = lista(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
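# A tiny sketch of how a search query becomes the request url used in
# lista() above; the query text is hypothetical.
def _demo_search_url(texto='guerra fria'):
    return host + '/index.php?' + 'categoria=0&keysrc=' + texto.replace(" ", "+")
    # -> 'http://www.ciberdocumentales.com/index.php?categoria=0&keysrc=guerra+fria'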


@@ -1,62 +0,0 @@
{
"id": "ciberpeliculashd",
"name": "Ciberpeliculashd",
"active": true,
"adult": false,
"language": ["esp", "lat"],
"thumbnail": "https://s17.postimg.cc/78tekxeov/ciberpeliculashd1.png",
"banner": "",
"categories": [
"movie"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
}
]
}


@@ -1,263 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
__channel__='ciberpeliculashd'
host = "http://ciberpeliculashd.net"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
__modo_grafico__ = True
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel = item.channel, title = "Películas", text_bold = True, folder = False))
itemlist.append(Item(channel = item.channel, title = " Novedades", action = "peliculas", url = host + "/?peli=1",
thumbnail=get_thumb('newest', auto=True)))
itemlist.append(Item(channel = item.channel, title = " Por género", action = "filtro", url = host,
extra = "categories", thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel = item.channel, title = " Por calidad", action = "filtro", url = host,
extra = "qualitys", thumbnail=get_thumb('quality', auto=True)))
itemlist.append(Item(channel = item.channel, title = " Por idioma", action = "filtro", url = host,
extra = "languages", thumbnail=get_thumb('language', auto=True)))
itemlist.append(Item(channel = item.channel, title = " Por año", action = "filtro", url = host,
extra = "years", thumbnail=get_thumb('year', auto=True)))
itemlist.append(Item(channel = item.channel, title = ""))
itemlist.append(Item(channel = item.channel, title = "Series", text_bold = True, folder = False))
itemlist.append(Item(channel = item.channel, title = " Novedades", action = "series",
url = host +"/series/?peli=1", thumbnail=get_thumb('newest', auto=True)))
itemlist.append(Item(channel = item.channel, title = ""))
itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "/?s=",
thumbnail=get_thumb('search', auto=True)))
return itemlist
def series(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, 'loop-posts series.*?panel-pagination pagination-bottom')
patron = 'a href="([^"]+).*?'
patron += '((?:http|https)://image.tmdb.org[^"]+).*?'
patron += 'title="([^"]+)'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
itemlist.append(Item(action = "capitulos",
channel = item.channel,
thumbnail = scrapedthumbnail,
title = scrapedtitle,
contentSerieName = scrapedtitle,
url = scrapedurl
))
if itemlist:
tmdb.set_infoLabels(itemlist)
page = int(scrapertools.find_single_match(item.url,"peli=([0-9]+)")) + 1
next_page = scrapertools.find_single_match(item.url,".*?peli=")
next_page += "%s" %page
itemlist.append(Item(action = "series",
channel = item.channel,
title = "Página siguiente >>",
url = next_page
))
return itemlist
def episodios(item):
    logger.info()
    return capitulos(item)
def capitulos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, 'Lista de Temporadas.*?Content principal')
patron = '<a href="([^"]+).*?'
patron += '<span>(.*?)</span>'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapedtitle.strip()
s_e = scrapertools.get_season_and_episode(scrapedurl.replace("-",""))
if s_e != "":
season = s_e.split("x")[0]
episode = s_e.split("x")[1]
else:
season = episode = ""
scrapedtitle = s_e + " - " + scrapedtitle
item.infoLabels["episode"] = episode
item.infoLabels["season"] = season
itemlist.append(item.clone(action = "findvideos",
title = scrapedtitle,
url = scrapedurl
))
tmdb.set_infoLabels(itemlist)
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title =""))
itemlist.append(item.clone(action = "add_serie_to_library",
channel = item.channel,
extra = "episodios",
title = '[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url = item.url
))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas','latino']:
item.url = host + "/?peli=1"
elif categoria == 'infantiles':
item.url = host + '/categories/animacion/?peli=1'
elif categoria == 'terror':
item.url = host + '/categories/terror/?peli=1'
itemlist = peliculas(item)
if "Pagina" in itemlist[-1].title:
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto + "&peli=1"
item.extra = "busca"
if texto != '':
return peliculas(item)
else:
return []
def filtro(item):
logger.info()
itemlist = []
filter = ""
filter_end = "data-uk-dropdown"
if item.extra == "categories":
filter = "genero"
elif item.extra == "qualitys":
filter = "calidad"
elif item.extra == "languages":
filter = "audio"
elif item.extra == "years":
filter = "ano"
filter_end = "<div style"
filter = host + "/?" + filter + "="
data = httptools.downloadpage(item.url).data
patron = 'uk-button btn-filter %s.*?%s' %(item.extra, filter_end)
bloque = scrapertools.find_single_match(data, patron)
patron = 'id="([^"]+).*?'
patron += 'label for.*?>([^<]+)'
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, titulo in matches:
url = filter + url
itemlist.append(Item(channel = item.channel,
action = "peliculas",
title = titulo,
url = url + "&peli=1"
))
return itemlist
def peliculas(item):
logger.info()
itemlist = []
infoLabels = dict()
filter = "uk-icon-angle-right next"
if item.extra == "busca":
filter = '<div class="post">'
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '%s.*?panel-pagination pagination-bottom' %(filter))
patron = 'a href="([^"]+)".*?'
patron += 'img alt="([^"]+)".*?'
patron += '((?:http|https)://image.tmdb.org[^"]+)".*?'
patron += 'a href="([^"]+)".*?'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedurl1 in matches:
scrapedtitle = scrapedtitle.replace(" Online imagen","").replace("Pelicula ","")
year = scrapertools.find_single_match(scrapedtitle, "\(([0-9]+)\)")
if year:
year = int(year)
else:
year = 0
fulltitle = scrapertools.find_single_match(scrapedtitle, "(.*?) \(")
if "serie" in scrapedurl:
action = "capitulos"
infoLabels ['tvshowtitle'] = scrapedtitle
else:
action = "findvideos"
infoLabels ['tvshowtitle'] = ""
infoLabels ['year'] = year
itemlist.append(Item(action = action,
channel = item.channel,
fulltitle = fulltitle,
thumbnail = scrapedthumbnail,
infoLabels = infoLabels,
title = scrapedtitle,
url = scrapedurl
))
if itemlist:
tmdb.set_infoLabels(itemlist)
page = int(scrapertools.find_single_match(item.url,"peli=([0-9]+)")) + 1
next_page = scrapertools.find_single_match(item.url,".*?peli=")
next_page += "%s" %page
itemlist.append(Item(action = "peliculas",
channel = item.channel,
title = "Página siguiente >>",
url = next_page
))
return itemlist
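# A small sketch of the "peli=" paging scheme above: the current page number
# is read from the url and incremented to build the next-page link. The url
# below is hypothetical.
def _demo_next_page_url(url='https://example.net/?s=matrix&peli=3'):
    page = int(scrapertools.find_single_match(url, "peli=([0-9]+)")) + 1
    return scrapertools.find_single_match(url, ".*?peli=") + str(page)
    # -> 'https://example.net/?s=matrix&peli=4'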
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?i)src=&quot;([^&]+)&'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
if ".gif" in scrapedurl:
continue
title = "Ver en: %s"
itemlist.append(item.clone(action = "play",
title = title,
url = scrapedurl
))
tmdb.set_infoLabels(itemlist)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if itemlist:
itemlist.append(Item(channel = item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta"))
# Opción "Añadir esta película a la videoteca de KODI"
if item.contentChannel != "videolibrary":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
fulltitle = item.fulltitle
))
return itemlist
def play(item):
item.thumbnail = item.contentThumbnail
return [item]


@@ -1,64 +0,0 @@
{
"id": "cine24h",
"name": "Cine24H",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast", "vose"],
"fanart": "https://i.postimg.cc/WpqD2n77/cine24bg.jpg",
"thumbnail": "https://cine24h.net/wp-content/uploads/2018/06/cine24hv2.png",
"banner": "",
"categories": [
"movie",
"tvshow",
"vose",
"direct"
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Castellano",
"English"
]
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 5",
"Perfil 4",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
},
{
"id": "orden_episodios",
"type": "bool",
"label": "Mostrar los episodios de las series en orden descendente",
"default": false,
"enabled": true,
"visible": true
}
]
}


@@ -1,379 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel Cine24H -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import sys
import urlparse
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import channeltools
from core import tmdb
from platformcode import config, logger
from channelselector import get_thumb
__channel__ = "cine24h"
host = "https://cine24h.net/"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
__perfil__ = int(config.get_setting('perfil', __channel__))
except:
__modo_grafico__ = True
__perfil__ = 0
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
color1 = color2 = color3 = color4 = color5 = ""
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'openload', 'streamcherry']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = [item.clone(title="Peliculas", action="menumovies", text_blod=True,
viewcontent='movies', viewmode="movie_with_plot", thumbnail=get_thumb('movies', auto=True)),
item.clone(title="Series", action="series", extra='serie', url=host + 'series/',
viewmode="movie_with_plot", text_blod=True, viewcontent='movies',
thumbnail=get_thumb('tvshows', auto=True), page=0),
item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
text_blod=True, url=host, page=0)]
autoplay.show_option(item.channel, itemlist)
return itemlist
def menumovies(item):
logger.info()
itemlist = [item.clone(title="Novedades", action="peliculas", thumbnail=get_thumb('newest', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'peliculas/', viewmode="movie_with_plot"),
item.clone(title="Estrenos", action="peliculas", thumbnail=get_thumb('estrenos', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + '?s=trfilter&trfilter=1&years%5B%5D=2018', viewmode="movie_with_plot"),
item.clone(title="Más Vistas", action="peliculas", thumbnail=get_thumb('more watched', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'peliculas-mas-vistas/', viewmode="movie_with_plot"),
item.clone(title="Géneros", action="genresYears", thumbnail=get_thumb('genres', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"),
item.clone(title="Estrenos por Año", action="genresYears", thumbnail=get_thumb('year', auto=True),
text_blod=True, page=0, viewcontent='movies', url=host,
viewmode="movie_with_plot"),
item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
text_blod=True, url=host, page=0, extra='buscarP')]
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))
try:
return peliculas(item)
    # Catch the exception so one failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
data = scrapertools.decodeHtmlentities(data)
patron = '<article id="[^"]+" class="TPost[^<]+<a href="([^"]+)">.*?' # url
patron += '<img src="([^"]+)".*?' # img
    patron += '</figure>(.*?)'  # type
patron += '<h3 class="Title">([^<]+)</h3>.*?' # title
patron += '<span class="Year">([^<]+)</span>.*?' # year
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, tipo, scrapedtitle, year in matches[item.page:item.page + 30]:
if item.title == 'Buscar' and 'serie' in scrapedurl:
action = 'temporadas'
contentType = 'tvshow'
title = scrapedtitle + '[COLOR blue] (Serie)[/COLOR]'
else:
action = 'findvideos'
contentType = 'movie'
title = scrapedtitle
itemlist.append(Item(channel=__channel__, action=action, text_color=color3, show=scrapedtitle,
url=scrapedurl, infoLabels={'year': year}, contentType=contentType,
contentTitle=scrapedtitle, thumbnail='https:' + scrapedthumbnail,
title=title, context="buscar_trailer"))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if item.page + 30 < len(matches):
itemlist.append(item.clone(page=item.page + 30,
title="» Siguiente »", text_color=color3))
else:
next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
if next_page:
itemlist.append(item.clone(url=next_page, page=0, title="» Siguiente »", text_color=color3))
return itemlist
def genresYears(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
data = scrapertools.decodeHtmlentities(data)
if item.title == "Estrenos por Año":
patron_todas = 'ESTRENOS</a>(.*?)</i> Géneros'
else:
patron_todas = 'Géneros</a>(.*?)</li></ul></li>'
data = scrapertools.find_single_match(data, patron_todas)
patron = '<a href="([^"]+)">([^<]+)</a>' # url, title
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="peliculas"))
return itemlist
def year_release(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<li><a href="([^"]+)">([^<]+)</a></li>' # url, title
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0,
url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))
return itemlist
def series(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
patron = '<article class="TPost C TPostd">\s*<a href="([^"]+)">.*?' # url
patron += '<img src="([^"]+)".*?' # img
patron += '<h3 class="Title">([^<]+)</h3>' # title
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches[item.page:item.page + 30]:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas",
contentSerieName=scrapedtitle, show=scrapedtitle,
thumbnail='https:' + scrapedthumbnail, contentType='tvshow'))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
if item.page + 30 < len(matches):
itemlist.append(item.clone(page=item.page + 30,
title="» Siguiente »", text_color=color3))
else:
next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
if next_page:
itemlist.append(item.clone(url=next_page, page=0,
title="» Siguiente »", text_color=color3))
return itemlist
def temporadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="[^>]+>[^<]+<span>(.*?)</span> <i' # numeros de temporadas
matches = scrapertools.find_multiple_matches(data, patron)
if len(matches) > 1:
for scrapedseason in matches:
new_item = item.clone(action="episodios", season=scrapedseason, extra='temporadas')
new_item.infoLabels['season'] = scrapedseason
new_item.extra = ""
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
        for i in itemlist:
            i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
            if i.infoLabels['title']:
                # If the season has its own title, append it to the item title
                i.title += " - %s" % (i.infoLabels['title'])
            if 'poster_path' in i.infoLabels:
                # If the season has its own poster, use it instead of the show's
                i.thumbnail = i.infoLabels['poster_path']
itemlist.sort(key=lambda it: int(it.infoLabels['season']))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))
return itemlist
else:
return episodios(item)
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<td class="MvTbImg B"><a href="([^"]+)".*?' # url
patron += '<td class="MvTbTtl"><a href="https://cine24h.net/episode/(.*?)/">([^<]+)</a>' # title de episodios
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedname in matches:
scrapedtitle = scrapedtitle.replace('--', '0')
patron = '(\d+)x(\d+)'
match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
season, episode = match[0]
if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
continue
title = "%sx%s: %s" % (season, episode.zfill(2), scrapedname)
new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
contentType="episode")
if 'infoLabels' not in new_item:
new_item.infoLabels = {}
new_item.infoLabels['season'] = season
new_item.infoLabels['episode'] = episode.zfill(2)
itemlist.append(new_item)
    # TODO: skip this step when adding to the video library
    if not item.extra:
        # Fetch the details of every episode in the season via multithreading
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
        for i in itemlist:
            if i.infoLabels['title']:
                # If the episode has its own title, append it to the item title
                i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[
                    'episode'], i.infoLabels['title'])
            if 'poster_path' in i.infoLabels:
                # If the episode has its own image, use it as the thumbnail
                i.thumbnail = i.infoLabels['poster_path']
itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
reverse=config.get_setting('orden_episodios', __channel__))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
# Opción "Añadir esta serie a la videoteca"
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}|&nbsp;", "", data)
data = scrapertools.decodeHtmlentities(data)
patron = 'data-tplayernv="Opt(.*?)"><span>(.*?)</span>(.*?)</li>' # option, server, lang - quality
matches = re.compile(patron, re.DOTALL).findall(data)
for option, servername, quote in matches:
patron = '<span>(.*?) -([^<]+)</span'
match = re.compile(patron, re.DOTALL).findall(quote)
lang, quality = match[0]
quality = quality.strip()
headers = {'Referer': item.url}
url_1 = scrapertools.find_single_match(data,
'id="Opt%s"><iframe width="560" height="315" src="([^"]+)"' % option)
new_data = httptools.downloadpage(url_1, headers=headers).data
new_data = re.sub(r"\n|\r|\t|amp;|\(.*?\)|\s{2}|&nbsp;", "", new_data)
new_data = scrapertools.decodeHtmlentities(new_data)
url2 = scrapertools.find_single_match(new_data, '<iframe width="560" height="315" src="([^"]+)"')
url = url2 + '|%s' % url_1
if 'rapidvideo' in url2:
url = url2
lang = lang.lower().strip()
languages = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
'español': '[COLOR green](CAST)[/COLOR]',
'subespañol': '[COLOR red](VOS)[/COLOR]',
'sub': '[COLOR red](VOS)[/COLOR]'}
if lang in languages:
lang = languages[lang]
servername = servertools.get_server_from_url(url)
title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
servername.title(), quality, lang)
itemlist.append(item.clone(action='play', url=url, title=title, language=lang, quality=quality,
text_color=color3))
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist.sort(key=lambda it: it.language, reverse=False)
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
thumbnail=thumbnail_host, contentTitle=item.contentTitle))
return itemlist
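# A self-contained sketch of the option -> iframe lookup above, run against a
# hypothetical page fragment instead of a live download:
def _demo_option_iframe():
    data = ('<div id="Opt1"><iframe width="560" height="315" '
            'src="https://hop1.example/e/abc"></iframe></div>')
    return scrapertools.find_single_match(
        data, 'id="Opt%s"><iframe width="560" height="315" src="([^"]+)"' % 1)
    # -> 'https://hop1.example/e/abc'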


@@ -1,61 +0,0 @@
{
"id": "cineasiaenlinea",
"name": "CineAsiaEnLinea",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "http://i.imgur.com/5KOU8uy.png?3",
"banner": "cineasiaenlinea.png",
"categories": [
"movie",
"vos"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en búsqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Películas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}


@@ -1,177 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = "http://www.cineasiaenlinea.com/"
__channel__='cineasiaenlinea'
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
__modo_grafico__ = True
# Channel configuration
__perfil__ = int(config.get_setting('perfil', 'cineasiaenlinea'))
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
if __perfil__ - 1 >= 0:
color1, color2, color3 = perfil[__perfil__ - 1]
else:
color1 = color2 = color3 = ""
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="peliculas", title="Novedades", url=host + "archivos/peliculas",
thumbnail=get_thumb('newest', auto=True), text_color=color1,))
itemlist.append(item.clone(action="peliculas", title="Estrenos", url=host + "archivos/estrenos",
thumbnail=get_thumb('premieres', auto=True), text_color=color1))
itemlist.append(item.clone(action="indices", title="Por géneros", url=host,
thumbnail=get_thumb('genres', auto=True), text_color=color1))
itemlist.append(item.clone(action="indices", title="Por país", url=host, text_color=color1,
thumbnail=get_thumb('country', auto=True)))
itemlist.append(item.clone(action="indices", title="Por año", url=host, text_color=color1,
thumbnail=get_thumb('year', auto=True)))
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(action="search", title="Buscar...", text_color=color3,
thumbnail=get_thumb('search', auto=True)))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
item.url = "%s?s=%s" % (host, texto.replace(" ", "+"))
try:
return peliculas(item)
    # Catch the exception so one failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host + "archivos/peliculas"
elif categoria == 'terror':
item.url = host + "genero/terror"
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
    # Catch the exception so one failing channel does not break the Novedades listings
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def peliculas(item):
logger.info()
itemlist = []
item.text_color = color2
    # Download the page
data = httptools.downloadpage(item.url).data
patron = '<h3><a href="([^"]+)">([^<]+)<.*?src="([^"]+)".*?<a rel="tag">([^<]+)<' \
'.*?<a rel="tag">([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, year, calidad in matches:
title = re.sub(r' \((\d+)\)', '', scrapedtitle)
scrapedtitle += " [%s]" % calidad
infolab = {'year': year}
itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, infoLabels=infolab,
contentTitle=title, contentType="movie", quality=calidad))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)"')
if next_page:
itemlist.append(item.clone(title=">> Página Siguiente", url=next_page))
return itemlist
def indices(item):
logger.info()
itemlist = []
    # Download the page
data = httptools.downloadpage(item.url).data
logger.info(data)
if "géneros" in item.title:
bloque = scrapertools.find_single_match(data, '(?i)<h4>Peliculas por genero</h4>(.*?)</ul>')
matches = scrapertools.find_multiple_matches(bloque, '<a href="([^"]+)".*?>([^<]+)<')
elif "año" in item.title:
bloque = scrapertools.find_single_match(data, '(?i)<h4>Peliculas por Año</h4>(.*?)</select>')
matches = scrapertools.find_multiple_matches(bloque, '<option value="([^"]+)">([^<]+)<')
else:
bloque = scrapertools.find_single_match(data, '(?i)<h4>Peliculas por Pais</h4>(.*?)</ul>')
matches = scrapertools.find_multiple_matches(bloque, '<a href="([^"]+)".*?>([^<]+)<')
for scrapedurl, scrapedtitle in matches:
if "año" in item.title:
scrapedurl = "%sfecha-estreno/%s" % (host, scrapedurl)
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=item.thumbnail, text_color=color3))
return itemlist
def findvideos(item):
logger.info()
data = httptools.downloadpage(item.url).data
item.infoLabels["plot"] = scrapertools.find_single_match(data, '(?i)<h2>SINOPSIS.*?<p>(.*?)</p>')
item.infoLabels["trailer"] = scrapertools.find_single_match(data, 'src="(http://www.youtube.com/embed/[^"]+)"')
itemlist = servertools.find_video_items(item=item, data=data)
for it in itemlist:
it.thumbnail = item.thumbnail
it.text_color = color2
itemlist.append(item.clone(action="add_pelicula_to_library", title="Añadir película a la videoteca"))
if item.infoLabels["trailer"]:
folder = True
if config.is_xbmc():
folder = False
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Ver Trailer", folder=folder,
contextual=not folder))
return itemlist
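# A sketch of the trailer extraction above, applied to a hypothetical page
# fragment instead of a live download:
def _demo_trailer_url():
    data = '<iframe src="http://www.youtube.com/embed/dQw4w9WgXcQ" width="560">'
    return scrapertools.find_single_match(
        data, 'src="(http://www.youtube.com/embed/[^"]+)"')
    # -> 'http://www.youtube.com/embed/dQw4w9WgXcQ'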


@@ -1,79 +0,0 @@
{
"id": "cinecalidad",
"name": "CineCalidad",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast", "por"],
"thumbnail": "https://s9.postimg.cc/58xyblsvj/cinecalidad.png",
"banner": "https://s32.postimg.cc/kihkdpx1x/banner_cinecalidad.png",
"categories": [
"movie",
"direct",
"vos",
"torrent"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Español",
"Portugues"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - Terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
}
]
}


@@ -1,462 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
IDIOMAS = {'latino': 'Latino', 'castellano': 'Español', 'portugues': 'Portugues'}
list_language = IDIOMAS.values()
list_quality = ['1080p', '720p', '480p', '360p', '240p', 'default']
list_servers = [
'yourupload',
'thevideos',
'filescdn',
'uptobox',
'okru',
'nowvideo',
'userscloud',
'pcloud',
'usersfiles',
'vidbull',
'openload',
'rapidvideo',
'streamango',
'directo',
'torrent'
]
host = 'http://www.cinecalidad.to'
thumbmx = 'http://flags.fmcdn.net/data/flags/normal/mx.png'
thumbes = 'http://flags.fmcdn.net/data/flags/normal/es.png'
thumbbr = 'http://flags.fmcdn.net/data/flags/normal/br.png'
current_lang = ''
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(
item.clone(title="CineCalidad Latino",
action="submenu",
host="http://cinecalidad.to/",
thumbnail=thumbmx,
extra="peliculas",
))
itemlist.append(item.clone(title="CineCalidad Castellano",
action="submenu",
host="http://cinecalidad.to/espana/",
thumbnail=thumbes,
extra="peliculas",
))
itemlist.append(
item.clone(title="CineCalidad Portugues",
action="submenu",
host="http://www.cinemaqualidade.to/",
thumbnail=thumbbr,
extra="filmes",
))
autoplay.show_option(item.channel, itemlist)
return itemlist
def submenu(item):
idioma = 'peliculas'
idioma2 = "destacada"
host = item.host
if item.host == "http://www.cinemaqualidade.to/":
idioma = "filmes"
idioma2 = "destacado"
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel,
title=idioma.capitalize(),
action="peliculas",
url=host,
thumbnail=get_thumb('movies', auto=True),
fanart='https://s8.postimg.cc/6wqwy2c2t/peliculas.png',
))
itemlist.append(Item(channel=item.channel,
title="Destacadas",
action="peliculas",
url=host + "/genero-" + idioma + "/" + idioma2 + "/",
thumbnail=get_thumb('hot', auto=True),
fanart='https://s30.postimg.cc/humqxklsx/destacadas.png',
))
itemlist.append(Item(channel=item.channel,
title="Generos",
action="generos",
url=host + "/genero-" + idioma,
thumbnail=get_thumb('genres', auto=True),
fanart='https://s3.postimg.cc/5s9jg2wtf/generos.png',
))
itemlist.append(Item(channel=item.channel,
title="Por Año",
action="anyos",
url=host + idioma + "-por-ano",
thumbnail=get_thumb('year', auto=True),
fanart='https://s8.postimg.cc/7eoedwfg5/pora_o.png',
))
itemlist.append(Item(channel=item.channel,
title="Buscar",
action="search",
thumbnail=get_thumb('search', auto=True),
url=host + '/?s=',
fanart='https://s30.postimg.cc/pei7txpa9/buscar.png',
host=item.host,
))
return itemlist
def anyos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<a href=([^>]+)>([^<]+)</a><br'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(item.url, scrapedurl)
title = scrapedtitle
thumbnail = item.thumbnail
plot = item.plot
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
fanart=item.thumbnail,
language=item.language
))
return itemlist
def generos(item):
tgenero = {"Comedia": "https://s7.postimg.cc/ne9g9zgwb/comedia.png",
"Suspenso": "https://s13.postimg.cc/wmw6vl1cn/suspenso.png",
"Drama": "https://s16.postimg.cc/94sia332d/drama.png",
"Acción": "https://s3.postimg.cc/y6o9puflv/accion.png",
"Aventura": "https://s10.postimg.cc/6su40czih/aventura.png",
"Romance": "https://s15.postimg.cc/fb5j8cl63/romance.png",
"Fantas\xc3\xada": "https://s13.postimg.cc/65ylohgvb/fantasia.png",
"Infantil": "https://s23.postimg.cc/g5rmazozv/infantil.png",
"Ciencia ficción": "https://s9.postimg.cc/diu70s7j3/cienciaficcion.png",
"Terror": "https://s7.postimg.cc/yi0gij3gb/terror.png",
"Com\xc3\xa9dia": "https://s7.postimg.cc/ne9g9zgwb/comedia.png",
"Suspense": "https://s13.postimg.cc/wmw6vl1cn/suspenso.png",
"A\xc3\xa7\xc3\xa3o": "https://s3.postimg.cc/y6o9puflv/accion.png",
"Fantasia": "https://s13.postimg.cc/65ylohgvb/fantasia.png",
"Fic\xc3\xa7\xc3\xa3o cient\xc3\xadfica": "https://s9.postimg.cc/diu70s7j3/cienciaficcion.png"}
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li id=menu-item-.*? class="menu-item menu-item-type-taxonomy menu-item-object-category menu-item-.*?'
patron +='"><a href=([^>]+)>([^<]+)<\/a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(item.url, scrapedurl)
title = scrapedtitle
        thumbnail = tgenero.get(scrapedtitle, '')
plot = item.plot
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title=title, url=url,
thumbnail=thumbnail,
plot=plot,
fanart=item.thumbnail,
language=item.language
))
return itemlist
def peliculas(item):
logger.info()
global current_lang
itemlist = []
if 'cinemaqualidade' in item.url:
current_lang = 'portugues'
elif 'espana' in item.url:
current_lang = 'castellano'
elif 'cinecalidad' in item.url:
current_lang = 'latino'
data = httptools.downloadpage(item.url).data
patron = '<div class="home_post_cont.*? post_box">.*?<a href=([^>]+)>.*?src=([^ ]+).*?'
patron += 'title="(.*?) \((.*?)\).*?".*?p&gt;(.*?)&lt'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedplot in matches:
url = urlparse.urljoin(item.url, scrapedurl)
contentTitle = scrapedtitle
title = scrapedtitle + ' (' + scrapedyear + ')'
thumbnail = scrapedthumbnail
plot = scrapedplot
year = scrapedyear
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
fanart='https://s31.postimg.cc/puxmvsi7v/cinecalidad.png',
contentTitle=contentTitle,
infoLabels={'year': year},
language=current_lang,
context=autoplay.context
))
try:
patron = "<link rel=next href=([^>]+)>"
next_page = re.compile(patron, re.DOTALL).findall(data)
itemlist.append(Item(channel=item.channel,
action="peliculas",
title="Página siguiente >>",
url=next_page[0],
fanart='https://s31.postimg.cc/puxmvsi7v/cinecalidad.png',
language=item.language
))
except:
pass
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def dec(item, dec_value):
link = []
val = item.split(' ')
link = map(int, val)
for i in range(len(link)):
link[i] = link[i] - int(dec_value)
real = ''.join(map(chr, link))
return (real)
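# A worked example of the encoding dec() reverses: each character of the real
# text is shifted up by dec_value and the codes are space-joined, so shifting
# back recovers it. Sample values are hypothetical.
def _demo_dec():
    real = 'abc'
    dec_value = 5
    encoded = ' '.join(str(ord(c) + dec_value) for c in real)  # '102 103 104'
    return dec(encoded, dec_value)  # -> 'abc'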
def findvideos(item):
servidor = {"http://uptobox.com/": "uptobox",
"http://userscloud.com/": "userscloud",
"https://my.pcloud.com/publink/show?code=": "pcloud",
"http://thevideos.tv/": "thevideos",
"http://ul.to/": "uploadedto",
"http://turbobit.net/": "turbobit",
"http://www.cinecalidad.com/protect/v.html?i=": "cinecalidad",
"http://www.mediafire.com/download/": "mediafire",
"https://www.youtube.com/watch?v=": "youtube",
"http://thevideos.tv/embed-": "thevideos",
"//www.youtube.com/embed/": "youtube",
"http://ok.ru/video/": "okru",
"http://ok.ru/videoembed/": "okru",
"http://www.cinemaqualidade.com/protect/v.html?i=": "cinemaqualidade.com",
"http://usersfiles.com/": "usersfiles",
"https://depositfiles.com/files/": "depositfiles",
"http://www.nowvideo.sx/video/": "nowvideo",
"http://vidbull.com/": "vidbull",
"http://filescdn.com/": "filescdn",
"https://www.yourupload.com/watch/": "yourupload",
"http://www.cinecalidad.to/protect/gdredirect.php?l=": "directo",
"https://openload.co/embed/": "openload",
"https://streamango.com/embed/f/": "streamango",
"https://www.rapidvideo.com/embed/": "rapidvideo",
}
logger.info()
itemlist = []
duplicados = []
    if 'cinemaqualidade' in item.url:
        lang = 'portugues'
    elif 'espana' in item.url:
        lang = 'castellano'
    else:
        lang = 'latino'
data = httptools.downloadpage(item.url).data
patron = 'target=_blank.*? service=.*? data="(.*?)"><li>(.*?)<\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
server_url = {'YourUpload': 'https://www.yourupload.com/embed/',
'Openload': 'https://openload.co/embed/',
'TVM': 'https://thevideo.me/embed-',
'Streamango': 'https://streamango.com/embed/',
'RapidVideo': 'https://www.rapidvideo.com/embed/',
'Trailer': '',
'BitTorrent': '',
'Mega': '',
'MediaFire': ''}
dec_value = scrapertools.find_single_match(data, 'String\.fromCharCode\(parseInt\(str\[i\]\)-(\d+)\)')
torrent_link = scrapertools.find_single_match(data, '<a href="/protect/v\.php\?i=([^"]+)"')
if torrent_link != '':
import urllib
base_url = '%s/protect/v.php' % host
post = {'i': torrent_link, 'title': item.title}
post = urllib.urlencode(post)
headers = {'Referer': item.url}
protect = httptools.downloadpage(base_url + '?' + post, headers=headers).data
url = scrapertools.find_single_match(protect, 'value="(magnet.*?)"')
server = 'torrent'
title = item.contentTitle + ' (%s)' % server
quality = 'default'
language = IDIOMAS[lang]
new_item = Item(channel=item.channel,
action='play',
title=title,
fulltitle=item.contentTitle,
url=url,
language=language,
thumbnail=item.thumbnail,
quality=quality,
server=server
)
itemlist.append(new_item)
for video_cod, server_id in matches:
if server_id not in ['Mega', 'MediaFire', 'Trailer', '']:
video_id = dec(video_cod, dec_value)
if server_id in server_url:
server = server_id.lower()
thumbnail = item.thumbnail
if server_id == 'TVM':
server = 'thevideome'
url = server_url[server_id] + video_id + '.html'
else:
url = server_url[server_id] + video_id
title = item.contentTitle + ' (%s)' % server
quality = 'default'
if server_id not in ['Mega', 'MediaFire', 'Trailer']:
language = [IDIOMAS[lang], 'vose']
if url not in duplicados:
new_item = Item(channel=item.channel,
action='play',
title=title,
fulltitle=item.contentTitle,
url=url,
language= language,
thumbnail=thumbnail,
quality=quality,
server=server
)
itemlist.append(new_item)
duplicados.append(url)
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
autoplay.start(itemlist, item)
# itemlist.append(trailer_item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle,
))
return itemlist
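# A sketch of the magnet extraction above, applied to a hypothetical
# /protect/v.php response instead of a live one:
def _demo_magnet_from_protect():
    protect = '<input type="text" value="magnet:?xt=urn:btih:0000demodemo">'
    return scrapertools.find_single_match(protect, 'value="(magnet.*?)"')
    # -> 'magnet:?xt=urn:btih:0000demodemo'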
def get_urls(item, link):
from core import jsontools
logger.info()
url = 'http://www.cinecalidad.to/ccstream/ccstream.php'
headers = dict()
headers["Referer"] = item.url
post = 'link=%s' % link
data = httptools.downloadpage(url, post=post, headers=headers).data
dict_data = jsontools.load(data)
return dict_data['link']
def play(item):
logger.info()
itemlist = []
if 'juicyapi' not in item.url:
itemlist = servertools.find_video_items(data=item.url)
for videoitem in itemlist:
videoitem.title = item.fulltitle
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
else:
itemlist.append(item)
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas','latino']:
item.url = 'http://www.cinecalidad.com'
elif categoria == 'infantiles':
item.url = 'http://www.cinecalidad.com/genero-peliculas/infantil/'
elif categoria == 'terror':
item.url = 'http://www.cinecalidad.com/genero-peliculas/terror/'
elif categoria == 'castellano':
item.url = 'http://www.cinecalidad.com/espana/'
itemlist = peliculas(item)
if itemlist[-1].title == 'Página siguiente >>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def search(item, texto):
logger.info()
itemlist = []
texto = texto.replace(" ", "-")
if item.host != '':
host_list = [item.host]
else:
host_list = ['http://www.cinecalidad.to', 'http://cinecalidad.to/espana/']
for host_name in host_list:
item.url = host_name + '?s=' + texto
if texto != '':
itemlist.extend(peliculas(item))
return itemlist

View File

@@ -1,75 +0,0 @@
{
"id": "cinedetodo",
"name": "CINEDETODO",
"active": true,
"adult": false,
"language": ["esp", "lat"],
"thumbnail": "https://s31.postimg.cc/win1ffxyj/cinedetodo.png",
"banner": "",
"version": 1,
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - Terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,321 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel CineDeTodo -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools
host = 'https://www.cinedetodo.net/'
IDIOMAS = {'Latino': 'LAT'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['gounlimited', 'rapidvideo', 'vshare', 'clipwatching', 'jawclowd', 'streamango']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, title="Películas", action="sub_menu", url=host,
thumbnail=get_thumb('last', auto=True), type='MovieList'))
itemlist.append(Item(channel=item.channel, title="Series", action="sub_menu", url=host,
thumbnail=get_thumb('last', auto=True), type='Series'))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + '?s=',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def sub_menu(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Ultimas", action="list_all", url=host,
thumbnail=get_thumb('last', auto=True), type=item.type))
itemlist.append(Item(channel=item.channel, title="Generos", action="section", section='genre',
thumbnail=get_thumb('genres', auto=True), type=item.type ))
if item.type != 'Series':
itemlist.append(Item(channel=item.channel, title="Alfabetico", action="section", section='alpha',
thumbnail=get_thumb('alphabet', auto=True), type=item.type))
return itemlist
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
full_data = data
if item.section != '':
data = scrapertools.find_single_match(data, 'class="MovieList NoLmtxt(.*?)</ul>')
else:
data = scrapertools.find_single_match(data, '<!--<%s>.*?class="MovieList NoLmtxt(.*?)</ul>' % item.type)
if item.section == 'alpha':
patron = '<span class="Num">\d+.*?<a href="([^"]+)" class.*?<img src="([^"]+)" alt=.*?'
patron += '<strong>([^"]+)</strong>.*?<td>(\d{4})</td>'
matches = re.compile(patron, re.DOTALL).findall(full_data)
else:
patron = '<article.*?<a href="(.*?)">.*?<img src="(.*?)" alt=.*?'
patron += '<h3 class="Title">(.*?)<\/h3>.*?date_range">(\d+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
url = scrapedurl
if year == '':
year = '-'
if "|" in scrapedtitle:
scrapedtitle= scrapedtitle.split("|")
cleantitle = scrapedtitle[0].strip()
else:
cleantitle = scrapedtitle
cleantitle = re.sub('\(.*?\)', '', cleantitle)
if not config.get_setting('unify'):
title = '%s [%s]'%(cleantitle, year)
else:
title = cleantitle
thumbnail = 'http:'+scrapedthumbnail
new_item = Item(channel=item.channel,
title=title,
url=url,
thumbnail=thumbnail,
infoLabels = {'year': year}
)
if 'series' not in url:
new_item.contentTitle = cleantitle
new_item.action='findvideos'
else:
new_item.contentSerieName = cleantitle
new_item.action = 'seasons'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, True)
# Pagination
url_next_page = scrapertools.find_single_match(full_data,'<a class="next.*?href="([^"]+)">')
if url_next_page:
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all',
type=item.type))
return itemlist
def section(item):
logger.info()
itemlist = []
if item.type == 'Series':
url = host + '?tr_post_type=2'
else:
url = host + '?tr_post_type=1'
data = get_source(url)
action = 'list_all'
if item.section == 'genre':
patron = '<a href="([^ ]+)" class="Button STPb">(.*?)</a>'
elif item.section == 'alpha':
patron = '<li><a href="(.*?letter.*?)">(.*?)</a>'
action = 'list_all'
matches = re.compile(patron, re.DOTALL).findall(data)
for data_one, data_two in matches:
url = data_one
title = data_two
if title != 'Ver más':
if item.type == 'Series':
url =url + '?tr_post_type=2'
else:
url = url + '?tr_post_type=1'
if 'serie'in title.lower():
continue
new_item = Item(channel=item.channel, title= title, url=url, action=action, section=item.section,
type=item.type)
itemlist.append(new_item)
return itemlist
def seasons(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron='Temporada <span>(\d+)'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for season in matches:
season = season.lower().replace('temporada','')
infoLabels['season']=season
title = 'Temporada %s' % season
itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)
return itemlist
def episodesxseasons(item):
logger.info()
itemlist = []
full_data=get_source(item.url)
data = scrapertools.find_single_match(full_data, 'Temporada <span>\d+.*?</ul>')
patron='<span class="Num">(\d+)<.*?<a href="([^"]+)".*?"MvTbTtl".*?">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedepisode, scrapedurl, scrapedtitle in matches:
infoLabels['episode'] = scrapedepisode
url = scrapedurl
title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):
logger.info()
itemlist = []
trailer = None
data = get_source(item.url)
data = scrapertools.decodeHtmlentities(data)
patron = 'id="(Opt\d+)">.*?src="([^"]+)" frameborder.*?</iframe>'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, scrapedurl in matches:
scrapedurl = scrapedurl.replace('"','').replace('&#038;','&')
data_video = get_source(scrapedurl)
url = scrapertools.find_single_match(data_video, '<div class="Video">.*?src="([^"]+)" frameborder')
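# The option label appears to have the form "(Language) - Quality"; split it into both parts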
opt_data = scrapertools.find_single_match(data,'"%s"><span>.*?</span>.*?<span>(.*?)</span>'%option).split('-')
language = opt_data[0].strip()
language = re.sub('\(|\)', '', language)
quality = opt_data[1].strip()
if url != '' and 'youtube' not in url:
itemlist.append(Item(channel=item.channel, title='%s', url=url, language=IDIOMAS[language], quality=quality,
action='play', infoLabels=item.infoLabels))
elif 'youtube' in url:
trailer = Item(channel=item.channel, title='Trailer', url=url, action='play', server='youtube')
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s] [%s]'%(i.server.capitalize(),
i.language, i.quality))
if trailer:
itemlist.append(trailer)
# Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
item.section = 'search'
if texto != '':
return list_all(item)
else:
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas','latino']:
item.url = host
elif categoria == 'infantiles':
item.url = host+'animacion/?tr_post_type=1'
elif categoria == 'terror':
item.url = host+'terror/?tr_post_type=1'
item.type = 'MovieList'
item.section = 'search'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -1,173 +0,0 @@
{
"id": "cinefox",
"name": "Cinefox",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "cinefox.png",
"banner": "cinefox.png",
"categories": [
"movie",
"tvshow",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE",
"VO"
]
},
{
"id": "save_last_search",
"type": "bool",
"label": "Guardar última búsqueda",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Películas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Incluir en Novedades - Series",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 2,
"enabled": true,
"visible": true,
"lvalues": [
"Perfil 3",
"Perfil 2",
"Perfil 1",
"Ninguno"
]
},
{
"id": "menu_info",
"type": "bool",
"label": "Mostrar menú intermedio película/episodio",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "last_page",
"type": "bool",
"label": "Ocultar opción elegir página en películas (Kodi)",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "filtro_defecto_peliculas",
"type": "label",
"enabled": true,
"visible": false
},
{
"id": "pers_peliculas1",
"type": "label",
"enabled": true,
"visible": false
},
{
"id": "pers_peliculas2",
"type": "label",
"enabled": true,
"visible": false
},
{
"pers_peliculas3": {
"type": "label",
"enabled": true,
"visible": false
}
},
{
"id": "filtro_defecto_series",
"type": "label",
"enabled": true,
"visible": false
},
{
"id": "pers_series1",
"type": "label",
"enabled": true,
"visible": false
},
{
"id": "pers_series2",
"type": "label",
"enabled": true,
"visible": false
},
{
"id": "pers_series3",
"type": "label",
"enabled": true,
"visible": false
},
{
"id": "last_search",
"type": "text",
"enabled": true,
"visible": false
}
]
}

View File

@@ -1,804 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
from channels import autoplay
from channels import filtertools
IDIOMAS = {'Latino': 'LAT', 'Castellano':'CAST', 'Vo':'VO', 'Vose': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'powvideo', 'rapidvideo', 'streamango', 'streamcloud', 'flashx', 'gamovideo', 'streamplay']
__modo_grafico__ = config.get_setting('modo_grafico', 'cinefox')
__perfil__ = int(config.get_setting('perfil', "cinefox"))
__menu_info__ = config.get_setting('menu_info', 'cinefox')
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'cinefox')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'cinefox')
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08']]
if __perfil__ < 3:
color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
color1 = color2 = color3 = color4 = color5 = ""
host = "http://www.cinefox.tv"
def mainlist(item):
logger.info()
item.text_color = color1
itemlist = []
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(item.clone(action="seccion_peliculas", title="Películas", fanart="http://i.imgur.com/PjJaW8o.png",
url=host + "/catalogue?type=peliculas", thumbnail=get_thumb('movies', auto=True)))
# Series section
itemlist.append(item.clone(action="seccion_series", title="Series",
url=host + "/ultimos-capitulos", fanart="http://i.imgur.com/9loVksV.png",
thumbnail=get_thumb('tvshows', auto=True)))
itemlist.append(item.clone(action="peliculas", title="Documentales", fanart="http://i.imgur.com/Q7fsFI6.png",
url=host + "/catalogue?type=peliculas&genre=documental",
thumbnail=get_thumb('documentaries', auto=True)))
if config.get_setting("adult_mode") != 0:
itemlist.append(item.clone(action="peliculas", title="Sección Adultos +18",
url=host + "/catalogue?type=adultos",
fanart="http://i.imgur.com/kIvE1Zh.png", thumbnail=get_thumb('adults', auto=True)))
itemlist.append(item.clone(title="Buscar...", action="local_search", thumbnail=get_thumb('search', auto=True)))
itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion", folder=False))
autoplay.show_option(item.channel, itemlist)
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search?q=%s" % texto
try:
return busqueda(item)
# Catch the exception so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def local_search(item):
logger.info()
text = ""
if config.get_setting("save_last_search", item.channel):
text = config.get_setting("last_search", item.channel)
from platformcode import platformtools
texto = platformtools.dialog_input(default=text, heading="Buscar en Cinefox")
if texto is None:
return
if config.get_setting("save_last_search", item.channel):
config.set_setting("last_search", texto, item.channel)
return search(item, texto)
def busqueda(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="poster-media-card">(.*?)(?:<li class="search-results-item media-item">|<footer>)'
bloque = scrapertools.find_multiple_matches(data, patron)
for match in bloque:
patron = 'href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?' \
'<p class="search-results-main-info">.*?del año (\d+).*?' \
'p class.*?>(.*?)<'
matches = scrapertools.find_multiple_matches(match, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, year, plot in matches:
scrapedtitle = scrapedtitle.capitalize()
item.infoLabels["year"] = year
plot = scrapertools.htmlclean(plot)
new_item = Item(channel=item.channel, thumbnail= scrapedthumbnail, plot=plot)
if "/serie/" in scrapedurl:
new_item.show = scrapedtitle
new_item.contentType = 'tvshow'
scrapedurl += "/episodios"
title = " [Serie]"
new_item.action = 'episodios'
elif "/pelicula/" in scrapedurl:
new_item.action = "findvideos"
filter_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w200_and_h300_bestv2", "")
filter_list = {"poster_path": filter_thumb}
filter_list = filter_list.items()
#title = " [Película]"
new_item.contentType = "movie"
new_item.extra='media'
new_item.contentTitle= scrapedtitle
new_item.infoLabels['filtro'] = filter_list
else:
continue
new_item.title = scrapedtitle + " (" + year + ")"
new_item.url = scrapedurl
itemlist.append(new_item)
#itemlist.append(item.clone(action=action, title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
# contentTitle=scrapedtitle, fulltitle=scrapedtitle,
# plot=plot, show=show, text_color=color2, contentType=contentType))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Más resultados')
if next_page != "":
next_page = urlparse.urljoin(host, next_page)
itemlist.append(Item(channel=item.channel, action="busqueda", title=">> Siguiente", url=next_page,
thumbnail=item.thumbnail, text_color=color3))
return itemlist
def filtro(item):
logger.info()
list_controls = []
valores = {}
strings = {}
# Use the default/saved values, or those of the custom filter
if not item.values:
valores_guardados = config.get_setting("filtro_defecto_" + item.extra, item.channel)
else:
valores_guardados = item.values
item.values = ""
if valores_guardados:
dict_values = valores_guardados
else:
dict_values = None
if dict_values:
dict_values["filtro_per"] = 0
excluidos = ['País', 'Películas', 'Series', 'Destacar']
data = httptools.downloadpage(item.url).data
matches = scrapertools.find_multiple_matches(data, '<div class="dropdown-sub[^>]+>(\S+)(.*?)</ul>')
i = 0
for filtro_title, values in matches:
if filtro_title in excluidos: continue
filtro_title = filtro_title.replace("Tendencia", "Ordenar por")
id = filtro_title.replace("Género", "genero").replace("Año", "year").replace(" ", "_").lower()
list_controls.append({'id': id, 'label': filtro_title, 'enabled': True,
'type': 'list', 'default': 0, 'visible': True})
valores[id] = []
valores[id].append('')
strings[filtro_title] = []
list_controls[i]['lvalues'] = []
if filtro_title == "Ordenar por":
list_controls[i]['lvalues'].append('Más recientes')
strings[filtro_title].append('Más recientes')
else:
list_controls[i]['lvalues'].append('Cualquiera')
strings[filtro_title].append('Cualquiera')
patron = '<li>.*?(?:genre|release|quality|language|order)=([^"]+)">([^<]+)<'
matches_v = scrapertools.find_multiple_matches(values, patron)
for value, key in matches_v:
if value == "action-adventure": continue
list_controls[i]['lvalues'].append(key)
valores[id].append(value)
strings[filtro_title].append(key)
i += 1
item.valores = valores
item.strings = strings
if "Filtro Personalizado" in item.title:
return filtrado(item, valores_guardados)
list_controls.append({'id': 'espacio', 'label': '', 'enabled': False,
'type': 'label', 'default': '', 'visible': True})
list_controls.append({'id': 'save', 'label': 'Establecer como filtro por defecto', 'enabled': True,
'type': 'bool', 'default': False, 'visible': True})
list_controls.append({'id': 'filtro_per', 'label': 'Guardar filtro en acceso directo...', 'enabled': True,
'type': 'list', 'default': 0, 'visible': True, 'lvalues': ['No guardar', 'Filtro 1',
'Filtro 2', 'Filtro 3']})
list_controls.append({'id': 'remove', 'label': 'Eliminar filtro personalizado...', 'enabled': True,
'type': 'list', 'default': 0, 'visible': True, 'lvalues': ['No eliminar', 'Filtro 1',
'Filtro 2', 'Filtro 3']})
from platformcode import platformtools
return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values,
caption="Filtra los resultados", item=item, callback='filtrado')
def filtrado(item, values):
values_copy = values.copy()
# Save the filter so it becomes the one loaded by default
if "save" in values and values["save"]:
values_copy.pop("remove")
values_copy.pop("filtro_per")
values_copy.pop("save")
config.set_setting("filtro_defecto_" + item.extra, values_copy, item.channel)
# Delete the chosen custom filter
if "remove" in values and values["remove"] != 0:
config.set_setting("pers_" + item.extra + str(values["remove"]), "", item.channel)
values_copy = values.copy()
# Save the filter as a custom shortcut
if "filtro_per" in values and values["filtro_per"] != 0:
index = item.extra + str(values["filtro_per"])
values_copy.pop("filtro_per")
values_copy.pop("save")
values_copy.pop("remove")
config.set_setting("pers_" + index, values_copy, item.channel)
genero = item.valores["genero"][values["genero"]]
year = item.valores["year"][values["year"]]
calidad = item.valores["calidad"][values["calidad"]]
idioma = item.valores["idioma"][values["idioma"]]
order = item.valores["ordenar_por"][values["ordenar_por"]]
strings = []
for key, value in dict(item.strings).items():
key2 = key.replace("Género", "genero").replace("Año", "year").replace(" ", "_").lower()
strings.append(key + ": " + value[values[key2]])
item.valores = "Filtro: " + ", ".join(sorted(strings))
item.strings = ""
item.url = host + "/catalogue?type=%s&genre=%s&release=%s&quality=%s&language=%s&order=%s" % \
(item.extra, genero, year, calidad, idioma, order)
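# e.g. with extra="peliculas", genero="terror", year="2018" and the rest left empty
# this yields "<host>/catalogue?type=peliculas&genre=terror&release=2018&quality=&language=&order="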
return globals()[item.extra](item)
def seccion_peliculas(item):
logger.info()
itemlist = []
# Movies section
itemlist.append(item.clone(action="peliculas", title="Novedades", fanart="http://i.imgur.com/PjJaW8o.png",
url=host + "/catalogue?type=peliculas", thumbnail=get_thumb('newest', auto=True)))
itemlist.append(item.clone(action="peliculas", title="Estrenos",
url=host + "/estrenos-de-cine", fanart="http://i.imgur.com/PjJaW8o.png",
thumbnail=get_thumb('premieres', auto=True)))
itemlist.append(item.clone(action="filtro", title="Filtrar películas", extra="peliculas",
url=host + "/catalogue?type=peliculas",
fanart="http://i.imgur.com/PjJaW8o.png"))
# Custom filters for movies
for i in range(1, 4):
filtros = config.get_setting("pers_peliculas" + str(i), item.channel)
if filtros:
title = "Filtro Personalizado " + str(i)
new_item = item.clone()
new_item.values = filtros
itemlist.append(new_item.clone(action="filtro", title=title, fanart="http://i.imgur.com/PjJaW8o.png",
url=host + "/catalogue?type=peliculas", extra="peliculas"))
itemlist.append(item.clone(action="mapa", title="Mapa de películas", extra="peliculas",
url=host + "/mapa-de-peliculas",
fanart="http://i.imgur.com/PjJaW8o.png"))
return itemlist
def seccion_series(item):
logger.info()
itemlist = []
# Series section
itemlist.append(item.clone(action="ultimos", title="Últimos capítulos",
url=host + "/ultimos-capitulos", fanart="http://i.imgur.com/9loVksV.png",
thumbnail=get_thumb('new episodes', auto=True)))
itemlist.append(item.clone(action="series", title="Series recientes",
url=host + "/catalogue?type=series",
fanart="http://i.imgur.com/9loVksV.png", thumbnail=get_thumb('recents', auto=True)))
itemlist.append(item.clone(action="filtro", title="Filtrar series", extra="series",
url=host + "/catalogue?type=series",
fanart="http://i.imgur.com/9loVksV.png"))
# Custom filters for series
for i in range(1, 4):
filtros = config.get_setting("pers_series" + str(i), item.channel)
if filtros:
title = " Filtro Personalizado " + str(i)
new_item = item.clone()
new_item.values = filtros
itemlist.append(new_item.clone(action="filtro", title=title, fanart="http://i.imgur.com/9loVksV.png",
url=host + "/catalogue?type=series", extra="series"))
itemlist.append(item.clone(action="mapa", title="Mapa de series", extra="series",
url=host + "/mapa-de-series",
fanart="http://i.imgur.com/9loVksV.png"))
return itemlist
def mapa(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li class="sitemap-initial"> <a class="initial-link" href="([^"]+)">(.*?)</a>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
scrapedurl = host + scrapedurl
scrapedtitle = scrapedtitle.capitalize()
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action=item.extra, extra="mapa"))
return itemlist
def peliculas(item):
logger.info()
itemlist = []
if "valores" in item and item.valores:
itemlist.append(item.clone(action="", title=item.valores, text_color=color4))
action = "findvideos"
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_multiple_matches(data,
'<div class="media-card "(.*?)<div class="hidden-info">')
for match in bloque:
if item.extra == "mapa":
patron = '.*?src="([^"]+)".*?href="([^"]+)">([^<]+)</a>'
matches = scrapertools.find_multiple_matches(match, patron)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(host, scrapedurl)
filter_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w200_and_h300_bestv2", "")
filter_list = {"poster_path": filter_thumb}
filter_list = filter_list.items()
itemlist.append(Item(channel=item.channel, action=action, title=scrapedtitle, url=url, extra="media",
thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, fulltitle=scrapedtitle,
text_color=color2, contentType="movie", infoLabels={'filtro':filter_list}))
else:
patron = '<div class="audio-info">(.*?)<div (class="quality.*?)src="([^"]+)".*?href="([^"]+)">([^<]+)</a>'
matches = scrapertools.find_multiple_matches(match, patron)
for idiomas, calidad, scrapedthumbnail, scrapedurl, scrapedtitle in matches:
calidad = scrapertools.find_single_match(calidad, '<div class="quality-info".*?>([^<]+)</div>')
if calidad:
calidad = calidad.capitalize().replace("Hd", "HD")
audios = []
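# Map the language badge CSS classes on the card to audio tags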
if "medium-es" in idiomas: audios.append('CAST')
if "medium-vs" in idiomas: audios.append('VOSE')
if "medium-la" in idiomas: audios.append('LAT')
if "medium-en" in idiomas or 'medium-"' in idiomas:
audios.append('VO')
title = "%s [%s]" % (scrapedtitle, "/".join(audios))
if calidad:
title += " (%s)" % calidad
url = urlparse.urljoin(host, scrapedurl)
filter_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w200_and_h300_bestv2", "")
filter_list = {"poster_path": filter_thumb}
filter_list = filter_list.items()
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, extra="media",
thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, fulltitle=scrapedtitle,
text_color=color2, contentType="movie", quality=calidad, language=audios,
infoLabels={'filtro':filter_list}))
next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Siguiente')
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if next_page != "" and item.title != "":
itemlist.append(Item(channel=item.channel, action="peliculas", title=">> Siguiente", url=next_page,
thumbnail=item.thumbnail, extra=item.extra, text_color=color3))
if not config.get_setting("last_page", item.channel) and config.is_xbmc():
itemlist.append(Item(channel=item.channel, action="select_page", title="Ir a página...", url=next_page,
thumbnail=item.thumbnail, text_color=color5))
return itemlist
def ultimos(item):
logger.info()
item.text_color = color2
# if __menu_info__:
# action = "menu_info_episode"
# else:
action = "episodios"
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_multiple_matches(data, '<div class="media-card "(.*?)<div class="info-availability '
'one-line">')
for match in bloque:
patron = '<div class="audio-info">(.*?)<img class.*?src="([^"]+)".*?href="([^"]+)">([^<]+)</a>'
matches = scrapertools.find_multiple_matches(match, patron)
for idiomas, scrapedthumbnail, scrapedurl, scrapedtitle in matches:
show = re.sub(r'(\s*[\d]+x[\d]+\s*)', '', scrapedtitle)
audios = []
if "medium-es" in idiomas: audios.append('CAST')
if "medium-vs" in idiomas: audios.append('VOSE')
if "medium-la" in idiomas: audios.append('LAT')
if "medium-en" in idiomas or 'medium-"' in idiomas:
audios.append('VO')
title = "%s - %s" % (show, re.sub(show, '', scrapedtitle))
if audios:
title += " [%s]" % "/".join(audios)
url = urlparse.urljoin(host, scrapedurl)
itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=scrapedthumbnail,
contentSerieName=show, fulltitle=show, show=show,
text_color=color2, extra="ultimos", contentType="tvshow"))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Siguiente')
if next_page != "":
itemlist.append(item.clone(action="ultimos", title=">> Siguiente", url=next_page, text_color=color3))
return itemlist
def series(item):
logger.info()
itemlist = []
if "valores" in item:
itemlist.append(item.clone(action="", title=item.valores, text_color=color4))
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_multiple_matches(data, '<div class="media-card "(.*?)<div class="hidden-info">')
for match in bloque:
patron = '<img class.*?src="([^"]+)".*?href="([^"]+)">([^<]+)</a>'
matches = scrapertools.find_multiple_matches(match, patron)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(host, scrapedurl + "/episodios")
itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=url,
thumbnail=scrapedthumbnail, contentSerieName=scrapedtitle, fulltitle=scrapedtitle,
show=scrapedtitle, text_color=color2, contentType="tvshow"))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
except:
pass
next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Siguiente')
if next_page != "":
title = ">> Siguiente - Página " + scrapertools.find_single_match(next_page, 'page=(\d+)')
itemlist.append(Item(channel=item.channel, action="series", title=title, url=next_page,
thumbnail=item.thumbnail, extra=item.extra, text_color=color3))
return itemlist
def menu_info(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
year = scrapertools.find_single_match(data, '<div class="media-summary">.*?release.*?>(\d+)<')
if year != "" and not "tmdb_id" in item.infoLabels:
try:
from core import tmdb
item.infoLabels["year"] = year
tmdb.set_infoLabels_item(item, __modo_grafico__)
except:
pass
if item.infoLabels["plot"] == "":
sinopsis = scrapertools.find_single_match(data, '<p id="media-plot".*?>.*?\.\.\.(.*?)Si te parece')
item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)
id = scrapertools.find_single_match(item.url, '/(\d+)/')
data_trailer = httptools.downloadpage(host + "/media/trailer?idm=%s&mediaType=1" % id).data
trailer_url = jsontools.load(data_trailer)["video"]["url"]
if trailer_url != "":
item.infoLabels["trailer"] = trailer_url
title = "Ver enlaces %s - [" + item.contentTitle + "]"
itemlist.append(item.clone(action="findvideos", title=title % "Online", extra="media", type="streaming"))
itemlist.append(item.clone(action="findvideos", title=title % "de Descarga", extra="media", type="download"))
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
text_color="magenta", context=""))
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library", text_color=color5,
title="Añadir película a la videoteca", url=item.url, thumbnail=item.thumbnail,
fanart=item.fanart, fulltitle=item.fulltitle,
extra="media|"))
return itemlist
def episodios(item):
logger.info()
itemlist = []
if item.extra == "ultimos":
data = httptools.downloadpage(item.url).data
item.url = scrapertools.find_single_match(data, '<a href="([^"]+)" class="h1-like media-title"')
item.url += "/episodios"
data = httptools.downloadpage(item.url).data
#if "episodios" in item.extra or not __menu_info__ or item.path:
action = "findvideos"
# else:
# action = "menu_info_episode"
patron = '<div class="ep-list-number">.*?href="([^"]+)">([^<]+)</a>.*?<span class="name">([^<]+)</span>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, episode, scrapedtitle in matches:
new_item = item.clone(action=action, url=scrapedurl, text_color=color2, contentType="episode")
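# `episode` has the form "SxE" (e.g. "2x05"); split it into season and episode number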
new_item.contentSeason = episode.split("x")[0]
new_item.contentEpisodeNumber = episode.split("x")[1]
new_item.title = episode + " - " + scrapedtitle
new_item.extra = "episode"
if "episodios" in item.extra or item.path:
new_item.extra = "episode|"
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
itemlist.reverse()
if "episodios" not in item.extra and not item.path:
id = scrapertools.find_single_match(item.url, '/(\d+)/')
data_trailer = httptools.downloadpage(host + "/media/trailer?idm=%s&mediaType=1" % id).data
item.infoLabels["trailer"] = jsontools.load(data_trailer)["video"]["url"]
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
text_color="magenta"))
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, action="add_serie_to_library", text_color=color5,
title="Añadir serie a la videoteca", show=item.show, thumbnail=item.thumbnail,
url=item.url, fulltitle=item.fulltitle, fanart=item.fanart,
extra="episodios###episodios",
contentTitle=item.fulltitle))
return itemlist
def menu_info_episode(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if item.show == "":
item.show = scrapertools.find_single_match(data, 'class="h1-like media-title".*?>([^<]+)</a>')
episode = scrapertools.find_single_match(data, '<span class="indicator">([^<]+)</span>')
item.infoLabels["season"] = episode.split("x")[0]
item.infoLabels["episode"] = episode.split("x")[1]
try:
from core import tmdb
tmdb.set_infoLabels_item(item, __modo_grafico__)
except:
pass
if item.infoLabels["plot"] == "":
sinopsis = scrapertools.find_single_match(data, 'id="episode-plot">(.*?)</p>')
if not "No hay sinopsis" in sinopsis:
item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)
title = "Ver enlaces %s - [" + item.show + " " + episode + "]"
itemlist.append(item.clone(action="findvideos", title=title % "Online", extra="episode", type="streaming"))
itemlist.append(item.clone(action="findvideos", title=title % "de Descarga", extra="episode", type="download"))
siguiente = scrapertools.find_single_match(data, '<a class="episode-nav-arrow next" href="([^"]+)" title="([^"]+)"')
if siguiente:
titulo = ">> Siguiente Episodio - [" + siguiente[1] + "]"
itemlist.append(item.clone(action="menu_info_episode", title=titulo, url=siguiente[0], extra="",
text_color=color1))
patron = '<a class="episode-nav-arrow previous" href="([^"]+)" title="([^"]+)"'
anterior = scrapertools.find_single_match(data, patron)
if anterior:
titulo = "<< Episodio Anterior - [" + anterior[1] + "]"
itemlist.append(item.clone(action="menu_info_episode", title=titulo, url=anterior[0], extra="",
text_color=color3))
url_serie = scrapertools.find_single_match(data, '<a href="([^"]+)" class="h1-like media-title"')
url_serie += "/episodios"
itemlist.append(item.clone(title="Ir a la lista de capítulos", action="episodios", url=url_serie, extra="",
text_color=color4))
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
text_color="magenta", context=""))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
if not "|" in item.extra and not __menu_info__:
data = httptools.downloadpage(item.url, add_referer=True).data
year = scrapertools.find_single_match(data, '<div class="media-summary">.*?release.*?>(\d+)<')
if year != "" and not "tmdb_id" in item.infoLabels:
try:
from core import tmdb
item.infoLabels["year"] = year
tmdb.set_infoLabels_item(item, __modo_grafico__)
except:
pass
if item.infoLabels["plot"] == "":
sinopsis = scrapertools.find_single_match(data, '<p id="media-plot".*?>.*?\.\.\.(.*?)Si te parece')
item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)
id = scrapertools.find_single_match(item.url, '/(\d+)/')
if "|" in item.extra or not __menu_info__:
extra = item.extra
if "|" in item.extra:
extra = item.extra[:-1]
url = host + "/sources/list?id=%s&type=%s&order=%s" % (id, extra, "streaming")
itemlist=(get_enlaces(item, url, "Online"))
url = host + "/sources/list?id=%s&type=%s&order=%s" % (id, extra, "download")
itemlist.extend(get_enlaces(item, url, "de Descarga"))
if extra == "media":
data_trailer = httptools.downloadpage(host + "/media/trailer?idm=%s&mediaType=1" % id).data
trailer_url = jsontools.load(data_trailer)["video"]["url"]
if trailer_url != "":
item.infoLabels["trailer"] = trailer_url
title = "Ver enlaces %s - [" + item.contentTitle + "]"
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
text_color="magenta", context=""))
if config.get_videolibrary_support() and not "|" in item.extra:
itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library", text_color=color5,
title="Añadir película a la videoteca", url=item.url, thumbnail=item.thumbnail,
fanart=item.fanart, fulltitle=item.fulltitle,
extra="media|"))
else:
url = host + "/sources/list?id=%s&type=%s&order=%s" % (id, item.extra, item.type)
type = item.type.replace("streaming", "Online").replace("download", "de Descarga")
itemlist.extend(get_enlaces(item, url, type))
if __comprueba_enlaces__:
for it in itemlist:
if it.server != '' and it.url != '':
it.url = normalizar_url(it.url, it.server)
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
return itemlist
def normalizar_url(url, server):
# Run through findvideosbyserver to derive the url from the pattern/url entries in the server json files
# Exceptions copied from the play function
url = url.replace("http://miracine.tv/n/?etu=", "http://hqq.tv/player/embed_player.php?vid=")
url = url.replace("streamcloud.eu/embed-", "streamcloud.eu/")
enlaces = servertools.findvideosbyserver(url, server)[0]
if enlaces[1] != '':
return enlaces[1]
return url
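# e.g. a "http://miracine.tv/n/?etu=<id>" link is first rewritten to
# "http://hqq.tv/player/embed_player.php?vid=<id>" and then resolved against the
# server json patterns; the url is returned unchanged if no pattern matches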
def get_enlaces(item, url, type):
itemlist = []
#itemlist.append(item.clone(action="", title="Enlaces %s" % type, text_color=color1))
data = httptools.downloadpage(url, add_referer=True).data
if type == "Online":
gg = httptools.downloadpage(item.url, add_referer=True).data
bloque = scrapertools.find_single_match(gg, 'class="tab".*?button show')
patron = 'a href="#([^"]+)'
patron += '.*?language-ES-medium ([^"]+)'
patron += '.*?</i>([^<]+)'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedopcion, scrapedlanguage, scrapedcalidad in matches:
google_url = scrapertools.find_single_match(bloque, 'id="%s.*?src="([^"]+)' % scrapedopcion)
if "medium-es" in scrapedlanguage: language = "CAST"
if "medium-en" in scrapedlanguage: language = "VO"
if "medium-vs" in scrapedlanguage: language = "VOSE"
if "medium-la" in scrapedlanguage: language = "LAT"
titulo = " [%s/%s]" % (language, scrapedcalidad.strip())
itemlist.append(
item.clone(action="play", url=google_url, title=" Ver en Gvideo" + titulo, text_color=color2,
extra="", server="gvideo", language=language, quality=scrapedcalidad.strip()))
patron = '<div class="available-source".*?data-url="([^"]+)".*?class="language.*?title="([^"]+)"' \
'.*?class="source-name.*?>\s*([^<]+)<.*?<span class="quality-text">([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
if matches:
for scrapedurl, idioma, server, calidad in matches:
if server == "streamin": server = "streaminto"
if server == "waaw" or server == "miracine": server = "netutv"
if server == "ul": server = "uploadedto"
if server == "player": server = "vimpleru"
if servertools.is_server_enabled(server):
scrapedtitle = " Ver en " + server.capitalize() + " [" + idioma + "/" + calidad + "]"
itemlist.append(item.clone(action="play", url=scrapedurl, title=scrapedtitle, text_color=color2,
extra="", server=server, language=IDIOMAS[idioma]))
if len(itemlist) == 0:  # the header item above is commented out, so an empty list means no links
itemlist.append(item.clone(title=" No hay enlaces disponibles", action="", text_color=color2))
return itemlist
def play(item):
logger.info()
itemlist = []
if item.extra != "" and "google" not in item.url:
post = "id=%s" % item.extra
data = httptools.downloadpage(host + "/goto/", post=post, add_referer=True).data
item.url = scrapertools.find_single_match(data, 'document.location\s*=\s*"([^"]+)"')
url = item.url.replace("http://miracine.tv/n/?etu=", "http://hqq.tv/player/embed_player.php?vid=")
url = url.replace("streamcloud.eu/embed-", "streamcloud.eu/")
if item.server:
enlaces = servertools.findvideosbyserver(url, item.server)[0]
else:
enlaces = servertools.findvideos(url)[0]
itemlist.append(item.clone(url=enlaces[1], server=enlaces[2]))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == "peliculas":
item.url = host + "/catalogue?type=peliculas"
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
if categoria == "series":
item.url = host + "/ultimos-capitulos"
item.action = "ultimos"
itemlist = ultimos(item)
if itemlist[-1].action == "ultimos":
itemlist.pop()
# Catch the exception so a failing channel does not break the "newest" listing
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def select_page(item):
import xbmcgui
dialog = xbmcgui.Dialog()
number = dialog.numeric(0, "Introduce el número de página")
if number != "":
item.url = re.sub(r'page=(\d+)', "page=" + number, item.url)
return peliculas(item)

View File

@@ -1,67 +0,0 @@
{
"id": "cinemahd",
"name": "CinemaHD",
"active": true,
"adult": false,
"language": ["esp", "lat"],
"thumbnail": "https://s13.postimg.cc/bnesayzcn/cinemahd.png",
"banner": "",
"version": 1,
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - Terror",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,321 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel CinemaHD -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools
host = 'https://www.cinemahd.co/'
IDIOMAS = {'Latino': 'LAT'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['gounlimited', 'rapidvideo', 'vshare', 'clipwatching', 'jawclowd', 'streamango']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, title="Películas", action="sub_menu", url=host,
thumbnail=get_thumb('last', auto=True), type='MovieList'))
itemlist.append(Item(channel=item.channel, title="Series", action="sub_menu", url=host,
thumbnail=get_thumb('last', auto=True), type='Series'))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + '?s=',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def sub_menu(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Ultimas", action="list_all", url=host,
thumbnail=get_thumb('last', auto=True), type=item.type))
itemlist.append(Item(channel=item.channel, title="Generos", action="section", section='genre',
thumbnail=get_thumb('genres', auto=True), type=item.type ))
if item.type != 'Series':
itemlist.append(Item(channel=item.channel, title="Alfabetico", action="section", section='alpha',
thumbnail=get_thumb('alphabet', auto=True), type=item.type))
return itemlist
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
full_data = data
if item.section != '':
data = scrapertools.find_single_match(data, 'class="MovieList NoLmtxt(.*?)</ul>')
else:
data = scrapertools.find_single_match(data, '<!--<%s>.*?class="MovieList NoLmtxt(.*?)</ul>' % item.type)
if item.section == 'alpha':
patron = '<span class="Num">\d+.*?<a href="([^"]+)" class.*?<img src="([^"]+)" alt=.*?'
patron += '<strong>([^"]+)</strong>.*?<td>(\d{4})</td>'
matches = re.compile(patron, re.DOTALL).findall(full_data)
else:
patron = '<article.*?<a href="(.*?)">.*?<img src="(.*?)" alt=.*?'
patron += '<h3 class="Title">(.*?)<\/h3>.*?date_range">(\d+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
url = scrapedurl
if year == '':
year = '-'
if "|" in scrapedtitle:
scrapedtitle= scrapedtitle.split("|")
cleantitle = scrapedtitle[0].strip()
else:
cleantitle = scrapedtitle
cleantitle = re.sub('\(.*?\)', '', cleantitle)
if not config.get_setting('unify'):
title = '%s [%s]'%(cleantitle, year)
else:
title = cleantitle
thumbnail = 'http:'+scrapedthumbnail
new_item = Item(channel=item.channel,
title=title,
url=url,
thumbnail=thumbnail,
infoLabels = {'year': year}
)
if 'series' not in url:
new_item.contentTitle = cleantitle
new_item.action='findvideos'
else:
new_item.contentSerieName = cleantitle
new_item.action = 'seasons'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, True)
# Pagination
url_next_page = scrapertools.find_single_match(full_data,'<a class="next.*?href="([^"]+)">')
if url_next_page:
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all',
type=item.type))
return itemlist
def section(item):
logger.info()
itemlist = []
if item.type == 'Series':
url = host + '?tr_post_type=2'
else:
url = host + '?tr_post_type=1'
data = get_source(url)
action = 'list_all'
if item.section == 'genre':
patron = '<a href="([^ ]+)" class="Button STPb">(.*?)</a>'
elif item.section == 'alpha':
patron = '<li><a href="(.*?letter.*?)">(.*?)</a>'
action = 'list_all'
matches = re.compile(patron, re.DOTALL).findall(data)
for data_one, data_two in matches:
url = data_one
title = data_two
if title != 'Ver más':
if item.type == 'Series':
url = url + '?tr_post_type=2'
else:
url = url + '?tr_post_type=1'
if 'serie' in title.lower():
continue
new_item = Item(channel=item.channel, title= title, url=url, action=action, section=item.section,
type=item.type)
itemlist.append(new_item)
return itemlist
def seasons(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron='Temporada <span>(\d+)'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for season in matches:
season = season.lower().replace('temporada','')
infoLabels['season']=season
title = 'Temporada %s' % season
itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)
return itemlist
def episodesxseasons(item):
logger.info()
itemlist = []
full_data=get_source(item.url)
data = scrapertools.find_single_match(full_data, 'Temporada <span>\d+.*?</ul>')
patron='<span class="Num">(\d+)<.*?<a href="([^"]+)".*?"MvTbTtl".*?">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedepisode, scrapedurl, scrapedtitle in matches:
infoLabels['episode'] = scrapedepisode
url = scrapedurl
title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):
logger.info()
itemlist = []
trailer = None
data = get_source(item.url)
data = scrapertools.decodeHtmlentities(data)
patron = 'id="(Opt\d+)">.*?src="([^"]+)" frameborder.*?</iframe>'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, scrapedurl in matches:
scrapedurl = scrapedurl.replace('"','').replace('&#038;','&')
data_video = get_source(scrapedurl)
url = scrapertools.find_single_match(data_video, '<div class="Video">.*?src="([^"]+)" frameborder')
opt_data = scrapertools.find_single_match(data,'"%s"><span>.*?</span>.*?<span>(.*?)</span>'%option).split('-')
language = opt_data[0].strip()
language = re.sub('\(|\)', '', language)
quality = opt_data[1].strip()
if url != '' and 'youtube' not in url:
itemlist.append(Item(channel=item.channel, title='%s', url=url, language=IDIOMAS[language], quality=quality,
action='play', infoLabels=item.infoLabels))
elif 'youtube' in url:
trailer = Item(channel=item.channel, title='Trailer', url=url, action='play', server='youtube')
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s] [%s]'%(i.server.capitalize(),
i.language, i.quality))
if trailer:
itemlist.append(trailer)
# Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
item.section = 'search'
if texto != '':
return list_all(item)
else:
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas','latino']:
item.url = host
elif categoria == 'infantiles':
item.url = host+'animacion/?tr_post_type=1'
elif categoria == 'terror':
item.url = host+'terror/?tr_post_type=1'
item.type = 'MovieList'
item.section = 'search'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -1,63 +0,0 @@
{
"id": "cineonline",
"name": "cineonline",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "https://www.cine-online.eu/wp-content/uploads/2015/04/CINE-logo-bueno.png",
"banner": "",
"categories": [
"movie"
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"ESP",
"VOSE"
]
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -1,214 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import re, urllib, urlparse
from channels import autoplay
from platformcode import config, logger, platformtools
from core.item import Item
from core import httptools, scrapertools, jsontools, tmdb
from core import servertools
from channels import filtertools
host = 'https://www.cine-online.eu'
IDIOMAS = {'Español': 'ESP', 'Cast': 'ESP', 'Latino': 'LAT', 'Lat': 'LAT', 'Subtitulado': 'VOSE', 'Sub': 'VOSE'}
list_language = IDIOMAS.values()
list_servers = ['Streamango', 'Vidoza', 'Openload', 'Streamcherry', 'Netutv']
# list_quality = ['Brscreener', 'HD', 'TS']
list_quality = []
__channel__='cineonline'
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', __channel__)
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', __channel__)
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
__modo_grafico__ = True
def mainlist(item):
logger.info()
itemlist = []
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(item.clone(title ="Películas", action ="mainlist_pelis"))
itemlist.append(item.clone(title="Series" , action="lista", url= host + "/serie/"))
itemlist.append(item.clone(title="Buscar", action="search"))
itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion", folder=False))
autoplay.show_option(item.channel, itemlist)
return itemlist
def mainlist_pelis(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Novedades" , action="lista", url= host))
itemlist.append(item.clone(title="Castellano" , action="lista", url= host + "/tag/castellano/"))
itemlist.append(item.clone(title="Latino" , action="lista", url= host + "/tag/latino/"))
itemlist.append(item.clone(title="Subtituladas" , action="lista", url= host + "/tag/subtitulado/"))
itemlist.append(item.clone(title="Categorias" , action="categorias", url= host))
itemlist.append(item.clone(title="Año" , action="categorias", url= host))
itemlist.append(item.clone( title = 'Buscar', action = 'search', search_type = 'movie' ))
return itemlist
def configuracion(item):
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return lista(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if "Año" in item.title:
data = scrapertools.find_single_match(data,'<h3>Año de estreno(.*?)</ul>')
        patron = '<li><a href="([^"]+)">(\d+)</a>()'  # empty group keeps the three-value unpack below working
else:
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)">([^"]+)</a> <span>(\d+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, cantidad in matches:
scrapedplot = ""
scrapedthumbnail = ""
        title = scrapedtitle + (" %s" % cantidad if cantidad else "")
itemlist.append(item.clone(channel=item.channel, action="lista", title=title , url=scrapedurl ,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div id="mt-\d+".*?<a href="([^"]+)".*?'
patron += '<img src="([^"]+)" alt="([^"]+)".*?'
patron += '<span class="year">(\d+)</span>.*?'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
scrapedtitle = scrapedtitle.replace("Ver", "").replace("online", "")
title = '%s (%s)' % (scrapedtitle, scrapedyear)
url = scrapedurl
new_item = Item(channel=item.channel,
title=title,
url=scrapedurl,
thumbnail=scrapedthumbnail,
infoLabels={'year':scrapedyear})
if '/serie/' in url:
new_item.action = 'temporadas'
new_item.contentSerieName = scrapedtitle
else:
new_item.action = 'findvideos'
new_item.contentTitle = scrapedtitle
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, True)
next_page_url = scrapertools.find_single_match(data,'<a href="([^"]+)">Siguiente</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append(item.clone(channel=item.channel , action="lista" , title="Next page >>" ,
text_color="blue", url=next_page_url) )
return itemlist
def temporadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<span class="se-t">(\d+)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for numtempo in matches:
itemlist.append(item.clone( action='episodesxseason', title='Temporada %s' % numtempo, url = item.url,
contentType='season', contentSeason=numtempo ))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
# return sorted(itemlist, key=lambda it: it.title)
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = temporadas(item)
for tempitem in templist:
itemlist += episodesxseason(tempitem)
return itemlist
def episodesxseason(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="numerando">%s x (\d+)</div>.*?' % item.contentSeason
patron += '<a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for episode, url, title in matches:
titulo = '%sx%s %s' % (item.contentSeason, episode, title)
itemlist.append(item.clone( action='findvideos', url=url, title=titulo,
contentType='episode', contentEpisodeNumber=episode ))
tmdb.set_infoLabels(itemlist)
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = 'id="plays-(\d+)">\s*([^<]+)</div'
matches = scrapertools.find_multiple_matches(data, patron)
for xnumber, xname in matches:
if "/episodios/" in item.url:
lang = scrapertools.find_single_match(data, '#player2%s">([^<]+)</a>' % xnumber)
else:
lang = scrapertools.find_single_match(data, '#div%s">([^<]+)<' % xnumber)
if "lat" in lang.lower(): lang= "Lat"
if 'cast' in lang.lower(): lang= "Cast"
if 'sub' in lang.lower(): lang= "Sub"
if lang in IDIOMAS:
lang = IDIOMAS[lang]
post= {"nombre":xname}
url= httptools.downloadpage("https://www.cine-online.eu/ecrypt", post=urllib.urlencode(post)).data
url = scrapertools.find_single_match(url,'<(?:IFRAME SRC|iframe src)="([^"]+)"')
if not config.get_setting('unify'):
title = ' (%s)' % (lang)
else:
title = ''
if url != '':
itemlist.append(item.clone(action="play", title='%s'+title, url=url, language=lang ))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    # Required for link checking
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra !='findvideos' and not "/episodios/" in item.url :
itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
extra="findvideos", contentTitle=item.contentTitle))
return itemlist

View File

@@ -1,147 +0,0 @@
{
"id": "cinetux",
"name": "Cinetux",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "cinetux.png",
"banner": "cinetux.png",
"fanart": "cinetux.jpg",
"categories": [
"direct",
"movie"
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Subtitulado",
"Español",
"SUB"
]
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Películas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
},
{
"id": "filterlinks",
"type": "list",
"label": "Mostrar enlaces de tipo...",
"default": 2,
"enabled": true,
"visible": true,
"lvalues": [
"Solo Descarga",
"Solo Online",
"No filtrar"
]
},
{
"id": "filterlanguages",
"type": "list",
"label": "Mostrar enlaces del canal en idioma...",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"VOSE",
"Latino",
"Español",
"No filtrar"
]
}, {
"id": "viewmode",
"type": "list",
"label": "Elegir vista por defecto (Confluence)...",
"default": 2,
"enabled": true,
"visible": true,
"lvalues": [
"Sinopsis",
"Miniatura",
"Lista"
]
}
]
}

View File

@@ -1,302 +0,0 @@
# -*- coding: utf-8 -*-
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
IDIOMAS = {'Latino': 'Latino', 'Subtitulado': 'Subtitulado', 'Español': 'Español', 'SUB': 'SUB' }
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'okru', 'vidoza', 'openload', 'powvideo', 'netutv','gvideo']
CHANNEL_HOST = "http://www.cinetux.to/"
# Channel configuration
__modo_grafico__ = config.get_setting('modo_grafico', 'cinetux')
__perfil__ = config.get_setting('perfil', 'cinetux')
# Set the colour profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
color1, color2, color3 = perfil[__perfil__]
viewmode_options = {0: 'movie_with_plot', 1: 'movie', 2: 'list'}
viewmode = viewmode_options[config.get_setting('viewmode', 'cinetux')]
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
item.viewmode = viewmode
data = httptools.downloadpage(CHANNEL_HOST + "pelicula").data
total = scrapertools.find_single_match(data, "Películas</h1><span>(.*?)</span>")
titulo = "Peliculas (%s)" %total
itemlist.append(item.clone(title=titulo, text_color=color2, action="", text_bold=True))
itemlist.append(item.clone(action="peliculas", title=" Novedades", url=CHANNEL_HOST + "pelicula",
thumbnail=get_thumb('newest', auto=True),
text_color=color1))
itemlist.append(item.clone(action="destacadas", title=" Destacadas", url=CHANNEL_HOST + "mas-vistos/",
thumbnail=get_thumb('hot', auto=True),
text_color=color1))
itemlist.append(item.clone(action="idioma", title=" Por idioma", text_color=color1,
thumbnail=get_thumb('language', auto=True)))
itemlist.append(item.clone(action="generos", title=" Por géneros", url=CHANNEL_HOST,
thumbnail=get_thumb('genres', auto=True),
text_color=color1))
itemlist.append(item.clone(title="Documentales", text_bold=True, text_color=color2, action=""))
itemlist.append(item.clone(action="peliculas", title=" Novedades", url=CHANNEL_HOST + "genero/documental/", text_color=color1,
thumbnail=get_thumb('newest', auto=True)))
itemlist.append(item.clone(action="peliculas", title=" Por orden alfabético", text_color=color1, url=CHANNEL_HOST + "genero/documental/?orderby=title&order=asc&gdsr_order=asc",
thumbnail=get_thumb('alphabet', auto=True)))
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(action="search", title="Buscar...", text_color=color3,
thumbnail=get_thumb('search', auto=True)))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
autoplay.show_option(item.channel, itemlist)
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
item.url = CHANNEL_HOST + "?s="
texto = texto.replace(" ", "+")
item.url = item.url + texto
try:
return peliculas(item)
    # Catch the exception so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = CHANNEL_HOST
item.action = "peliculas"
elif categoria == 'documentales':
item.url = CHANNEL_HOST + "genero/documental/"
item.action = "peliculas"
elif categoria == 'infantiles':
item.url = CHANNEL_HOST + "genero/animacion/"
item.action = "peliculas"
elif categoria == 'terror':
item.url = CHANNEL_HOST + "genero/terror/"
item.action = "peliculas"
elif categoria == 'castellano':
item.url = CHANNEL_HOST + "idioma/espanol/"
item.action = "peliculas"
elif categoria == 'latino':
item.url = CHANNEL_HOST + "idioma/latino/"
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
    # Catch the exception so a failing channel does not break the "newest" feed
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def peliculas(item):
logger.info()
itemlist = []
item.text_color = color2
data = httptools.downloadpage(item.url).data
patron = '(?s)class="(?:result-item|item movies)">.*?<img src="([^"]+)'
patron += '.*?alt="([^"]+)"'
patron += '(.*?)'
patron += 'href="([^"]+)"'
patron += '.*?(?:<span>|<span class="year">)(.+?)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, quality, scrapedurl, scrapedyear in matches:
quality = scrapertools.find_single_match(quality, '.*?quality">([^<]+)')
try:
fulltitle = scrapedtitle
year = scrapertools.find_single_match(scrapedyear,'\d{4}')
if "/" in fulltitle:
fulltitle = fulltitle.split(" /", 1)[0]
scrapedtitle = "%s (%s)" % (fulltitle, year)
        except:
            fulltitle = scrapedtitle
            year = ""
if quality:
scrapedtitle += " [%s]" % quality
new_item = item.clone(action="findvideos", title=scrapedtitle, fulltitle=fulltitle,
url=scrapedurl, thumbnail=scrapedthumbnail,
contentType="movie", quality=quality)
if year:
new_item.infoLabels['year'] = int(year)
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
    # Extract the pager
next_page_link = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)')
if next_page_link:
itemlist.append(item.clone(action="peliculas", title=">> Página siguiente", url=next_page_link,
text_color=color3))
return itemlist
def destacadas(item):
logger.info()
itemlist = []
item.text_color = color2
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, 'peliculas_destacadas.*?class="letter_home"')
patron = '(?s)href="([^"]+)".*?'
patron += 'alt="([^"]+)".*?'
patron += 'src="([^"]+)'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedurl = CHANNEL_HOST + scrapedurl
itemlist.append(item.clone(action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle,
url=scrapedurl, thumbnail=scrapedthumbnail,
contentType="movie"
))
next_page_link = scrapertools.find_single_match(data, '<a href="([^"]+)"\s+><span [^>]+>&raquo;</span>')
if next_page_link:
itemlist.append(
item.clone(action="destacadas", title=">> Página siguiente", url=next_page_link, text_color=color3))
return itemlist
def generos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '(?s)dos_columnas">(.*?)</ul>')
patron = '<li><a.*?href="/([^"]+)">(.*?)</li>'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedtitle in matches:
scrapedurl = CHANNEL_HOST + scrapedurl
scrapedtitle = scrapertools.htmlclean(scrapedtitle).strip()
scrapedtitle = unicode(scrapedtitle, "utf8").capitalize().encode("utf8")
if scrapedtitle == "Erotico" and config.get_setting("adult_mode") == 0:
continue
itemlist.append(item.clone(action="peliculas", title=scrapedtitle, url=scrapedurl))
return itemlist
def idioma(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="peliculas", title="Español", url= CHANNEL_HOST + "idioma/espanol/"))
itemlist.append(item.clone(action="peliculas", title="Latino", url= CHANNEL_HOST + "idioma/latino/"))
itemlist.append(item.clone(action="peliculas", title="VOSE", url= CHANNEL_HOST + "idioma/subtitulado/"))
return itemlist
def findvideos(item):
import urllib
logger.info()
itemlist=[]
data = httptools.downloadpage(item.url).data
patron = 'tooltipctx.*?data-type="([^"]+).*?'
patron += 'data-post="(\d+)".*?'
patron += 'data-nume="(\d+).*?'
patron += 'class="title">.*?src.*?/>([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for tp, pt, nm, language in matches:
language = language.strip()
post = {'action':'doo_player_ajax', 'post':pt, 'nume':nm, 'type':tp}
post = urllib.urlencode(post)
new_data = httptools.downloadpage(CHANNEL_HOST+'wp-admin/admin-ajax.php', post=post, headers={'Referer':item.url}).data
if not config.get_setting('unify'):
if item.quality == '':
quality = 'SD'
else:
quality = item.quality
title = ' [%s][%s]' % (quality, IDIOMAS[language])
else:
title = ''
url = scrapertools.find_single_match(new_data, "src='([^']+)'")
url = get_url(url)
if url:
itemlist.append(item.clone(title ='%s'+title, url=url, action='play',
language=IDIOMAS[language], text_color = ""))
patron = "<a class='optn' href='([^']+)'.*?<img src='.*?>([^<]+)<.*?<img src='.*?>([^<]+)<"
matches = scrapertools.find_multiple_matches(data, patron)
for hidden_url, quality, language in matches:
if not config.get_setting('unify'):
title = ' [%s][%s]' % (quality, IDIOMAS[language])
else:
title = ''
new_data = httptools.downloadpage(hidden_url).data
url = scrapertools.find_single_match(new_data, 'id="link" href="([^"]+)"')
url = get_url(url)
if url:
itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', quality=quality,
language=IDIOMAS[language], infoLabels=item.infoLabels, text_color = ""))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
itemlist.sort(key=lambda it: (it.language, it.server, it.quality))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)
if itemlist:
if item.contentChannel != "videolibrary":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
contentTitle = item.contentTitle
))
return itemlist
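# Unwraps cinetux.me intermediate pages to reach the final hoster URL
# (mail/drive/okru/mp4 pages embed it in an iframe, the rest in a JS redirect)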
def get_url(url):
logger.info()
url = url.replace('\\/', '/')
if "cinetux.me" in url:
d1 = httptools.downloadpage(url).data
if "mail" in url or "drive" in url or "ok.cinetux" in url or "mp4/" in url:
id = scrapertools.find_single_match(d1, '<img src="[^#]+#([^"]+)"')
d1 = d1.replace("'",'"')
url = scrapertools.find_single_match(d1, '<iframe src="([^"]+)') + id
if "drive" in url:
url += "/preview"
if "FFFFFF" in url:
url = scrapertools.find_single_match(d1, 'class="cta" href="([^"]+)"')
else:
url = scrapertools.find_single_match(d1, 'document.location.replace\("([^"]+)')
url = url.replace("povwideo","powvideo")
return url
def play(item):
item.thumbnail = item.contentThumbnail
return [item]

View File

@@ -1,44 +0,0 @@
{
"id": "clasicofilm",
"name": "ClasicoFilm",
"active": true,
"adult": false,
"language": ["esp", "cast"],
"thumbnail": "http://i.imgur.com/F7sevVu.jpg?1",
"banner": "clasicofilm.png",
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Películas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}

View File

@@ -1,194 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import jsontools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, platformtools, logger
from channelselector import get_thumb
host = "http://www.classicofilm.com/"
# Channel configuration
__modo_grafico__ = config.get_setting('modo_grafico', 'clasicofilm')
__perfil__ = config.get_setting('perfil', 'clasicofilm')
# Set the colour profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
if __perfil__ - 1 >= 0:
color1, color2, color3 = perfil[__perfil__ - 1]
else:
color1 = color2 = color3 = ""
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Películas", text_color=color2, action="", text_bold=True))
itemlist.append(item.clone(action="peliculas", title=" Novedades",
url = host + "feeds/posts/summary?start-index=1&max-results=20&alt=json-in-script&callback=finddatepost",
thumbnail=get_thumb('newest', auto=True), text_color=color1))
#itemlist.append(item.clone(action="generos", title=" Por géneros", url=host,
# thumbnail=get_thumb('genres', auto=True), text_color=color1))
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(action="search", title="Buscar...", text_color=color3,
thumbnail=get_thumb('search', auto=True)))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
return itemlist
def configuracion(item):
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
texto = texto.replace(" ", "%20")
item.url = host + "search?q=%s" % texto
try:
return busqueda(item)
    # Catch the exception so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host + "feeds/posts/summary?start-index=1&max-results=20&alt=json-in-script&callback=finddatepost"
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
    # Catch the exception so a failing channel does not break the "newest" feed
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def peliculas(item):
logger.info()
itemlist = []
item.text_color = color2
    # Download the page
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, 'finddatepost\((\{.*?\]\}\})\);')
data = jsontools.load(data)["feed"]
for entry in data["entry"]:
bb=jsontools.dump(entry["author"])
aa=scrapertools.find_single_match(bb, '(?s)src": "([^"]+)')
if "Enviar comentarios" in entry: continue
for link in entry["link"]:
if link["rel"] == "alternate":
title = link["title"]
url = link["href"]
break
thumbnail = "https:" + bb
thumbnail = thumbnail.replace("s72-c/", "") #"" #entry["media$thumbnail"]["url"].replace("s72-c/", "")
try:
title_split = re.split(r"\s*\((\d)", title, 1)
year = title_split[1] + scrapertools.find_single_match(title_split[2], '(\d{3})\)')
fulltitle = title_split[0]
except:
fulltitle = title
year = ""
if not "DVD" in title and not "HDTV" in title and not "HD-" in title:
continue
infolabels = {'year': year}
new_item = item.clone(action="findvideos", title=title, fulltitle=fulltitle,
url=url, thumbnail=thumbnail, infoLabels=infolabels,
contentTitle=fulltitle, contentType="movie")
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
actualpage = int(scrapertools.find_single_match(item.url, 'start-index=(\d+)'))
totalresults = int(data["openSearch$totalResults"]["$t"])
if actualpage + 20 < totalresults:
url_next = item.url.replace("start-index=" + str(actualpage), "start-index=" + str(actualpage + 20))
itemlist.append(Item(channel=item.channel, action=item.action, title=">> Página Siguiente", url=url_next))
return itemlist
def busqueda(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = """post-title entry-titl.*?href='([^']+)'"""
patron += """>([^<]+).*?"""
patron += """src="([^"]+)"""
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
year = scrapertools.find_single_match(scrapedtitle, "\(([0-9]{4})\)")
ctitle = scrapedtitle.split("(")[0].strip()
itemlist.append(item.clone(action = "findvideos",
contentTitle = ctitle,
infoLabels = {"year" : year},
thumbnail = scrapedthumbnail,
title = scrapedtitle,
url = scrapedurl
))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
return itemlist
def generos(item):
logger.info()
itemlist = []
    # Download the page
data = httptools.downloadpage(item.url).data
patron = '<b>([^<]+)</b><br\s*/>\s*<script src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedtitle, scrapedurl in matches:
scrapedurl = scrapedurl.replace("&amp;","&")
scrapedurl = scrapedurl.replace("max-results=500", "start-index=1&max-results=20") \
.replace("recentpostslist", "finddatepost")
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=item.thumbnail, text_color=color3))
itemlist.sort(key=lambda x: x.title)
return itemlist
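# Decodes the obfuscated div id: every 3-character group becomes a \u0xxx
# escape, which decodes to the JSON snippet holding the netu.tv video id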
def decodifica_id(txt):
res = ''
for i in range(0, len(txt), 3):
res += '\\u0' + txt[i:i+3]
    return res.decode('unicode-escape')  # e.g. {"v":"9KD2iEmiYLsF"}
def findvideos(item):
logger.info()
itemlist = []
if item.infoLabels["tmdb_id"]:
tmdb.set_infoLabels_item(item, __modo_grafico__)
data = httptools.downloadpage(item.url).data
if "data:text/javascript;base64" in data:
div_id = scrapertools.find_single_match(data, '<div id="([0-9a-fA-F]+)"')
# ~ logger.info(div_id)
vid_id = scrapertools.find_single_match(decodifica_id(div_id), ':"([^"]+)"')
# ~ logger.info(vid_id)
itemlist.append(item.clone(url='http://netu.tv/watch_video.php?v='+vid_id, server='netutv', action='play'))
else:
iframe = scrapertools.find_single_match(data, '<iframe width="720".*?src="([^"]+)"')
        data = data.replace("googleusercontent", "malo")  # keep find_video_items from picking up bogus gvideo links
if "goo.gl/" in iframe:
data += httptools.downloadpage(iframe, follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist = servertools.find_video_items(item, data)
if config.get_videolibrary_support():
itemlist.append(item.clone(action="add_pelicula_to_library", title="Añadir película a la videoteca"))
return itemlist

View File

@@ -1,95 +0,0 @@
{
"id": "crunchyroll",
"name": "Crunchyroll",
"language": ["esp", "lat", "cast"],
"active": true,
"adult": false,
"thumbnail": "http://i.imgur.com/O49fDS1.png",
"categories": [
"anime",
"tvshow"
],
"settings": [
{
"id": "crunchyrolluser",
"type": "text",
"color": "0xFF25AA48",
"label": "@30014",
"enabled": true,
"visible": true
},
{
"id": "crunchyrollpassword",
"type": "text",
"color": "0xFF25AA48",
"hidden": true,
"label": "@30015",
"enabled": "!eq(-1,'')",
"visible": true
},
{
"id": "crunchyrollidioma",
"type": "list",
"label": "Idioma de los textos de la web",
"default": 6,
"enabled": true,
"visible": true,
"lvalues": [
"Alemán",
"Portugués",
"Francés",
"Italiano",
"Inglés",
"Español Latino",
"Español España"
]
},
{
"id": "crunchyrollsub",
"type": "list",
"label": "Idioma de subtítulos preferido en Crunchyroll",
"default": 6,
"enabled": true,
"visible": true,
"lvalues": [
"Alemán",
"Portugués",
"Francés",
"Italiano",
"Inglés",
"Español Latino",
"Español España"
]
},
{
"id": "proxy_usa",
"type": "bool",
"label": "Usar proxy para ver el catálogo de USA",
"default": false,
"visible": true,
"enabled": "!eq(+1,true)"
},
{
"id": "proxy_spain",
"type": "bool",
"label": "Usar proxy para ver el catálogo de España",
"default": false,
"visible": true,
"enabled": "!eq(-1,true)"
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}

View File

@@ -1,323 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urllib
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
__perfil__ = config.get_setting('perfil', "crunchyroll")
# Set the colour profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08']]
if __perfil__ - 1 >= 0:
color1, color2, color3, color4, color5 = perfil[__perfil__ - 1]
else:
color1 = color2 = color3 = color4 = color5 = ""
host = "http://www.crunchyroll.com"
proxy_u = "http://anonymouse.org/cgi-bin/anon-www.cgi/"
proxy_e = "http://proxyanonimo.es/browse.php?u="
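# Logs into Crunchyroll: sets the site language, posts the login form with its
# CSRF token, and returns (logged_in, error_message, premium_status)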
def login():
logger.info()
langs = ['deDE', 'ptPT', 'frFR', 'itIT', 'enUS', 'esLA', 'esES']
lang = langs[config.get_setting("crunchyrollidioma", "crunchyroll")]
httptools.downloadpage(host + "/ajax/", "req=RpcApiTranslation_SetLang&locale=%s" % lang)
login_page = host.replace("http","https") + "/login"
user = config.get_setting("crunchyrolluser", "crunchyroll")
password = config.get_setting("crunchyrollpassword", "crunchyroll")
if not user or not password:
return False, "", ""
data = httptools.downloadpage(login_page).data
if not "<title>Redirecting" in data:
token = scrapertools.find_single_match(data, 'name="login_form\[_token\]" value="([^"]+)"')
redirect_url = scrapertools.find_single_match(data, 'name="login_form\[redirect_url\]" value="([^"]+)"')
post = "login_form%5Bname%5D=" + user + "&login_form%5Bpassword%5D=" + password + \
"&login_form%5Bredirect_url%5D=" + redirect_url + "&login_form%5B_token%5D=" + token
data = httptools.downloadpage(login_page, post).data
if not "<title>Redirecting" in data:
if "Usuario %s no disponible" % user in data:
return False, "El usuario de crunchyroll no existe.", ""
elif '<li class="error">Captcha' in data:
return False, "Es necesario resolver un captcha. Loguéate desde un navegador y vuelve a intentarlo", ""
else:
return False, "No se ha podido realizar el login.", ""
data = httptools.downloadpage(host).data
premium = scrapertools.find_single_match(data, ',"premium_status":"([^"]+)"')
premium = premium.replace("_", " ").replace("free trial", "Prueba Gratuita").capitalize()
locate = scrapertools.find_single_match(data, 'title="Your detected location is (.*?)."')
if locate:
premium += " - %s" % locate
return True, "", premium
def mainlist(item):
logger.info()
itemlist = []
item.text_color = color1
proxy_usa = config.get_setting("proxy_usa", "crunchyroll")
proxy_spain = config.get_setting("proxy_spain", "crunchyroll")
item.login = False
error_message = ""
global host
if not proxy_usa and not proxy_spain:
item.login, error_message, premium = login()
elif proxy_usa:
item.proxy = "usa"
host = proxy_u + host
elif proxy_spain:
httptools.downloadpage("http://proxyanonimo.es/")
item.proxy = "spain"
host = proxy_e + host
if not item.login and error_message:
itemlist.append(item.clone(title=error_message, action="configuracion", folder=False, text_color=color4))
elif item.login:
itemlist.append(item.clone(title="Tipo de cuenta: %s" % premium, action="", text_color=color4))
elif item.proxy:
itemlist.append(item.clone(title="Usando proxy: %s" % item.proxy.capitalize(), action="", text_color=color4))
itemlist.append(item.clone(title="Anime", action="", text_color=color2))
item.contentType = "tvshow"
itemlist.append(
item.clone(title=" Novedades", action="lista", url=host + "/videos/anime/updated/ajax_page?pg=0", page=0))
itemlist.append(
item.clone(title=" Popular", action="lista", url=host + "/videos/anime/popular/ajax_page?pg=0", page=0))
itemlist.append(item.clone(title=" Emisiones Simultáneas", action="lista",
url=host + "/videos/anime/simulcasts/ajax_page?pg=0", page=0))
itemlist.append(item.clone(title=" Índices", action="indices"))
itemlist.append(item.clone(title="Drama", action="", text_color=color2))
itemlist.append(
item.clone(title=" Popular", action="lista", url=host + "/videos/drama/popular/ajax_page?pg=0", page=0))
itemlist.append(item.clone(title=" Índice Alfabético", action="indices",
url=host + "/videos/drama/alpha"))
if item.proxy != "usa":
itemlist.append(item.clone(action="calendario", title="Calendario de Estrenos Anime", text_color=color4,
url=host + "/simulcastcalendar"))
itemlist.append(item.clone(title="Configuración del canal", action="configuracion", text_color="gold"))
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
user = config.get_setting("crunchyrolluser", "crunchyroll")
password = config.get_setting("crunchyrollpassword", "crunchyroll")
sub = config.get_setting("crunchyrollsub", "crunchyroll")
config.set_setting("crunchyrolluser", user)
config.set_setting("crunchyrollpassword", password)
values = [6, 5, 4, 3, 2, 1, 0]
config.set_setting("crunchyrollsub", str(values[sub]))
platformtools.itemlist_refresh()
return ret
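# Series listing: scrapes one ajax page and pre-fetches the following one to
# decide whether a "next page" entry should be appended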
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
next = item.url.replace("?pg=%s" % item.page, "?pg=%s" % str(item.page + 1))
data_next = httptools.downloadpage(next).data
patron = '<li id="media_group_(\d+)".*?title="([^"]+)".*?href="([^"]+)".*?src="([^"]+)"' \
'.*?<span class="series-data.*?>\s*([^<]+)</span>'
matches = scrapertools.find_multiple_matches(data, patron)
for id, title, url, thumb, videos in matches:
if item.proxy == "spain":
url = "http://proxyanonimo.es" + url.replace("&amp;b=12", "")
elif not item.proxy:
url = host + url
thumb = urllib.unquote(thumb.replace("/browse.php?u=", "").replace("_thumb", "_full").replace("&amp;b=12", ""))
scrapedtitle = "%s (%s)" % (title, videos.strip())
plot = scrapertools.find_single_match(data, '%s"\).data.*?description":"([^"]+)"' % id)
plot = unicode(plot, 'unicode-escape', "ignore")
itemlist.append(item.clone(action="episodios", url=url, title=scrapedtitle, thumbnail=thumb,
contentTitle=title, contentSerieName=title, infoLabels={'plot': plot},
text_color=color2))
if '<li id="media_group' in data_next:
itemlist.append(item.clone(action="lista", url=next, title=">> Página Siguiente", page=item.page + 1,
text_color=""))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\t|\s{2,}', '', data)
patron = '<li id="showview_videos.*?href="([^"]+)".*?(?:src|data-thumbnailUrl)="([^"]+)".*?media_id="([^"]+)"' \
'style="width:(.*?)%.*?<span class="series-title.*?>\s*(.*?)</span>.*?<p class="short-desc".*?>' \
'\s*(.*?)</p>.*?description":"([^"]+)"'
if data.count('class="season-dropdown') > 1:
bloques = scrapertools.find_multiple_matches(data, 'class="season-dropdown[^"]+".*?title="([^"]+)"(.*?)</ul>')
for season, b in bloques:
matches = scrapertools.find_multiple_matches(b, patron)
if matches:
itemlist.append(item.clone(action="", title=season, text_color=color3))
for url, thumb, media_id, visto, title, subt, plot in matches:
if item.proxy == "spain":
url = urllib.unquote(url.replace("/browse.php?u=", "").replace("&amp;b=12", ""))
elif not item.proxy:
url = host + url
url = url.replace(proxy_u, "")
thumb = urllib.unquote(
thumb.replace("/browse.php?u=", "").replace("_wide.", "_full.").replace("&amp;b=12", ""))
title = " %s - %s" % (title, subt)
if visto != "0":
title += " [COLOR %s][V][/COLOR]" % color5
itemlist.append(
Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumb, media_id=media_id,
server="crunchyroll", text_color=item.text_color, contentTitle=item.contentTitle,
contentSerieName=item.contentSerieName, contentType="tvshow"))
else:
matches = scrapertools.find_multiple_matches(data, patron)
for url, thumb, media_id, visto, title, subt, plot in matches:
if item.proxy == "spain":
url = urllib.unquote(url.replace("/browse.php?u=", "").replace("&amp;b=12", ""))
elif not item.proxy:
url = host + url
url = url.replace(proxy_u, "")
thumb = urllib.unquote(
thumb.replace("/browse.php?u=", "").replace("_wide.", "_full.").replace("&amp;b=12", ""))
title = "%s - %s" % (title, subt)
if visto != "0":
title += " [COLOR %s][V][/COLOR]" % color5
itemlist.append(
Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumb, media_id=media_id,
server="crunchyroll", text_color=item.text_color, contentTitle=item.contentTitle,
contentSerieName=item.contentSerieName, contentType="tvshow"))
return itemlist
def indices(item):
logger.info()
itemlist = []
if not item.url:
itemlist.append(item.clone(title="Alfabético", url=host + "/videos/anime/alpha"))
itemlist.append(item.clone(title="Géneros", url=host + "/videos/anime"))
itemlist.append(item.clone(title="Temporadas", url=host + "/videos/anime"))
else:
data = httptools.downloadpage(item.url).data
if "Alfabético" in item.title:
bloque = scrapertools.find_single_match(data, '<div class="content-menu cf ">(.*?)</div>')
matches = scrapertools.find_multiple_matches(bloque, '<a href="([^"]+)".*?>([^<]+)<')
for url, title in matches:
if "todo" in title:
continue
if item.proxy == "spain":
url = proxy_e + host + url
elif item.proxy == "usa":
url = proxy_u + host + url
else:
url = host + url
itemlist.append(item.clone(action="alpha", title=title, url=url, page=0))
elif "Temporadas" in item.title:
bloque = scrapertools.find_single_match(data,
'<div class="season-selectors cf selectors">(.*?)<div id="container"')
matches = scrapertools.find_multiple_matches(bloque, 'href="#([^"]+)".*?title="([^"]+)"')
for url, title in matches:
url += "/ajax_page?pg=0"
if item.proxy == "spain":
url = proxy_e + host + url
elif item.proxy == "usa":
url = proxy_u + host + url
else:
url = host + url
itemlist.append(item.clone(action="lista", title=title, url=url, page=0))
else:
bloque = scrapertools.find_single_match(data, '<div class="genre-selectors selectors">(.*?)</div>')
matches = scrapertools.find_multiple_matches(bloque, '<input id="([^"]+)".*?title="([^"]+)"')
for url, title in matches:
url = "%s/genres/ajax_page?pg=0&tagged=%s" % (item.url, url)
if item.proxy == "spain":
url = proxy_e + url.replace("&", "%26")
elif item.proxy == "usa":
url = proxy_u + url
itemlist.append(item.clone(action="lista", title=title, url=url, page=0))
return itemlist
def alpha(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="wrapper hover-toggle-queue.*?title="([^"]+)".*?href="([^"]+)".*?src="([^"]+)"' \
'.*?<span class="series-data.*?>\s*([^<]+)</span>.*?<p.*?>(.*?)</p>'
matches = scrapertools.find_multiple_matches(data, patron)
for title, url, thumb, videos, plot in matches:
if item.proxy == "spain":
url = "http://proxyanonimo.es" + url.replace("&amp;b=12", "")
elif not item.proxy:
url = host + url
thumb = urllib.unquote(thumb.replace("/browse.php?u=", "").replace("_small", "_full").replace("&amp;b=12", ""))
scrapedtitle = "%s (%s)" % (title, videos.strip())
itemlist.append(item.clone(action="episodios", url=url, title=scrapedtitle, thumbnail=thumb,
contentTitle=title, contentSerieName=title, infoLabels={'plot': plot},
text_color=color2))
return itemlist
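# Simulcast calendar: one block per air date, one entry per episode; only
# episodes already marked "Disponible" are made playable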
def calendario(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="specific-date">.*?datetime="\d+-(\d+)-(\d+).*?class="day-name">.*?>\s*([^<]+)</time>(.*?)</section>'
bloques = scrapertools.find_multiple_matches(data, patron)
for mes, dia, title, b in bloques:
patron = 'class="available-time">([^<]+)<.*?<cite itemprop="name">(.*?)</cite>.*?href="([^"]+)"' \
'.*?>\s*(.*?)\s*</a>(.*?)</article>'
matches = scrapertools.find_multiple_matches(b, patron)
if matches:
title = "%s/%s - %s" % (dia, mes, title.strip())
itemlist.append(item.clone(action="", title=title))
for hora, title, url, subt, datos in matches:
subt = subt.replace("Available", "Disponible").replace("Episode", "Episodio").replace("in ", "en ")
subt = re.sub(r"\s{2,}", " ", subt)
if "<time" in subt:
subt = re.sub(r"<time.*?>", "", subt).replace("</time>", "")
scrapedtitle = " [%s] %s - %s" % (hora, scrapertools.htmlclean(title), subt)
scrapedtitle = re.sub(r"\[email&#160;protected\]|\[email\xc2\xa0protected\]", "Idolm@ster", scrapedtitle)
if "Disponible" in scrapedtitle:
if item.proxy == "spain":
url = urllib.unquote(url.replace("/browse.php?u=", "").replace("&amp;b=12", ""))
action = "play"
server = "crunchyroll"
else:
action = ""
server = ""
thumb = scrapertools.find_single_match(datos, '<img class="thumbnail" src="([^"]+)"')
if not thumb:
thumb = scrapertools.find_single_match(datos, 'src="([^"]+)"')
if thumb:
thumb = urllib.unquote(thumb.replace("/browse.php?u=", "").replace("_thumb", "_full") \
.replace("&amp;b=12", "").replace("_large", "_full"))
itemlist.append(item.clone(action=action, url=url, title=scrapedtitle, contentTitle=title, thumbnail=thumb,
text_color=color2, contentSerieName=title, server=server))
next = scrapertools.find_single_match(data, 'js-pagination-next"\s*href="([^"]+)"')
if next:
if item.proxy == "spain":
next = "http://proxyanonimo.es" + url.replace("&amp;b=12", "")
else:
next = host + next
itemlist.append(item.clone(action="calendario", url=next, title=">> Siguiente Semana"))
prev = scrapertools.find_single_match(data, 'js-pagination-last"\s*href="([^"]+)"')
if prev:
if item.proxy == "spain":
prev = "http://proxyanonimo.es" + url.replace("&amp;b=12", "")
else:
prev = host + prev
itemlist.append(item.clone(action="calendario", url=prev, title="<< Semana Anterior"))
return itemlist
def play(item):
logger.info()
if item.login and not "[V]" in item.title:
post = "cbelapsed=60&h=&media_id=%s" % item.media_id + "&req=RpcApiVideo_VideoView&cbcallcount=1&ht=0" \
"&media_type=1&video_encode_id=0&playhead=10000"
httptools.downloadpage(host + "/ajax/", post)
return [item]

View File

@@ -1,29 +0,0 @@
{
"id": "cuevana2espanol",
"name": "Cuevana2español",
"active": true,
"adult": false,
"language": ["esp", "lat"],
"thumbnail": "cuevana2espanol.png",
"categories": [
"movie"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,247 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urllib
from channelselector import get_thumb
from core.item import Item
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from platformcode import config, logger
from channels import autoplay
host = "http://cuevana2espanol.com/"
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'directo', 'yourupload', 'openload', 'dostream']
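# Download helper: fetches a page and strips newlines/tabs so the single-line
# regexes below can match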
def load_data(url):
data = httptools.downloadpage(url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
return data
def redirect_url(url, parameters=None):
data = httptools.downloadpage(url, post=parameters)
logger.info(data.url)
return data.url
def mainlist(item):
itemlist = []
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(Item(channel = item.channel, title = "Novedades", action = "movies",
url = host + "ver-pelicula-online", thumbnail = get_thumb("newest", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Favoritas", action = "movies",
url = host + "calificaciones", thumbnail = get_thumb("favorites", auto = True) ))
itemlist.append(Item(channel = item.channel, title = "Destacadas", action = "movies",
url = host + "tendencias", thumbnail = get_thumb("hot", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Ranking IMDB", action = "moviesIMDB",
url = host + "raking-imdb", thumbnail = get_thumb("hot", auto = True) ))
itemlist.append(Item(channel = item.channel, title = "--- Busqueda ---", folder=False, text_bold=True))
itemlist.append(Item(channel = item.channel, title = "Por Letra", action = "letters",
url = host, thumbnail = get_thumb("alphabet", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Buscar...", action = "search",
url = host + "?s=", thumbnail = get_thumb("search", auto = True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def movies(item):
itemlist = []
data = load_data(item.url)
pattern = 'class="poster"><img src="([^"]+)" alt="([^"]+)".*?'
pattern += '</span> (.*?)</div>.*?'
pattern += 'href="([^"]+)".*?'
pattern += '<span>(\d+)</span>.*?'
matches = scrapertools.find_multiple_matches(data, pattern)
for img, title, ranking, link, age in matches:
itemTitle = "%s [COLOR yellow](%s)[/COLOR] [COLOR blue](%s)[/COLOR]" % (title, ranking, age)
itemlist.append(Item(channel = item.channel, title=itemTitle, fulltitle=title, thumbnail=img,
url=link, action="findvideos"))
next_page = scrapertools.find_single_match(data, 'href="([^"]+)" ><span class="icon-chevron-right">')
if next_page:
itemlist.append(Item(channel = item.channel, title="Siguiente Pagina",
url=next_page, action="movies"))
return itemlist
def moviesIMDB(item):
itemlist = []
data = load_data(item.url)
pattern = '"poster"><a href="([^"]+)"><img src="([^"]+)".*?'
pattern += 'class="puesto">(\d+)</div>.*?'
pattern += '"rating">(.*?)</div>.*?'
pattern += '"title">.*?>(.*?)</a>'
matches = scrapertools.find_multiple_matches(data, pattern)
for link, img, rank, rating, title in matches:
itemTitle = "%s [COLOR blue](#%s)[/COLOR] [COLOR yellow](%s)[/COLOR]" % (title, rank, rating)
img = img.replace('-90x135', '')
itemlist.append(Item(channel = item.channel, title=itemTitle, fulltitle=title, thumbnail=img,
url=link, action="findvideos"))
return itemlist
def byLetter(item):
itemlist = []
letter = item.extra
pageForNonce = load_data(item.url)
nonce = scrapertools.find_single_match(pageForNonce, '"nonce":"([^"]+)"')
raw = httptools.downloadpage('http://cuevana2espanol.com/wp-json/dooplay/glossary/?term=%s&nonce=%s&type=all' % (letter, nonce)).data
json = jsontools.load(raw)
logger.info(nonce)
if 'error' not in json:
for movie in json.items():
data = movie[1]
itemTitle = data['title']
if 'year' in data:
itemTitle += " [COLOR blue](%s)[/COLOR]" % data['year']
if data['imdb']:
itemTitle += " [COLOR yellow](%s)[/COLOR]" % data['imdb']
itemlist.append(Item(channel = item.channel, title=itemTitle, fulltitle=data['title'], url=data['url'],
thumbnail=data['img'].replace('-90x135', ''), action="findvideos"))
return itemlist
def letters(item):
itemlist = []
letter = '#ABCDEFGHIJKLMNOPQRSTUVWXYZ'
for let in letter:
itemlist.append(item.clone(title=let, extra=let.lower(), action="byLetter"))
return itemlist
def searchMovies(item):
itemlist = []
data = load_data(item.url)
pattern = 'class="image">.*?href="([^"]+)".*?'
pattern += 'src="([^"]+)" alt="([^"]+)".*?'
pattern += 'class="year">(\d+)</span>.*?'
pattern += '<p>(.*?)</p>'
matches = scrapertools.find_multiple_matches(data, pattern)
for link, img, title, year, plot in matches:
itemTitle = "%s [COLOR blue](%s)[/COLOR]" % (title, year)
fullimg = img.replace('-150x150', '')
itemlist.append(Item(channel = item.channel, title=itemTitle, fulltitle=title, thumbnail=fullimg,
url=link, plot=plot, action="findvideos"))
next_page = scrapertools.find_single_match(data, 'href="([^"]+)" ><span class="icon-chevron-right">')
if next_page:
itemlist.append(Item(channel = item.channel, title="Siguiente Pagina",
url=next_page, action="searchMovies"))
return itemlist
def search(item, text):
text = text.lower().replace(' ', '+')
item.url += text
return searchMovies(item)
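# Resolves a gkpluginsphp hash by POSTing it to the player endpoint; the JSON
# reply carries the direct link (or a list of quality variants)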
def GKPluginLink(hash):
hashdata = urllib.urlencode({r'link':hash})
try:
json = httptools.downloadpage('https://player4.cuevana2.com/plugins/gkpluginsphp.php', post=hashdata).data
except:
return None
logger.info(jsontools.load(json))
data = jsontools.load(json) if json else False
if data:
return data['link'] if 'link' in data else None
else:
return None
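# Resolves an openload hash through the site's own API; returns the URL only
# when the reply reports status == 1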
def OpenloadLink(hash):
hashdata = urllib.urlencode({r'h':hash})
json = httptools.downloadpage('http://cuevana2espanol.com/openload/api.php', post=hashdata).data
data = jsontools.load(json) if json else False
    return data['url'] if data and data.get('status') == 1 else None
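# Scrapes year, plot and genres from the detail page into the item's infoLabels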
def getContent(item, data):
item.infoLabels["year"] = scrapertools.find_single_match(data, 'class="date">.*?(\d+)</span>')
item.plot = scrapertools.find_single_match(data, 'class="wp-content"><p>(.*?)</p>')
genres = ''
for genre in scrapertools.find_multiple_matches(data, '/genero/.*?"tag">(.*?)</a>'):
genres += genre + ', '
item.infoLabels['genre'] = genres.strip(', ')
def findvideos(item):
logger.info()
itemlist = []
data = load_data(item.url)
getContent(item, data)
"""
if item.extra:
getContentShow(data, item)
else:
getContentMovie(data, item)
"""
pattern = '<iframe class="metaframe rptss" src="([^"]+)"'
#itemlist.append(Item(channel = item.channel, title=item.url))
for link in scrapertools.find_multiple_matches(data, pattern):
#php.*?=(\w+)&
#url=(.*?)&
if 'player' in link:
logger.info("CUEVANA LINK %s" % link)
if r'%2Fopenload%2F' in link:
link = scrapertools.find_single_match(link, 'h%3D(\w+)')
link = OpenloadLink(link)
elif r'ir.php' in link:
link = scrapertools.find_single_match(link, 'php.*?=(.*)').replace('%3A', ':').replace('%2F', '/')
logger.info("CUEVANA IR %s" % link)
elif r'gdv.php' in link:
            # Google Drive makes link discovery slow, is a weak option anyway, and is the first to be taken down
continue
else:
link = scrapertools.find_single_match(link, 'php.*?=(\w+)')
link = GKPluginLink(link)
title = "[COLOR blue]Servidor [%s][/COLOR]"
elif 'youtube' in link:
title = "[COLOR yellow]Ver Trailer (%s)[/COLOR]"
        else:  # anything else is unimplemented; report it if a movie fails to show up
continue
if not link:
continue
        # GKPlugin can return several links with different qualities; a custom option
        # list for "Directo" would be welcome, but for now only the first link found is used
if type(link) is list:
link = link[0]['link']
if r'chomikuj.pl' in link:
            # For some users the CH option returns a 401 error
link += "|Referer=https://player4.cuevana2.com/plugins/gkpluginsphp.php"
itemlist.append(
item.clone(
channel = item.channel,
title=title,
url=link, action='play'))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist):
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
fulltitle = item.fulltitle
))
return itemlist

View File

@@ -1,69 +0,0 @@
{
"id": "cuevana3",
"name": "Cuevana 3",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "http://www.cuevana3.com/wp-content/uploads/2017/08/logo-v10.png",
"banner": "",
"version": 1,
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - Terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,216 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel Cuevana 3 -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools
host = 'http://www.cuevana3.com/'
IDIOMAS = {'Latino': 'LAT', 'Español': 'CAST', 'Subtitulado':'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['fastplay', 'rapidvideo', 'streamplay', 'flashx', 'streamito', 'streamango', 'vidoza']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host+'peliculas',
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, title="Estrenos", action="list_all", url=host+'estrenos',
thumbnail=get_thumb('premieres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Mas vistas", action="list_all", url=host+'peliculas-mas-vistas',
thumbnail=get_thumb('more watched', auto=True)))
itemlist.append(Item(channel=item.channel, title="Mas votadas", action="list_all", url=host+'peliculas-mas-valoradas',
thumbnail=get_thumb('more voted', auto=True)))
itemlist.append(Item(channel=item.channel, title="Generos", action="section", section='genre',
thumbnail=get_thumb('genres', auto=True)))
# itemlist.append(Item(channel=item.channel, title="Castellano", action="list_all", url= host+'espanol',
# thumbnail=get_thumb('audio', auto=True)))
#
# itemlist.append(Item(channel=item.channel, title="Latino", action="list_all", url=host + 'latino',
# thumbnail=get_thumb('audio', auto=True)))
#
# itemlist.append(Item(channel=item.channel, title="VOSE", action="list_all", url=host + 'subtitulado',
# thumbnail=get_thumb('audio', auto=True)))
#
# itemlist.append(Item(channel=item.channel, title="Alfabetico", action="section", section='alpha',
# thumbnail=get_thumb('alphabet', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+'?s=',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
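# Fetches a page and collapses whitespace so the single-line regexes can match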
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
itemlist = []
try:
# if item.section == 'alpha':
# patron = '<span class="Num">\d+.*?<a href="([^"]+)" class.*?'
# patron += 'src="([^"]+)" class.*?<strong>([^<]+)</strong>.*?<td>(\d{4})</td>'
# else:
patron = '<article class="TPost C post-\d+.*?<a href="([^"]+)">.*?'
patron +='"Year">(\d{4})<.*?src="([^"]+)".*?"Title">([^"]+)</h2>'
data = get_source(item.url)
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, year, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
if "|" in scrapedtitle:
scrapedtitle= scrapedtitle.split("|")
contentTitle = scrapedtitle[0].strip()
else:
contentTitle = scrapedtitle
contentTitle = re.sub('\(.*?\)','', contentTitle)
title = '%s [%s]'%(contentTitle, year)
thumbnail = 'http:'+scrapedthumbnail
itemlist.append(Item(channel=item.channel, action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
infoLabels={'year':year}
))
tmdb.set_infoLabels_itemlist(itemlist, True)
        # Pagination
url_next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next page-numbers">')
if url_next_page:
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all',
section=item.section))
except:
pass
return itemlist
def section(item):
logger.info()
itemlist = []
data = get_source(host)
action = 'list_all'
if item.section == 'genre':
data = scrapertools.find_single_match(data, '>Géneros</a>.*?</ul>')
elif item.section == 'alpha':
data = scrapertools.find_single_match(data, '<ul class="AZList"><li>.*?</ul>')
patron = '<a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for data_one, data_two in matches:
url = data_one
title = data_two
if title != 'Ver más':
new_item = Item(channel=item.channel, title= title, url=url, action=action, section=item.section)
itemlist.append(new_item)
return itemlist
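# Each player tab ("Opt1", "Opt2", ...) carries the language and quality; the
# hoster URL comes from the tab's domain attribute or the matching id="OptN" block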
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'TPlayerNv="Opt(\w\d+)".*?img src="(.*?)<span>\d+ - (.*?) - ([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, url_data, language, quality in matches:
if 'domain' in url_data:
url = scrapertools.find_single_match(url_data, 'domain=([^"]+)"')
else:
url = scrapertools.find_single_match(data, 'id="Opt%s">.*?file=([^"]+)"' % option)
if url != '' and 'youtube' not in url:
itemlist.append(Item(channel=item.channel, title='%s', url=url, language=IDIOMAS[language],
quality=quality, action='play'))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s] [%s]'%(i.server.capitalize(),
i.language, i.quality))
tmdb.set_infoLabels_itemlist(itemlist, True)
    try:
        itemlist.append(trailer)  # 'trailer' is not defined in this channel; the bare except keeps this a no-op
    except:
        pass
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return list_all(item)
else:
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
        if categoria == 'infantiles':
            item.url = host + 'category/animacion'
        elif categoria == 'terror':
            item.url = host + 'category/terror'
        elif categoria == 'documentales':
            item.url = host + 'category/documental'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -1,22 +0,0 @@
{
"id": "danimados",
"name": "Danimados",
"active": true,
"adult": false,
"language": ["esp", "lat"],
"thumbnail": "https://imgur.com/kU5Lx1S.png",
"banner": "https://imgur.com/xG5xqBq.png",
"categories": [
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,200 +0,0 @@
# -*- coding: utf-8 -*-
import re
import base64
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
host = "https://www.danimados.com/"
list_servers = ['openload',
'okru',
'rapidvideo'
]
list_quality = ['default']
def mainlist(item):
logger.info()
thumb_series = get_thumb("channels_tvshow.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="mainpage", title="Categorías", url=host,
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas Animadas", url=host+"peliculas/", extra="Peliculas Animadas",
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host + "?s=",
thumbnail=thumb_series))
autoplay.show_option(item.channel, itemlist)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ","+")
item.url = host + "?s=" + texto
if texto != '':
return sub_search(item)
return []  # avoid returning None when the search text is empty
def sub_search(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?s)class="thumbnail animation-.*?href=([^>]+).*?'
patron += 'src=(.*?(?:jpg|jpeg)).*?'
patron += 'alt=(?:"|)(.*?)(?:"|>).*?'
patron += 'class=year>(.*?)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
scrapedyear = scrapertools.find_single_match(scrapedyear, '(\d{4})')  # the capture no longer contains the 'class=year>' prefix, so match the year directly
item.action = "findvideos"
item.contentTitle = scrapedtitle
item.contentSerieName = ""
if "serie" in scrapedurl:
item.action = "episodios"
item.contentTitle = ""
item.contentSerieName = scrapedtitle
title = scrapedtitle
if scrapedyear:
item.infoLabels['year'] = int(scrapedyear)
title += " (%s)" %item.infoLabels['year']
itemlist.append(item.clone(thumbnail = scrapedthumbnail,
title = title,
url = scrapedurl
))
tmdb.set_infoLabels(itemlist)
return itemlist
def mainpage(item):
logger.info()
itemlist = []
data1 = httptools.downloadpage(item.url).data
data1 = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data1)
patron_sec='<divclass=head-main-nav>(.+?)peliculas\/>'
patron='<ahref=([^"]+)>([^"]+)<\/a>'#scrapedurl, #scrapedtitle
data = scrapertools.find_single_match(data1, patron_sec)
matches = scrapertools.find_multiple_matches(data, patron)
if item.title=="Géneros" or item.title=="Categorías":
for scrapedurl, scrapedtitle in matches:
if "Películas Animadas"!=scrapedtitle:
itemlist.append(
Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="lista"))
return itemlist
else:
# editor's note: the pattern above only captures (url, title) pairs, so
# unpacking three values raised ValueError; iterate over two and drop the
# unreachable duplicate return.
for scrapedurl, scrapedtitle in matches:
itemlist.append(
Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="episodios",
show=scrapedtitle))
tmdb.set_infoLabels(itemlist)
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if item.extra == "Peliculas Animadas":
data_lista = scrapertools.find_single_match(data, '(?is)archive-content(.*?)class=pagination')
else:
data_lista = scrapertools.find_single_match(data, 'class=items><article(.+?)<\/div><\/article><\/div>')
patron = '(?is)src=(.*?(?:jpg|jpeg)).*?'
patron += 'alt=(?:"|)(.*?)(?:"|>).*?'
patron += 'href=([^>]+)>.*?'
patron += 'title.*?<span>([^<]+)<'
matches = scrapertools.find_multiple_matches(data_lista, patron)
for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedyear in matches:
if item.title=="Peliculas Animadas":
itemlist.append(
item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, contentType="movie",
action="findvideos", contentTitle=scrapedtitle, infoLabels={'year':scrapedyear}))
else:
itemlist.append(
item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
context=autoplay.context,action="episodios", contentSerieName=scrapedtitle))
tmdb.set_infoLabels(itemlist)
next_page = scrapertools.find_single_match(data, 'rel=next href=([^>]+)>')
if next_page:
itemlist.append(item.clone(action="lista", title="Página siguiente>>", url=next_page, extra=item.extra))
return itemlist
def episodios(item):
logger.info()
itemlist = []
infoLabels = {}
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info(data)  # editor's note: debug dump of the whole page; disabled to keep logs usable
patron = '<divid=episodes (.+?)<\/div><\/div><\/div>'
data_lista = scrapertools.find_single_match(data,patron)
contentSerieName = item.title
patron_caps = 'href=(.+?)><imgalt=".+?" '
patron_caps += 'src=([^"]+)><\/a>.*?'
patron_caps += 'numerando>([^<]+).*?'
patron_caps += 'episodiotitle>.*?>([^<]+)<\/a>'
matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
for scrapedurl, scrapedthumbnail, scrapedtempepi, scrapedtitle in matches:
tempepi=scrapedtempepi.split(" - ")
if tempepi[0]=='Pel':
tempepi[0]=0
title="{0}x{1} - ({2})".format(tempepi[0], tempepi[1].zfill(2), scrapedtitle)
item.infoLabels["season"] = tempepi[0]
item.infoLabels["episode"] = tempepi[1]
itemlist.append(item.clone(#thumbnail=scrapedthumbnail,
action="findvideos", title=title, url=scrapedurl))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir " + contentSerieName + " a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=contentSerieName))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'player-option-\d+.*?'
patron += 'data-sv=(\w+).*?'
patron += 'data-user="([^"]+)'
matches = scrapertools.find_multiple_matches(data, patron)
headers = {"X-Requested-With":"XMLHttpRequest"}
for scrapedserver, scrapeduser in matches:
# editor's note: 'scrapedserver' from the match goes unused (the endpoint is always queried with sv=mp4); send the AJAX header defined above
data1 = httptools.downloadpage("https://space.danimados.space/gilberto.php?id=%s&sv=mp4" % scrapeduser, headers=headers).data
data1 = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data1)
url = base64.b64decode(scrapertools.find_single_match(data1, '<iframe data-source="([^"]+)"'))
url1 = devuelve_enlace(url)
if url1:
itemlist.append(item.clone(title="Ver en %s",url=url1, action="play"))
tmdb.set_infoLabels(itemlist)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType=="movie" and item.contentChannel!='videolibrary':
itemlist.append(
item.clone(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library"))
autoplay.start(itemlist, item)
return itemlist
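# Flow note (reconstructed from the code above): gilberto.php answers with an
# <iframe data-source="..."> whose value is base64; decoding it yields either a
# direct hoster url or a protocol-relative danimados page that
# devuelve_enlace() resolves down to its final "sources ... file:" link.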
def play(item):
item.thumbnail = item.contentThumbnail
return [item]
def devuelve_enlace(url1):
if 'danimados' in url1:
url = 'https:' + url1
new_data = httptools.downloadpage(url).data
new_data = new_data.replace('"',"'")
url1 = scrapertools.find_single_match(new_data, "sources.*?file: '([^']+)")
return url1

View File

@@ -1,22 +0,0 @@
{
"id": "descargacineclasico",
"name": "descargacineclasico",
"language": ["esp", "cast"],
"active": true,
"adult": false,
"banner": "descargacineclasico2.png",
"thumbnail": "descargacineclasico2.png",
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,139 +0,0 @@
# -*- coding: utf-8 -*-
import re
from channelselector import get_thumb
from platformcode import logger, config
from core import scrapertools, httptools
from core import servertools
from core import tmdb
from core.item import Item
from lib import unshortenit
host = "http://www.descargacineclasico.net"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Últimas agregadas", action="agregadas",
url=host, viewmode="movie_with_plot", thumbnail=get_thumb('last', auto=True)))
itemlist.append(Item(channel=item.channel, title="Listado por género", action="porGenero",
url=host, thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Listado alfabetico", action="porLetra",
url=host + "/cine-online/", thumbnail=get_thumb('alphabet', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host,
thumbnail=get_thumb('search', auto=True)))
return itemlist
def porLetra(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'noindex,nofollow" href="([^"]+)">(\w+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for url, titulo in matches:
itemlist.append( Item(channel=item.channel , action="agregadas" , title=titulo, url=url, viewmode="movie_with_plot"))
return itemlist
def porGenero(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<ul class="columnas">(.*?)</ul>'
bloques = re.compile(patron,re.DOTALL).findall(data)
if not bloques:  # evita un IndexError si cambia la estructura de la web
return itemlist
patron = '<li.*?>.*?href="([^"]+).*?>([^<]+)'
matches = re.compile(patron,re.DOTALL).findall(bloques[0])
for url, genero in matches:
if genero == "Erótico" and config.get_setting("adult_mode") == 0:
continue
itemlist.append( Item(channel=item.channel , action="agregadas" , title=genero,url=url, viewmode="movie_with_plot"))
return itemlist
def search(item,texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "?s=" + texto
try:
return agregadas(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def agregadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
fichas = re.sub(r"\n|\s{2}","",scrapertools.find_single_match(data,'<div class="review-box-container">(.*?)wp-pagenavi'))
patron = '<div class="post-thumbnail"><a href="([^"]+)".*?' # url
patron+= 'title="([^"]+)".*?' # title
patron+= 'src="([^"]+).*?' # thumbnail
patron+= '<p>([^<]+)' # plot
matches = re.compile(patron,re.DOTALL).findall(fichas)
for url, title, thumbnail, plot in matches:
title = title.replace("Descargar y ver Online","").strip()
year = scrapertools.find_single_match(title, '\(([0-9]{4})')
fulltitle = title.replace("(%s)" %year,"").strip()
itemlist.append( Item(action="findvideos",
channel=item.channel,
contentSerieName="",
title=title+" ",
fulltitle=fulltitle ,
infoLabels={'year':year},
url=url ,
thumbnail=thumbnail,
plot=plot,
show=title) )
tmdb.set_infoLabels(itemlist)
# Paginación
try:
patron_nextpage = r'<a class="nextpostslink" rel="next" href="([^"]+)'
next_page = re.compile(patron_nextpage,re.DOTALL).findall(data)
itemlist.append( Item(channel=item.channel, action="agregadas", title="Página siguiente >>" , url=next_page[0], viewmode="movie_with_plot") )
except: pass
return itemlist
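# Editor's example (hypothetical entry) of the title/year split done above: a
# title 'Casablanca (1942) Descargar y ver Online' becomes title
# 'Casablanca (1942)', year '1942' and fulltitle 'Casablanca' before the
# TMDB lookup.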
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.unescape(data)
patron = '#div_\d_\D.+?<img id="([^"]+).*?<span>.*?</span>.*?<span>(.*?)</span>.*?imgdes.*?imgdes/([^\.]+).*?<a href=([^\s]+)' #Añado calidad
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedidioma, scrapedcalidad, scrapedserver, scrapedurl in matches:
while True:
loc = httptools.downloadpage(scrapedurl, follow_redirects=False).headers.get("location", "")
if not loc or "/ad/locked" in loc:
break
scrapedurl = loc
scrapedurl = scrapedurl.replace('"','')
scrapedurl, c = unshortenit.unshorten_only(scrapedurl)
title = item.title + "_" + scrapedidioma + "_"+ scrapedserver + "_" + scrapedcalidad
itemlist.append( item.clone(action="play",
title=title,
url=scrapedurl) )
itemlist = servertools.get_servers_itemlist(itemlist)
tmdb.set_infoLabels(itemlist)
if itemlist:
itemlist.append(Item(channel = item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta"))
# Opción "Añadir esta película a la biblioteca de KODI"
if item.extra != "library":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
contentTitle = item.contentTitle
))
return itemlist
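# Editor's sketch of the hop-by-hop unshortening used in findvideos() above:
# request without following redirects and walk 'location' headers until none
# is returned or an ad-lock page appears. Standalone version of the same loop:
def _demo_unshorten(url):
    # httptools is already imported at the top of this module
    while True:
        loc = httptools.downloadpage(url, follow_redirects=False).headers.get("location", "")
        if not loc or "/ad/locked" in loc:
            return url
        url = loc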
def play(item):
item.thumbnail = item.contentThumbnail
return [item]

View File

@@ -1,37 +0,0 @@
{
"id": "dilo",
"name": "Dilo",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "https://s22.postimg.cc/u6efsniqp/dilo.png",
"banner": "",
"categories": [
"tvshow",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"CAST",
"LAT",
"VOSE"
]
}
]
}

View File

@@ -1,296 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel Dilo -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import jsontools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'https://www.dilo.nu/'
IDIOMAS = {'Español': 'CAST', 'Latino': 'LAT', 'Subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'streamango', 'powvideo', 'clipwatching', 'streamplay', 'streamcherry', 'gamovideo']
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
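# Minimal usage sketch of get_source(): it collapses newlines/tabs/double
# spaces, which is what lets the single-line regexes in this channel match
# across originally multi-line HTML (pattern fragment taken from list_all()).
def _demo_get_source():
    data = get_source(host + 'catalogue')
    return scrapertools.find_single_match(data, 'font-weight-500">([^<]+)<')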
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title="Nuevos capitulos", action="latest_episodes", url=host,
thumbnail=get_thumb('new episodes', auto=True)))
itemlist.append(Item(channel=item.channel, title="Ultimas", action="latest_shows", url=host,
thumbnail=get_thumb('last', auto=True)))
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host + 'catalogue',
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, title="Generos", action="section",
url=host + 'catalogue', thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Por Años", action="section", url=host + 'catalogue',
thumbnail=get_thumb('year', auto=True)))
itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url= host+'search?s=',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<div class="col-lg-2 col-md-3 col-6 mb-3"><a href="([^"]+)".*?<img src="([^"]+)".*?'
patron += 'font-weight-500">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
scrapedtitle = scrapedtitle
thumbnail = scrapedthumbnail
new_item = Item(channel=item.channel, title=scrapedtitle, url=url,
thumbnail=thumbnail)
new_item.contentSerieName=scrapedtitle
new_item.action = 'seasons'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginacion
if itemlist != []:
page_base = host + 'catalogue'
next_page = scrapertools.find_single_match(data, '<a href="([^ ]+)" aria-label="Netx">')
if next_page != '':
itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>',
url=page_base+next_page, thumbnail=get_thumb("more.png"),
type=item.type))
return itemlist
def section(item):
logger.info()
itemlist = []
data=get_source(item.url)
if item.title == 'Generos':
data = scrapertools.find_single_match(data, '>Todos los generos</button>.*?<button class')
elif 'Años' in item.title:
data = scrapertools.find_single_match(data, '>Todos los años</button>.*?<button class')
patron = 'input" id="([^"]+)".*?name="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for id, name in matches:
url = '%s?%s=%s' % (item.url, name, id)
title = id.capitalize()
itemlist.append(Item(channel=item.channel, title=title, url=url, action='list_all'))
return itemlist
def latest_episodes(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<a class="media" href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?'
patron += 'width: 97%">([^<]+)</div><div>(\d+x\d+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedcontent, scrapedep in matches:
title = '%s)' % (scrapedtitle.replace(' Online ', ' ('))
contentSerieName = scrapedcontent
itemlist.append(Item(channel=item.channel, action='findvideos', url=scrapedurl, thumbnail=scrapedthumbnail,
title=title, contentSerieName=contentSerieName, type='episode'))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def latest_shows(item):
logger.info()
itemlist = []
data = get_source(item.url)
data = scrapertools.find_single_match(data, '>Nuevas series</div>.*?text-uppercase"')
patron = '<div class="col-lg-3 col-md-4 col-6 mb-3"><a href="([^"]+)".*?src="([^"]+)".*?weight-500">([^<]+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
title = scrapedtitle
contentSerieName = scrapedtitle
itemlist.append(Item(channel=item.channel, action='seasons', url=scrapedurl, thumbnail=scrapedthumbnail,
title=title, contentSerieName=contentSerieName))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def seasons(item):
from core import jsontools
import urllib
logger.info()
itemlist=[]
data=get_source(item.url)
serie_id = scrapertools.find_single_match(data, '{"item_id": (\d+)}')
post = {'item_id': serie_id}
post = urllib.urlencode(post)
seasons_url = '%sapi/web/seasons.php' % host
headers = {'Referer':item.url}
data = jsontools.load(httptools.downloadpage(seasons_url, post=post, headers=headers).data)
infoLabels = item.infoLabels
for season_data in data:  # renamed: 'dict' shadowed the built-in
season = season_data['number']
if season != '0':
infoLabels['season'] = season
title = 'Temporada %s' % season
itemlist.append(Item(channel=item.channel, url=item.url, title=title, action='episodesxseason',
                             contentSeasonNumber=season, id=serie_id, infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
def episodesxseason(item):
logger.info()
from core import jsontools
import urllib
itemlist = []
season = item.infoLabels['season']
post = {'item_id': item.id, 'season_number': season}
post = urllib.urlencode(post)
seasons_url = '%sapi/web/episodes.php' % host
headers = {'Referer': item.url}
data = jsontools.load(httptools.downloadpage(seasons_url, post=post, headers=headers).data)
infoLabels = item.infoLabels
for episode_data in data:  # renamed: 'dict' shadowed the built-in
episode = episode_data['number']
epi_name = episode_data['name']
title = '%sx%s - %s' % (season, episode, epi_name)
url = '%s%s/' % (host, episode_data['permalink'])
infoLabels['episode'] = episode
itemlist.append(Item(channel=item.channel, title=title, action='findvideos', url=url,
                             contentEpisodeNumber=episode, id=item.id, infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseason(tempitem)
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'data-link="([^"]+)">.*?500">([^<]+)<.*?>Reproducir en ([^<]+)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for enc_url, server, language in matches:
if not config.get_setting('unify'):
title = ' [%s]' % language
else:
title = ''
itemlist.append(Item(channel=item.channel, title='%s'+title, url=enc_url, action='play',
language=IDIOMAS[language], server=server, infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
return itemlist
def decode_link(enc_url):
logger.info()
url = enc_url  # default, so the function never raises NameError when both lookups fail
try:
new_data = get_source(enc_url)
new_enc_url = scrapertools.find_single_match(new_data, 'src="([^"]+)"')
try:
url = httptools.downloadpage(new_enc_url, follow_redirects=False).headers['location']
except:
if not 'jquery' in new_enc_url:
url = new_enc_url
except:
pass
return url
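# Flow note: decode_link() reads the iframe src from the embed page and probes
# it without following redirects; the 'location' header carries the final
# hoster url, and on any failure the original link is handed back unchanged so
# play() can still try the resolvers directly.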
def play(item):
logger.info()
item.url = decode_link(item.url)
itemlist = [item]
return itemlist
def search(item, texto):
logger.info()
import urllib
itemlist = []
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
try:
return list_all(item)
except:
itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
return itemlist

View File

@@ -1,12 +0,0 @@
{
"id": "discoverymx",
"name": "Discoverymx",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "discoverymx.png",
"banner": "discoverymx.png",
"categories": [
"documentary"
]
}

View File

@@ -1,174 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import scrapertools
from core import servertools
from core import httptools
from core.item import Item
from platformcode import logger
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Documentales - Novedades", action="listvideos",
url="http://discoverymx.blogspot.com/"))
itemlist.append(Item(channel=item.channel, title="Documentales - Series Disponibles", action="DocuSeries",
url="http://discoverymx.blogspot.com/"))
itemlist.append(Item(channel=item.channel, title="Documentales - Tag", action="DocuTag",
url="http://discoverymx.blogspot.com/"))
itemlist.append(Item(channel=item.channel, title="Documentales - Archivo por meses", action="DocuARCHIVO",
url="http://discoverymx.blogspot.com/"))
return itemlist
def DocuSeries(item):
logger.info()
itemlist = []
# Descarga la página
data = httptools.downloadpage(item.url).data
# Extrae las entradas (carpetas)
patronvideos = '<li><b><a href="([^"]+)" target="_blank">([^<]+)</a></b></li>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for match in matches:
scrapedurl = match[0]
scrapedtitle = match[1]
scrapedthumbnail = ""
scrapedplot = ""
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
return itemlist
def DocuTag(item):
logger.info()
itemlist = []
# Descarga la página
data = httptools.downloadpage(item.url).data
patronvideos = "<a dir='ltr' href='([^']+)'>([^<]+)</a>[^<]+<span class='label-count' dir='ltr'>(.+?)</span>"
matches = re.compile(patronvideos, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for match in matches:
scrapedurl = match[0]
scrapedtitle = match[1] + " " + match[2]
scrapedthumbnail = ""
scrapedplot = ""
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
return itemlist
def DocuARCHIVO(item):
logger.info()
itemlist = []
# Descarga la página
data = httptools.downloadpage(item.url).data
patronvideos = "<a class='post-count-link' href='([^']+)'>([^<]+)</a>[^<]+"
patronvideos += "<span class='post-count' dir='ltr'>(.+?)</span>"
matches = re.compile(patronvideos, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for match in matches:
scrapedurl = match[0]
scrapedtitle = match[1] + " " + match[2]
scrapedthumbnail = ""
scrapedplot = ""
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
return itemlist
def listvideos(item):
logger.info()
itemlist = []
scrapedthumbnail = ""
scrapedplot = ""
# Descarga la página
data = httptools.downloadpage(item.url).data
patronvideos = "<h3 class='post-title entry-title'[^<]+"
patronvideos += "<a href='([^']+)'>([^<]+)</a>.*?"
patronvideos += "<div class='post-body entry-content'(.*?)<div class='post-footer'>"
matches = re.compile(patronvideos, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for match in matches:
scrapedtitle = match[1]
scrapedtitle = re.sub("<[^>]+>", " ", scrapedtitle)
scrapedtitle = scrapertools.unescape(scrapedtitle)
scrapedurl = match[0]
regexp = re.compile(r'src="(http[^"]+)"')
matchthumb = regexp.search(match[2])
if matchthumb is not None:
scrapedthumbnail = matchthumb.group(1)
matchplot = re.compile('<div align="center">(<img.*?)</span></div>', re.DOTALL).findall(match[2])
if len(matchplot) > 0:
scrapedplot = matchplot[0]
# print matchplot
else:
scrapedplot = ""
scrapedplot = re.sub("<[^>]+>", " ", scrapedplot)
scrapedplot = scrapertools.unescape(scrapedplot)
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
# Añade al listado de XBMC
itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
# Extrae la marca de siguiente página
patronvideos = "<a class='blog-pager-older-link' href='([^']+)'"
matches = re.compile(patronvideos, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
if len(matches) > 0:
scrapedtitle = "Página siguiente"
scrapedurl = urlparse.urljoin(item.url, matches[0])
scrapedthumbnail = ""
scrapedplot = ""
itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
# Descarga la página
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, "<div class='post-body entry-content'(.*?)<div class='post-footer'>")
# Busca los enlaces a los videos
listavideos = servertools.findvideos(data)
for video in listavideos:
videotitle = scrapertools.unescape(video[0])
url = video[1]
server = video[2]
# xbmctools.addnewvideo( item.channel , "play" , category , server , , url , thumbnail , plot )
itemlist.append(Item(channel=item.channel, action="play", server=server, title=videotitle, url=url,
thumbnail=item.thumbnail, plot=item.plot, fulltitle=item.title, folder=False))
return itemlist
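# Editor's sketch (hypothetical HTML snippet) of the generic detection used in
# findvideos() above: servertools.findvideos() scans raw HTML for known hoster
# url patterns and yields (title, url, server) tuples ready to become Items.
def _demo_generic_detect():
    sample = '<a href="https://ok.ru/videoembed/123456">ver capitulo</a>'
    return [(server, url) for title, url, server in servertools.findvideos(sample)]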

View File

@@ -1,106 +0,0 @@
{
"id": "divxtotal",
"name": "Divxtotal",
"active": true,
"adult": false,
"language": ["esp", "cast"],
"thumbnail": "http://imgur.com/Madj03A.jpg",
"categories": [
"torrent",
"movie",
"tvshow"
],
"settings": [
{
"default": true,
"enabled": true,
"id": "include_in_global_search",
"label": "Incluir en busqueda global",
"type": "bool",
"visible": true
},
{
"default": true,
"enabled": true,
"id": "modo_grafico",
"label": "Buscar información extra (TMDB)",
"type": "bool",
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"CAST",
"LAT",
"VO",
"VOS",
"VOSE"
]
},
{
"id": "emergency_urls",
"type": "list",
"label": "Se quieren guardar Enlaces de Emergencia por si se cae la Web?",
"default": 1,
"enabled": true,
"visible": true,
"lvalues": [
"No",
"Guardar",
"Borrar",
"Actualizar"
]
},
{
"id": "emergency_urls_torrents",
"type": "bool",
"label": "Se quieren guardar Torrents de Emergencia por si se cae la Web?",
"default": true,
"enabled": true,
"visible": "!eq(-1,'No')"
},
{
"id": "timeout_downloadpage",
"type": "list",
"label": "Timeout (segs.) en descarga de páginas o verificación de servidores",
"default": 5,
"enabled": true,
"visible": true,
"lvalues": [
"None",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10"
]
},
{
"id": "seleccionar_ult_temporadda_activa",
"type": "bool",
"label": "Seleccionar para Videoteca si estará activa solo la última Temporada",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": false
}
]
}

View File

@@ -1,879 +0,0 @@
# -*- coding: utf-8 -*-
import re
import sys
import urllib
import urlparse
import time
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb
from lib import generictools
from channels import filtertools
from channels import autoplay
#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'}
IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['torrent']
host = 'https://www.divxtotal3.net/'
channel = 'divxtotal'
categoria = channel.capitalize()
color1, color2, color3 = ['0xFF58D3F7', '0xFF2E64FE', '0xFF0404B4']
__modo_grafico__ = config.get_setting('modo_grafico', channel)
modo_ultima_temp = config.get_setting('seleccionar_ult_temporadda_activa', channel) #Actualización sólo últ. Temporada?
timeout = config.get_setting('timeout_downloadpage', channel)
def mainlist(item):
logger.info()
itemlist = []
thumb_cartelera = get_thumb("now_playing.png")
thumb_pelis_hd = get_thumb("channels_movie_hd.png")
thumb_series = get_thumb("channels_tvshow.png")
thumb_buscar = get_thumb("search.png")
thumb_separador = get_thumb("next.png")
thumb_settings = get_thumb("setting_0.png")
autoplay.init(item.channel, list_servers, list_quality)
item.url_plus = "peliculas/"
itemlist.append(Item(channel=item.channel, title="Películas", action="categorias", url=host + item.url_plus, url_plus=item.url_plus, thumbnail=thumb_cartelera, extra="Películas"))
item.url_plus = "peliculas-hd/"
itemlist.append(Item(channel=item.channel, title="Películas HD", action="categorias", url=host + item.url_plus, url_plus=item.url_plus, thumbnail=thumb_pelis_hd, extra="Películas HD"))
item.url_plus = "peliculas-dvdr/"
itemlist.append(Item(channel=item.channel, title="Películas DVDR", action="categorias", url=host + item.url_plus, url_plus=item.url_plus, thumbnail=thumb_pelis_hd, extra="Películas DVDR"))
itemlist.append(Item(channel=item.channel, url=host, title="", folder=False, thumbnail=thumb_separador))
itemlist.append(Item(channel=item.channel, url=host, title="Series", action="submenu", thumbnail=thumb_series, extra="series"))
itemlist.append(Item(channel=item.channel, url=host, title="", folder=False, thumbnail=thumb_separador))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url=host + "?s=%s", thumbnail=thumb_buscar, extra="search"))
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador))
itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal", thumbnail=thumb_settings))
autoplay.show_option(item.channel, itemlist) #Activamos Autoplay
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return
def submenu(item):
logger.info()
itemlist = []
thumb_series = get_thumb("channels_tvshow.png")
if item.extra == "series":
item.url_plus = "series-12/"
itemlist.append(item.clone(title="Series completas", action="listado", url=item.url + item.url_plus, url_plus=item.url_plus, thumbnail=thumb_series, extra="series"))
itemlist.append(item.clone(title="Alfabético A-Z", action="alfabeto", url=item.url + item.url_plus + "?s=letra-%s", url_plus=item.url_plus, thumbnail=thumb_series, extra="series"))
return itemlist
def categorias(item):
logger.info()
itemlist = []
if item.extra3:
extra3 = item.extra3
del item.extra3
else:
extra3 = False
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
data = unicode(data, "utf-8", errors="replace").encode("utf-8")
except:
pass
patron = '<li><a class="alist" href="([^"]+)">(.*?)<\/a><\/li>'
#Verificamos si se ha cargado una página, y si además tiene la estructura correcta
if not data or not scrapertools.find_single_match(data, patron):
item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada
if item.intervencion: #Sí ha sido clausurada judicialmente
for clone_inter, autoridad in item.intervencion:
thumb_intervenido = get_thumb(autoridad)
itemlist.append(item.clone(action='', title="[COLOR yellow]" + clone_inter.capitalize() + ': [/COLOR]' + intervenido_judicial + '. Reportar el problema en el foro', thumbnail=thumb_intervenido))
return itemlist #Salimos
logger.error("ERROR 01: SUBMENU: La Web no responde o ha cambiado de URL: " + item.url + data)
#Si no hay datos consistentes, llamamos al método de fail_over para que encuentre un canal que esté activo y pueda gestionar el submenú
if not data: #Si no ha logrado encontrar nada, salimos
itemlist.append(item.clone(action='', title=item.category + ': ERROR 01: SUBMENU: La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
matches = re.compile(patron, re.DOTALL).findall(data)
if not matches:
logger.error("ERROR 02: SUBMENU: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.category + ': ERROR 02: SUBMENU: Ha cambiado la estructura de la Web. Reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
#logger.debug(item.url_plus)
#logger.debug(matches)
#Insertamos las cabeceras para todas las peliculas de la Aalidad, por Año, Alfabético, por Género, y Otras Calidades
if not extra3:
itemlist.append(item.clone(title="Todas las " + item.extra.upper(), action="listado"))
itemlist.append(item.clone(title="Alfabético A-Z", action="alfabeto", url=item.url + "?s=letra-%s"))
#itemlist.append(item.clone(title="Géneros", url=item.url))
for scrapedurl, scrapedtitle in matches:
if item.url_plus not in scrapedurl:
continue
if "Todas" in scrapedtitle:
continue
title = scrapedtitle.strip()
#Preguntamos por las entradas que corresponden al "extra"
# editor's note: the same quality list was duplicated verbatim in both branches; factored out into one shared list
calidades = ['ac3 51', 'bluray rip', 'series', 'serie', 'subtitulada', 'vose', 'bdrip', 'dvdscreener', 'brscreener r6', 'brscreener', 'webscreener', 'dvd', 'hdrip', 'screener', 'screeer', 'webrip', 'brrip', 'dvb', 'dvdrip', 'dvdsc', 'dvdsc - r6', 'hdts', 'hdtv', 'kvcd', 'line', 'ppv', 'telesync', 'ts hq', 'ts hq proper', '480p', '720p', 'ac3', 'bluray', 'camrip', 'ddc', 'hdtv - screener', 'tc screener', 'ts screener', 'ts screener alto', 'ts screener medio', 'vhs screener']
if extra3 == 'now':
if scrapedtitle.lower() in calidades:
itemlist.append(item.clone(action="listado", title=title, url=scrapedurl, extra2="categorias"))
elif scrapedtitle.lower() in calidades:
extra3 = 'next'
else:
itemlist.append(item.clone(action="listado", title=" " + title.capitalize(), url=scrapedurl, extra2="categorias"))
if extra3 == 'next':
itemlist.append(item.clone(action="categorias", title="Otras Calidades", url=item.url + '-0-0-fx-1-1-.fx', extra2="categorias", extra3='now'))
return itemlist
def alfabeto(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="listado", title="0-9", url=item.url % "0"))
for letra in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']:
itemlist.append(item.clone(action="listado", title=letra, url=item.url % letra.lower()))
return itemlist
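# Example of the template expansion (template built in submenu()/categorias()):
# url = host + "series-12/?s=letra-%s" yields one listado() entry per bucket:
# "?s=letra-0", "?s=letra-a", ..., "?s=letra-z".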
def listado(item):
logger.info()
itemlist = []
item.category = categoria
#logger.debug(item)
curr_page = 1 # Página inicial
last_page = 99999 # Última página inicial
if item.curr_page:
curr_page = int(item.curr_page) # Si viene de una pasada anterior, lo usamos
del item.curr_page # ... y lo borramos
if item.last_page:
last_page = int(item.last_page) # Si viene de una pasada anterior, lo usamos
del item.last_page # ... y lo borramos
cnt_tot = 40 # Poner el num. máximo de items por página
cnt_title = 0 # Contador de líneas insertadas en Itemlist
inicio = time.time() # Controlaremos que el proceso no exceda de un tiempo razonable
fin = inicio + 5 # Después de este tiempo pintamos (segundos)
timeout_search = timeout # Timeout para descargas
if item.extra == 'search':
timeout_search = timeout * 2 # Timeout un poco más largo para las búsquedas
if timeout_search < 5:
timeout_search = 5 # Timeout un poco más largo para las búsquedas
#Sistema de paginado para evitar páginas vacías o semi-vacías en casos de búsquedas con series con muchos episodios
title_lista = [] # Guarda la lista de series que ya están en Itemlist, para no duplicar lineas
if item.title_lista: # Si viene de una pasada anterior, la lista ya estará guardada
title_lista.extend(item.title_lista) # Se usa la lista de páginas anteriores en Item
del item.title_lista # ... limpiamos
if not item.extra2: # Si viene de Catálogo o de Alfabeto
item.extra2 = ''
next_page_url = item.url
#Máximo num. de líneas permitidas por TMDB. Máx de 5 segundos por Itemlist para no degradar el rendimiento (fin = inicio + 5)
while cnt_title <= cnt_tot * 0.45 and curr_page <= last_page and fin > time.time():
# Descarga la página
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)|&nbsp;", "", httptools.downloadpage(next_page_url, timeout=timeout_search).data)
data = unicode(data, "utf-8", errors="replace").encode("utf-8")
except:
pass
curr_page += 1 #Apunto ya a la página siguiente
if not data and not item.extra2: #Si la web está caída salimos sin dar error
logger.error("ERROR 01: LISTADO: La Web no responde o ha cambiado de URL: " + item.url + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: LISTADO:. La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
break #si no hay más datos, algo no funciona, pintamos lo que tenemos
#Patrón para todo, menos para Series completas, incluido búsquedas en cualquier caso
patron = '<tr><td(?: class="[^"]+")?><a href="([^"]+)".?title="([^"]+)".*?<\/a><\/td><td(?: class="[^"]+")?>(?:<a href="[^"]+">)?(.*?)(?:<\/a>)?<\/td><td(?: class="[^"]+")?>.*?<\/td><td(?: class="[^"]+")?>(.*?)<\/td><\/tr>'
#Si son series completas, ponemos un patrón especializado
if item.extra == 'series':
patron = '<div class="[^"]+"><p class="[^"]+"><a href="([^"]+)".?title="([^"]+)"><img src="([^"]+)".*?<a href=\'[^\']+\'.?title="([^"]+)".*?<\/p><\/div>'
matches = re.compile(patron, re.DOTALL).findall(data)
if not matches and not '<p>Lo sentimos, pero que esta buscando algo que no esta aqui. </p>' in data and not item.extra2 and not '<h2>Sin resultados</h2>' in data: #error (comilla recolocada: el "in data" estaba dentro del literal y la condición siempre era falsa)
item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada
if item.intervencion: #Sí ha sido clausurada judicialmente
item, itemlist = generictools.post_tmdb_episodios(item, itemlist) #Llamamos al método para el pintado del error
return itemlist #Salimos
logger.error("ERROR 02: LISTADO: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: LISTADO: Ha cambiado la estructura de la Web. Reportar el error con el log'))
break #si no hay más datos, algo no funciona, pintamos lo que tenemos
#logger.debug("PATRON: " + patron)
#logger.debug(matches)
#logger.debug(data)
#Buscamos la próxima y la última página
patron_next = "<ul class=\"pagination\">.*?\(current\).*?href='([^']+)'>(\d+)<\/a><\/li>"
#patron_last = "<ul class=\"pagination\">.*?\(current\).*?href='[^']+'>\d+<\/a><\/li>.*?href='[^']+\/(\d+)\/(?:\?s=[^']+)?'><span aria-hidden='[^']+'>&\w+;<\/span><\/a><\/li><\/ul><\/nav><\/div><\/div><\/div>"
patron_last = "<ul class=\"pagination\">.*?\(current\).*?href='[^']+'>\d+<\/a><\/li>.*?href='[^']+\/(\d+)\/(?:\?[^']+)?'>(?:\d+)?(?:<span aria-hidden='[^']+'>&\w+;<\/span>)?<\/a><\/li><\/ul><\/nav><\/div><\/div><\/div>"
try:
next_page_url, next_page = scrapertools.find_single_match(data, patron_next)
next_page = int(next_page)
except: #Si no lo encuentra, lo ponemos a 1
#logger.error('ERROR 03: LISTADO: Al obtener la paginación: ' + patron_next + ' / ' + patron_last + ' / ' + scrapertools.find_single_match(data, "<ul class=\"pagination\">.*?<\/span><\/a><\/li><\/ul><\/nav><\/div><\/div><\/div>"))
next_page = 1
#logger.debug('curr_page: ' + str(curr_page) + ' / next_page: ' + str(next_page) + ' / last_page: ' + str(last_page))
if last_page == 99999: #Si es el valor inicial, buscamos
try:
last_page = int(scrapertools.find_single_match(data, patron_last)) #lo cargamos como entero
except: #Si no lo encuentra, lo ponemos a 1
#logger.error('ERROR 03: LISTADO: Al obtener la paginación: ' + patron_next + ' / ' + patron_last + ' / ' + scrapertools.find_single_match(data, "<ul class=\"pagination\">.*?<\/span><\/a><\/li><\/ul><\/nav><\/div><\/div><\/div>"))
last_page = next_page
#logger.debug('curr_page: ' + str(curr_page) + ' / next_page: ' + str(next_page) + ' / last_page: ' + str(last_page))
#Empezamos el procesado de matches
for scrapedurl, scrapedtitle, cat_ppal, size in matches:
if "/programas" in scrapedurl or "/otros" in scrapedurl:
continue
title = scrapedtitle
url = scrapedurl
title = title.replace("á", "a").replace("é", "e").replace("í", "i").replace("ó", "o").replace("ú", "u").replace("ü", "u").replace("�", "ñ").replace("ñ", "ñ").replace("&atilde;", "a").replace("&etilde;", "e").replace("&itilde;", "i").replace("&otilde;", "o").replace("&utilde;", "u").replace("&ntilde;", "ñ").replace("&#8217;", "'")
extra = item.extra
#Si es una búsqueda, convierte los episodios en Series completas, aptas para la Videoteca
if extra == 'search' and '/series' in scrapedurl and not "Temp" in title and not "emporada" in title:
if scrapedurl in title_lista: #Si ya hemos procesado la serie, pasamos de los episodios adicionales
continue
# Descarga la página del episodio, buscando el enlace a la serie completa
data_serie = ''
try:
data_serie = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)|&nbsp;", "", httptools.downloadpage(scrapedurl, timeout=timeout).data)
data_serie = unicode(data_serie, "utf-8", errors="replace").encode("utf-8")
except:
pass
if not data_serie: #Si la web está caída salimos sin dar error. Pintamos el episodio
logger.error("ERROR 01: LISTADO: La Web no responde o ha cambiado de URL: " + scrapedurl + " / SERIE: " + scrapedurl)
else:
patron_serie = '<div id="where_i_am">.*?<a href="[^"]+">.*?<\/a>.*?<a href="([^"]+)">'
url = scrapertools.find_single_match(data_serie, patron_serie) #buscamos la url de la serie completa
if url:
url = host + url
extra = 'series' #es una serie completa
title_lista += [scrapedurl] #la añadimos a la lista de series completas procesadas
title = scrapedurl #salvamos el título de la serie completa
else:
url = scrapedurl #No se encuentra la Serie, se trata como Episodio suelto
#cnt_title += 1
item_local = item.clone() #Creamos copia de Item para trabajar
if item_local.tipo: #... y limpiamos
del item_local.tipo
if item_local.totalItems:
del item_local.totalItems
if item_local.post_num:
del item_local.post_num
if item_local.category:
del item_local.category
if item_local.intervencion:
del item_local.intervencion
if item_local.viewmode:
del item_local.viewmode
item_local.extra2 = True
del item_local.extra2
item_local.text_bold = True
del item_local.text_bold
item_local.text_color = True
del item_local.text_color
if item_local.url_plus:
del item_local.url_plus
title_subs = [] #creamos una lista para guardar info importante
item_local.language = [] #creamos lista para los idiomas
item_local.quality = '' #iniciamos calidad
quality_alt = ''
if 'series' in cat_ppal or extra == 'series':
item_local.thumbnail = cat_ppal #si son series, contiene el thumb
else:
quality_alt = scrapedurl.lower().strip() #si no son series, contiene la calidad
item_local.thumbnail = '' #guardamos el thumb
item_local.extra = extra #guardamos el extra procesado
item_local.url = url #guardamos la url final
item_local.context = "['buscar_trailer']"
item_local.contentType = "movie" #por defecto, son películas
item_local.action = "findvideos"
#Analizamos los formatos de la películas
if '/peliculas/' in scrapedurl or item_local.extra == 'Películas':
item_local.quality = 'HDRip '
elif '/peliculas-hd' in scrapedurl or item_local.extra == 'Películas HD':
item_local.quality = 'HD '
elif '/peliculas-dvdr' in scrapedurl or item_local.extra == 'Películas DVDR':
item_local.quality = 'DVDR '
elif 'subtituladas' in cat_ppal or item_local.extra == 'VOSE' or 'vose' in title.lower():
item_local.language += ['VOSE']
elif 'Version Original' in cat_ppal or item_local.extra == 'VO' or 'vo' in title.lower():
item_local.language += ['VO']
#Analizamos los formatos de series, temporadas y episodios
elif '/series' in scrapedurl or item_local.extra == 'series':
item_local.contentType = "tvshow"
item_local.action = "episodios"
item_local.season_colapse = True #Muestra las series agrupadas por temporadas
elif item_local.extra == 'episodios':
item_local.contentType = "episode"
item_local.extra = "episodios"
if "Temp" in title or "emporada" in title:
try:
item_local.contentSeason = int(scrapertools.find_single_match(title, '[t|T]emp.*?(\d+)'))
except:
item_local.contentSeason = 1
title = re.sub(r'[t|T]emp.*?\d+', '', title)
title_subs += ["Temporada"]
item_local.contentType = "season"
item_local.extra = "season"
if item_local.contentType == "movie": #para las peliculas ponemos el mismo extra
item_local.extra = "peliculas"
#Detectamos idiomas
if "latino" in scrapedurl.lower() or "latino" in title.lower():
item_local.language += ['LAT']
elif "vose" in scrapedurl.lower() or "vos" in scrapedurl.lower() or "vose" in title.lower() or "vos" in title.lower():
item_local.language += ['VOSE']
if item_local.language == []:
item_local.language = ['CAST']
#Detectamos el año
patron = '(\d{4})\s*?(?:\)|\])?$'
item_local.infoLabels["year"] = '-'
year = ''
year = scrapertools.find_single_match(title, patron)
if year:
title_alt = re.sub(patron, "", title)
title_alt = title_alt.strip()
if title_alt:
title = title_alt
try:
year = int(year)
if year >= 1970 and year <= 2040:
item_local.infoLabels["year"] = year
except:
pass
#Detectamos info importante a guardar para después de TMDB
if scrapertools.find_single_match(title, '[m|M].*?serie'):
title = re.sub(r'[m|M]iniserie', '', title)
title_subs += ["Miniserie"]
if scrapertools.find_single_match(title, '[s|S]aga'):
title = re.sub(r'[s|S]aga', '', title)
title_subs += ["Saga"]
if scrapertools.find_single_match(title, '[c|C]olecc'):
title = re.sub(r'[c|C]olecc...', '', title)
title_subs += ["Colección"]
#Empezamos a limpiar el título en varias pasadas
patron = '\s?-?\s?(line)?\s?-\s?$'
regex = re.compile(patron, re.I)
title = regex.sub("", title)
title = re.sub(r'\(\d{4}\s*?\)', '', title)
title = re.sub(r'\[\d{4}\s*?\]', '', title)
title = re.sub(r'[s|S]erie', '', title)
title = re.sub(r'- $', '', title)
title = re.sub(r'\d+[M|m|G|g][B|b]', '', title)
#Limpiamos el título de la basura innecesaria
title = title.replace("Dual", "").replace("dual", "").replace("Subtitulada", "").replace("subtitulada", "").replace("Subt", "").replace("subt", "").replace("Sub", "").replace("sub", "").replace("(Proper)", "").replace("(proper)", "").replace("Proper", "").replace("proper", "").replace("#", "").replace("(Latino)", "").replace("Latino", "").replace("LATINO", "").replace("Spanish", "").replace("Trailer", "").replace("Audio", "")
title = title.replace("HDTV-Screener", "").replace("DVDSCR", "").replace("TS ALTA", "").replace("- HDRip", "").replace("(HDRip)", "").replace("- Hdrip", "").replace("(microHD)", "").replace("(DVDRip)", "").replace("HDRip", "").replace("(BR-LINE)", "").replace("(HDTS-SCREENER)", "").replace("(BDRip)", "").replace("(BR-Screener)", "").replace("(DVDScreener)", "").replace("TS-Screener", "").replace(" TS", "").replace(" Ts", "").replace(" 480p", "").replace(" 480P", "").replace(" 720p", "").replace(" 720P", "").replace(" 1080p", "").replace(" 1080P", "").replace("DVDRip", "").replace(" Dvd", "").replace(" DVD", "").replace(" V.O", "").replace(" Unrated", "").replace(" UNRATED", "").replace(" unrated", "").replace("screener", "").replace("TS-SCREENER", "").replace("TSScreener", "").replace("HQ", "").replace("AC3 5.1", "").replace("Telesync", "").replace("Line Dubbed", "").replace("line Dubbed", "").replace("LineDuB", "").replace("Line", "").replace("XviD", "").replace("xvid", "").replace("XVID", "").replace("Mic Dubbed", "").replace("HD", "").replace("V2", "").replace("CAM", "").replace("VHS.SCR", "").replace("Dvd5", "").replace("DVD5", "").replace("Iso", "").replace("ISO", "").replace("Reparado", "").replace("reparado", "").replace("DVD9", "").replace("Dvd9", "")
#Obtenemos temporada y episodio si se trata de Episodios
if item_local.contentType == "episode":
patron = '(\d+)[x|X](\d+)'
try:
item_local.contentSeason, item_local.contentEpisodeNumber = scrapertools.find_single_match(title, patron)
except:
item_local.contentSeason = 1
item_local.contentEpisodeNumber = 0
#Si son episodios múltiples, lo extraemos
patron1 = '\d+[x|X]\d+.?(?:y|Y|al|Al)?.?\d+[x|X](\d+)'
epi_rango = scrapertools.find_single_match(title, patron1)
if epi_rango:
item_local.infoLabels['episodio_titulo'] = 'al %s' % epi_rango
title = re.sub(patron1, '', title)
else:
title = re.sub(patron, '', title)
#Terminamos de limpiar el título
title = re.sub(r'\??\s?\d*?\&.*', '', title)
title = re.sub(r'[\(|\[]\s+[\)|\]]', '', title)
title = title.replace('()', '').replace('[]', '').strip().lower().title()
item_local.from_title = title.strip().lower().title() #Guardamos esta etiqueta para posible desambiguación de título
#Salvamos el título según el tipo de contenido
if item_local.contentType == "movie":
item_local.contentTitle = title
else:
item_local.contentSerieName = title.strip().lower().title()
if item_local.contentType == "episode":
item_local.title = '%sx%s ' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2))
item_local.extra3 = 'completa'
else:
item_local.title = title.strip().lower().title()
if scrapertools.find_single_match(size, '\d+.\d+\s?[g|G|m|M][b|B]'):
size = size.replace('b', ' B').replace('B', ' B').replace('b', 'B').replace('g', 'G').replace('m', 'M').replace('.', ',')
item_local.quality += '[%s]' % size
#Guarda la variable temporal que almacena la info adicional del título a ser restaurada después de TMDB
item_local.title_subs = title_subs
#Salvamos y borramos el número de temporadas porque TMDB a veces hace tonterias. Lo pasamos como serie completa
if item_local.contentSeason and (item_local.contentType == "season" or item_local.contentType == "tvshow"):
item_local.contentSeason_save = item_local.contentSeason
del item_local.infoLabels['season']
#Ahora se filtra por idioma, si procede, y se pinta lo que vale
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist = filtertools.get_link(itemlist, item_local, list_language)
else:
itemlist.append(item_local.clone()) #Si no, pintar pantalla
cnt_title = len(itemlist) #Contador de líneas añadidas
#logger.debug(item_local)
#Pasamos a TMDB la lista completa Itemlist
tmdb.set_infoLabels(itemlist, __modo_grafico__)
#Llamamos al método para el maquillaje de los títulos obtenidos desde TMDB
item, itemlist = generictools.post_tmdb_listado(item, itemlist)
# Si es necesario añadir paginacion
if curr_page <= last_page:
if last_page:
title = '%s de %s' % (curr_page-1, last_page)
else:
title = '%s' % (curr_page - 1)  # paréntesis necesarios: '%s' % curr_page - 1 intentaría restar a una cadena
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente " + title, title_lista=title_lista, url=next_page_url, extra=item.extra, extra2=item.extra2, last_page=str(last_page), curr_page=str(curr_page)))
return itemlist
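# Editor's summary of the paging contract implemented above (all names from
# this file): curr_page, last_page and title_lista travel inside the
# ">> Página siguiente" Item and are deleted from the incoming Item on the
# next call; each screen stops after ~45% of cnt_tot entries (18 with the
# default 40), the last page, or 5 seconds of scraping, and search results
# skip any serie already emitted as serie completa thanks to title_lista.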
def findvideos(item):
logger.info()
itemlist = []
itemlist_t = [] #Itemlist total de enlaces
itemlist_f = [] #Itemlist de enlaces filtrados
if not item.language:
item.language = ['CAST'] #Castellano por defecto
matches = []
item.category = categoria
#logger.debug(item)
#Bajamos los datos de la página
data = ''
patron = '<a onclick="eventDownloadTorrent\(.*?\)".?class="linktorrent" href="([^"]+)"'
if item.contentType == 'movie': #Es una peli
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
data = unicode(data, "utf-8", errors="replace").encode("utf-8")
except:
pass
if not data:
logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS:. La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log'))
if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia?
matches = item.emergency_urls[0] #Restauramos matches
item.armagedon = True #Marcamos la situación como catastrófica
else:
if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca...
return item #Devolvemos el Item de la llamada
else:
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
if not item.armagedon: #Si es un proceso normal, seguimos
matches = re.compile(patron, re.DOTALL).findall(data)
if not matches: #error
item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada
if item.intervencion: #Sí ha sido clausurada judicialmente
item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) #Llamamos al método para el pintado del error
else:
logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log'))
if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia?
matches = item.emergency_urls[0] #Restauramos matches
item.armagedon = True #Marcamos la situación como catastrófica
else:
if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca...
return item #Devolvemos el Item de la llamada
else:
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
else: #Es un episodio
matches = [item.url]
#logger.debug("PATRON: " + patron)
#logger.debug(matches)
#logger.debug(data)
#Si es un lookup para cargar las urls de emergencia en la Videoteca...
if item.videolibray_emergency_urls:
item.emergency_urls = []
item.emergency_urls.append(matches) #Salvamnos matches...
return item #... y nos vamos
#Llamamos al método para crear el título general del vídeo, con toda la información obtenida de TMDB
if not item.videolibray_emergency_urls:
item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)
#Ahora tratamos los enlaces .torrent
for scrapedurl in matches: #leemos los torrents con la diferentes calidades
#Generamos una copia de Item para trabajar sobre ella
item_local = item.clone()
#Buscamos si ya tiene tamaño, si no, los buscamos en el archivo .torrent
size = scrapertools.find_single_match(item_local.quality, '\s\[(\d+,?\d*?\s\w\s?[b|B])\]')
if not size and not item.armagedon:
size = generictools.get_torrent_size(scrapedurl) #Buscamos el tamaño en el .torrent
if size:
item_local.title = re.sub(r'\s\[\d+,?\d*?\s\w[b|B]\]', '', item_local.title) #Quitamos size de título, si lo traía
item_local.title = '%s [%s]' % (item_local.title, size) #Agregamos size al final del título
size = size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b')
item_local.quality = re.sub(r'\s\[\d+,?\d*?\s\w\s?[b|B]\]', '', item_local.quality) #Quitamos size de calidad, si lo traía
item_local.quality = '%s [%s]' % (item_local.quality, size) #Agregamos size al final de la calidad
#Ahora pintamos el link del Torrent
item_local.url = scrapedurl
if host not in item_local.url and host.replace('https', 'http') not in item_local.url and not item.armagedon:
item_local.url = host + item_local.url
if item_local.url and not item.armagedon and item.emergency_urls:
item_local.torrent_alt = item.emergency_urls[0][0] #Guardamos la url del .Torrent ALTERNATIVA
if item.armagedon: #Si es catastrófico, lo marcamos
item_local.quality = '[/COLOR][COLOR hotpink][E] [COLOR limegreen]%s' % item_local.quality
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language))
#Preparamos título y calidad, quitamos etiquetas vacías
item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title)
item_local.title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.title)
item_local.title = item_local.title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
item_local.quality = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.quality)
item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.quality)
item_local.quality = item_local.quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
item_local.alive = "??" #Calidad del link sin verificar
item_local.action = "play" #Visualizar vídeo
item_local.server = "torrent" #Seridor Torrent
itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas
# Requerido para FilterTools
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío
#logger.debug("TORRENT: " + scrapedurl + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
#logger.debug(item_local)
if len(itemlist_f) > 0: #Si hay entradas filtradas...
itemlist.extend(itemlist_f) #Pintamos pantalla filtrada
else:
if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ...
thumb_separador = get_thumb("next.png") #... pintamos todo con aviso
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
# Requerido para AutoPlay
autoplay.start(itemlist, item) #Lanzamos Autoplay
return itemlist
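# --- Illustrative sketch (not channel code): the size-tagging step performed
# in findvideos() above, isolated. The sample strings and the helper name
# tag_size are assumptions. A size such as "[4,2 Gb]" is pulled out of the
# quality label, appended to the title, and its unit padded ("Gb" -> "G b")
# so Unify does not treat it as a reserved word.
import re

def tag_size(title, quality):
    size_re = r'\s\[(\d+,?\d*?\s\w\s?[bB])\]'
    match = re.search(size_re, quality)
    if match:
        size = match.group(1)
        title = '%s [%s]' % (re.sub(size_re, '', title), size)      # size at the end of the title
        size = size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b')
        quality = '%s [%s]' % (re.sub(size_re, '', quality), size)  # padded size at the end of the quality
    return title, quality

# tag_size('2x05 al 08 -', 'HDTV [4,2 Gb]') -> ('2x05 al 08 - [4,2 Gb]', 'HDTV [4,2 G b]')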
def episodios(item):
logger.info()
itemlist = []
item.category = categoria
#logger.debug(item)
if item.from_title:
item.title = item.from_title
#Limpiamos num. Temporada y Episodio que ha podido quedar por Novedades
season_display = 0
if item.contentSeason:
if item.season_colapse: #Si viene del menú de Temporadas...
season_display = item.contentSeason #... salvamos el num. de temporada a pintar
item.from_num_season_colapse = season_display
del item.season_colapse
item.contentType = "tvshow"
if item.from_title_season_colapse:
item.title = item.from_title_season_colapse
del item.from_title_season_colapse
if item.infoLabels['title']:
del item.infoLabels['title']
del item.infoLabels['season']
if item.contentEpisodeNumber:
del item.infoLabels['episode']
if season_display == 0 and item.from_num_season_colapse:
season_display = item.from_num_season_colapse
# Obtener la información actualizada de la Serie. TMDB es imprescindible para Videoteca
if not item.infoLabels['tmdb_id']:
tmdb.set_infoLabels(item, True)
modo_ultima_temp_alt = modo_ultima_temp
if item.ow_force == "1": #Si hay un traspaso de canal o url, se actualiza todo
modo_ultima_temp_alt = False
max_temp = 1
if item.infoLabels['number_of_seasons']:
max_temp = item.infoLabels['number_of_seasons']
y = []
if modo_ultima_temp_alt and item.library_playcounts: #Averiguar cuantas temporadas hay en Videoteca
patron = 'season (\d+)'
matches = re.compile(patron, re.DOTALL).findall(str(item.library_playcounts))
for x in matches:
y += [int(x)]
max_temp = max(y)
# Descarga la página
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)|&nbsp;", "", httptools.downloadpage(item.url, timeout=timeout).data)
data = unicode(data, "utf-8", errors="replace").encode("utf-8")
except: #Algún error de proceso, salimos
pass
if not data:
logger.error("ERROR 01: EPISODIOS: La Web no responde o la URL es erronea" + item.url)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: EPISODIOS:. La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log'))
return itemlist
#Usamos el mismo patrón que en listado
patron = '<tr><td><img src="[^"]+".*?title="Idioma Capitulo" \/>(.*?)<a onclick="[^"]+".?href="[^"]+".?title="[^"]*">(.*?)<\/a><\/td><td><a href="([^"]+)".?title="[^"]*".?onclick="[^"]+".?<img src="([^"]+)".*?<\/a><\/td><td>.*?<\/td><\/tr>'
matches = re.compile(patron, re.DOTALL).findall(data)
if not matches: #error
item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada
if item.intervencion: #Sí ha sido clausurada judicialmente
item, itemlist = generictools.post_tmdb_episodios(item, itemlist) #Llamamos al método para el pintado del error
return itemlist #Salimos
logger.error("ERROR 02: EPISODIOS: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: EPISODIOS: Ha cambiado la estructura de la Web. Reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
#logger.debug("PATRON: " + patron)
#logger.debug(matches)
#logger.debug(data)
season = max_temp
#Comprobamos si realmente sabemos el num. máximo de temporadas
if item.library_playcounts or (item.infoLabels['number_of_seasons'] and item.tmdb_stat):
num_temporadas_flag = True
else:
num_temporadas_flag = False
# Recorremos todos los episodios generando un Item local por cada uno en Itemlist
for language, scrapedtitle, scrapedurl, scrapedthumbnail in matches:
item_local = item.clone()
item_local.action = "findvideos"
item_local.contentType = "episode"
item_local.extra = "episodios"
if item_local.library_playcounts:
del item_local.library_playcounts
if item_local.library_urls:
del item_local.library_urls
if item_local.path:
del item_local.path
if item_local.update_last:
del item_local.update_last
if item_local.update_next:
del item_local.update_next
if item_local.channel_host:
del item_local.channel_host
if item_local.active:
del item_local.active
if item_local.contentTitle:
del item_local.infoLabels['title']
if item_local.season_colapse:
del item_local.season_colapse
item_local.title = ''
item_local.context = "['buscar_trailer']"
item_local.url = scrapedurl
title = scrapedtitle
item_local.language = []
lang = language.strip()
if not lang:
item_local.language += ['CAST']
elif 'vose' in lang.lower() or 'v.o.s.e' in lang.lower() or 'vose' in title.lower() or 'v.o.s.e' in title.lower():
item_local.language += ['VOSE']
elif 'vo' in lang.lower() or 'v.o' in lang.lower() or 'vo' in title.lower() or 'v.o' in title.lower():
item_local.language += ['VO']
elif 'latino' in lang.lower() or 'latino' in title.lower():
item_local.language += ['LAT']
try:
item_local.contentEpisodeNumber = 0
if 'miniserie' in title.lower():
item_local.contentSeason = 1
title = title.replace('miniserie', '').replace('MiniSerie', '')
elif 'completa' in title.lower():
patron = '[t|T].*?(\d+) [c|C]ompleta'
if scrapertools.find_single_match(title, patron):
item_local.contentSeason = int(scrapertools.find_single_match(title, patron))
if not item_local.contentSeason:
#Extraemos los episodios
patron = '(\d{1,2})[x|X](\d{1,2})'
item_local.contentSeason, item_local.contentEpisodeNumber = scrapertools.find_single_match(title, patron)
item_local.contentSeason = int(item_local.contentSeason)
item_local.contentEpisodeNumber = int(item_local.contentEpisodeNumber)
except:
logger.error('ERROR al extraer Temporada/Episodio: ' + title)
item_local.contentSeason = 1
item_local.contentEpisodeNumber = 0
#Si son episodios múltiples, lo extraemos
patron1 = '\d+[x|X]\d{1,2}.?(?:y|Y|al|Al)?(?:\d+[x|X]\d{1,2})?.?(?:y|Y|al|Al)?.?\d+[x|X](\d{1,2})'
epi_rango = scrapertools.find_single_match(title, patron1)
if epi_rango:
item_local.infoLabels['episodio_titulo'] = 'al %s' % epi_rango
item_local.title = '%sx%s al %s -' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2), str(epi_rango).zfill(2))
else:
item_local.title = '%sx%s -' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2))
if modo_ultima_temp_alt and item.library_playcounts: #Si solo se actualiza la última temporada de Videoteca
if item_local.contentSeason < max_temp:
break #Sale del bucle actual del FOR
if season_display > 0:
if item_local.contentSeason > season_display:
continue
elif item_local.contentSeason < season_display:
break
itemlist.append(item_local.clone())
#logger.debug(item_local)
if len(itemlist) > 1:
itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) #clasificamos
if item.season_colapse and not item.add_videolibrary: #Si viene de listado, mostramos solo Temporadas
item, itemlist = generictools.post_tmdb_seasons(item, itemlist)
if not item.season_colapse: #Si no es pantalla de Temporadas, pintamos todo
# Pasada por TMDB y clasificación de lista por temporada y episodio
tmdb.set_infoLabels(itemlist, True)
#Llamamos al método para el maquillaje de los títulos obtenidos desde TMDB
item, itemlist = generictools.post_tmdb_episodios(item, itemlist)
#logger.debug(item)
return itemlist
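# --- Illustrative sketch (not channel code): the season/episode parsing done
# in episodios() above, on invented sample titles. parse_epi is a hypothetical
# helper returning (season, episode, last_episode_of_a_multi_episode_range).
import re

def parse_epi(title):
    season, episode = 1, 0
    match = re.search(r'(\d{1,2})[xX](\d{1,2})', title)
    if match:
        season, episode = int(match.group(1)), int(match.group(2))
    # Multi-episode ranges such as "2x05 al 2x08": keep the last episode
    rango = re.search(r'\d+[xX]\d{1,2}.?(?:y|Y|al|Al)?(?:\d+[xX]\d{1,2})?.?(?:y|Y|al|Al)?.?\d+[xX](\d{1,2})', title)
    return season, episode, (int(rango.group(1)) if rango else None)

# parse_epi('Serie 2x05')         -> (2, 5, None)
# parse_epi('Serie 2x05 al 2x08') -> (2, 5, 8)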
def actualizar_titulos(item):
logger.info()
item = generictools.update_title(item) #Llamamos al método que actualiza el título con tmdb.find_and_set_infoLabels
#Volvemos a la siguiente acción en el canal
return item
def search(item, texto):
logger.info()
#texto = texto.replace(" ", "+")
try:
item.url = item.url % texto
if texto != '':
return listado(item)
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host + "peliculas-dvdr/"
item.extra = "Películas DVDR"
item.channel = channel
item.category_new= 'newest'
itemlist = listado(item)
if ">> Página siguiente" in itemlist[-1].title:
itemlist.pop()
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -1,21 +0,0 @@
{
"id": "documentalesonline",
"name": "Documentales Online",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "http://i.imgur.com/fsrnC4m.jpg",
"categories": [
"documentary"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,154 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from channelselector import get_thumb
from platformcode import logger
HOST = "http://documentales-online.com/"
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(Item(channel=item.channel, title="Novedades", action="videos", url=HOST,
thumbnail=get_thumb('newest', auto=True)))
itemlist.append(Item(channel=item.channel, title="Destacados", action="seccion", url=HOST, extra="destacados",
thumbnail=get_thumb('hot', auto=True)))
itemlist.append(Item(channel=item.channel, title="Series destacadas", action="seccion", url=HOST, extra="series",
thumbnail=get_thumb('tvshows', auto=True)))
itemlist.append(Item(channel=item.channel, title="Categorías", action="categorias", url=HOST,
thumbnail=get_thumb('categories', auto=True)))
itemlist.append(Item(channel=item.channel, title="Top 100", action="listado", url=HOST + "top/",
thumbnail=get_thumb('more voted', auto=True)))
itemlist.append(Item(channel=item.channel, title="Populares", action="listado", url=HOST + "populares/",
thumbnail=get_thumb('more watched', auto=True)))
itemlist.append(Item(channel=item.channel, title="Series y Temas", action="listado", url=HOST + "series-temas/",
thumbnail=get_thumb('tvshows', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search",
thumbnail=get_thumb('search', auto=True)))
return itemlist
def listado(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = data.replace('<span class="wpp-views">', '')
bloque = scrapertools.find_single_match(data, 'class="post-entry(.*?)class="post-share')
if "series-temas" not in item.url:
patron = '<a href="([^"]+)".*?'
patron += 'title="([^"]+)".*?'
patron += '/a>([^<]+)<'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedtitle, scrapedextra in matches:
itemlist.append(Item(action = "findvideos",
channel = item.channel,
title = scrapedtitle + scrapedextra,
url = HOST + scrapedurl
))
else:
patron = """<a href='([^']+)'.*?"""
patron += """>([^<]+)<.*?"""
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedtitle in matches:
itemlist.append(Item(action = "videos",
channel = item.channel,
title = scrapedtitle,
url = HOST + scrapedurl
))
return itemlist
def seccion(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
if item.extra == "destacados":
patron_seccion = '<h4 class="widget-title">Destacados</h4><div class="textwidget"><ul>(.*?)</ul>'
action = "findvideos"
else:
patron_seccion = '<h4 class="widget-title">Series destacadas</h4><div class="textwidget"><ul>(.*?)</ul>'
action = "videos"
data = scrapertools.find_single_match(data, patron_seccion)
matches = scrapertools.find_multiple_matches(data, '<a href="([^"]+)">(.*?)</a>')
aux_action = action
for url, title in matches:
if item.extra != "destacados" and "Cosmos (Carl Sagan)" in title:
action = "findvideos"
else:
action = aux_action
itemlist.append(item.clone(title=title, url=url, action=action, fulltitle=title))
return itemlist
def videos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
pagination = scrapertools.find_single_match(data, "rel='next' href='([^']+)'")
if not pagination:
pagination = scrapertools.find_single_match(data, '<span class=\'current\'>\d</span>'
'<a class="page larger" href="([^"]+)">')
patron = '<ul class="sp-grid">(.*?)</ul>'
data = scrapertools.find_single_match(data, patron)
matches = re.compile('<a href="([^"]+)">(.*?)</a>.*?<img.*?src="([^"]+)"', re.DOTALL).findall(data)
for url, title, thumb in matches:
itemlist.append(item.clone(title=title, url=url, action="findvideos", fulltitle=title, thumbnail=thumb))
if pagination:
itemlist.append(item.clone(title=">> Página siguiente", url=pagination))
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
data = scrapertools.find_single_match(data, 'a href="#">Categorías</a><ul class="sub-menu">(.*?)</ul>')
matches = scrapertools.find_multiple_matches(data, '<a href="([^"]+)">(.*?)</a>')
for url, title in matches:
itemlist.append(item.clone(title=title, url=url, action="videos", fulltitle=title))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
try:
item.url = HOST + "?s=%s" % texto
return videos(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
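# --- Illustrative sketch (not channel code): the try/except contract that
# search() follows here and in the other channels. Any scraping failure gets
# logged and an empty list returned, so a failing channel never interrupts the
# global search. safe_search and do_search are hypothetical names.
def safe_search(do_search, item, texto):
    import sys
    try:
        return do_search(item, texto) if texto else []
    except:
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []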
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
if "Cosmos (Carl Sagan)" in item.title:
patron = '(?s)<p><strong>([^<]+)<.*?'
patron += '<iframe.*?src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data,patron)
for title, url in matches:
itemlist.append(item.clone(action = "play", title=title, url=url
))
else:
data = scrapertools.find_multiple_matches(data, '<iframe.+?src="([^"]+)"')
itemlist.extend(servertools.find_video_items(data=",".join(data)))
for videoitem in itemlist:
videoitem.fulltitle = item.fulltitle
videoitem.channel = item.channel
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist

View File

@@ -1,68 +0,0 @@
{
"id": "doomtv",
"name": "doomtv",
"active": true,
"adult": false,
"language": ["esp", "lat"],
"thumbnail": "https://s2.postimg.cc/jivgi4ak9/doomtv.png",
"banner": "https://s32.postimg.cc/6gxyripvp/doomtv_banner.png",
"version": 1,
"categories": [
"movie",
"direct"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,242 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urllib
import urlparse
from channels import autoplay
from channels import filtertools
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
IDIOMAS = {'Latino': 'Latino'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['dostream', 'openload']
host = 'http://doomtv.net/'
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(
item.clone(title="Todas",
action="lista",
thumbnail=get_thumb('all', auto=True),
fanart='https://s18.postimg.cc/fwvaeo6qh/todas.png',
url='%s%s'%(host,'peliculas/'),
first=0
))
itemlist.append(
item.clone(title="Generos",
action="seccion",
thumbnail=get_thumb('genres', auto=True),
fanart='https://s3.postimg.cc/5s9jg2wtf/generos.png',
url='%s%s' % (host, 'peliculas/'),
))
itemlist.append(
item.clone(title="Mas Vistas",
action="lista",
thumbnail=get_thumb('more watched', auto=True),
fanart='https://s9.postimg.cc/wmhzu9d7z/vistas.png',
url='%s%s'%(host,'top-imdb/'),
first=0
))
itemlist.append(
item.clone(title="Buscar",
action="search",
url='http://doomtv.net/?s=',
thumbnail=get_thumb('search', auto=True),
fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
))
autoplay.show_option(item.channel, itemlist)
return itemlist
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def lista(item):
logger.info()
itemlist = []
next = False
data = get_source(item.url)
patron = 'movie-id=.*?href="([^"]+)" data-url.*?quality">([^<]+)<.*?img data-original="([^"]+)" class.*?'
patron += '<h2>([^<]+)<\/h2>.*?<p>([^<]+)<\/p>'
matches = re.compile(patron, re.DOTALL).findall(data)
first = item.first
last = first + 19
if last > len(matches):
last = len(matches)
next = True
for scrapedurl, quality, scrapedthumbnail, scrapedtitle, plot in matches[first:last]:
url = host+scrapedurl
thumbnail = 'https:'+scrapedthumbnail.strip()
filtro_thumb = thumbnail.replace("https://image.tmdb.org/t/p/w185", "")
filtro_list = {"poster_path": filtro_thumb.strip()}
filtro_list = filtro_list.items()
title = scrapedtitle
fanart = ''
itemlist.append(
Item(channel=item.channel,
action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
infoLabels={'filtro': filtro_list},
fanart=fanart,
contentTitle=title
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
if not next:
url_next_page = item.url
first = last
else:
url_next_page = scrapertools.find_single_match(data, "<li class='active'>.*?class='page larger' href='([^']+)'")
first = 0
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista', first=first))
return itemlist
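# --- Illustrative sketch (not channel code): the in-memory pagination used by
# lista() above. Each call renders a window of 19 scraped matches and the
# site's next-page link is only followed once the current page runs out.
# window is a hypothetical helper; the 45-entry list is invented.
def window(matches, first, per_page=19):
    last = first + per_page
    exhausted = last > len(matches)            # not enough items left on this page
    if exhausted:
        last = len(matches)
    # next 'first': restart at 0 on the next web page, or keep advancing in-page
    return matches[first:last], (0 if exhausted else last), exhausted

# window(list(range(45)), 0)  -> 19 items, next first=19, exhausted=False
# window(list(range(45)), 38) -> 7 items,  next first=0,  exhausted=True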
def seccion(item):
logger.info()
itemlist = []
duplicado = []
data = get_source(item.url)
patron = 'menu-item-object-category menu-item-\d+"><a href="([^"]+)">([^<]+)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = host+scrapedurl
title = scrapedtitle
thumbnail = ''
if url not in duplicado:
itemlist.append(
Item(channel=item.channel,
action='lista',
title=title,
url=url,
thumbnail=thumbnail,
first=0
))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
item.first=0
if texto != '':
return lista(item)
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas', 'latino']:
item.url = host +'peliculas/page/1'
elif categoria == 'infantiles':
item.url = host + 'categoria/animacion/'
elif categoria == 'terror':
item.url = host + 'categoria/terror/'
item.first=0
itemlist = lista(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'id="(tab\d+)"><div class="movieplay">.*?src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, urls in matches:
language = 'Latino'
if 'http' not in urls:
urls = 'https:'+urls
if not config.get_setting('unify'):
title = ' [%s]' % language
else:
title = ''
new_item = Item(
channel=item.channel,
url=urls,
title= '%s'+ title,
contentTitle=item.title,
action='play',
language = IDIOMAS[language],
infoLabels = item.infoLabels
)
itemlist.append(new_item)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle,
))
return itemlist

View File

@@ -1,79 +0,0 @@
{
"id": "dospelis",
"name": "DosPelis",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "https://www.dospelis.net/wp-content/uploads/2019/02/logodospelisamor.png",
"banner": "",
"categories": [
"movie",
"tvshow",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Castellano",
"VOSE"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -1,346 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel DosPelis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import base64
from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from lib import jsunpack
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger
IDIOMAS = {'mx': 'Latino', 'dk':'Latino', 'es': 'Castellano', 'en': 'VOSE', 'gb':'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = [
'directo',
'openload',
]
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'dospelis')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'dospelis')
host = 'https://dospelis.com/'
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title='Peliculas', action='menu_movies',
thumbnail= get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, title='Series', url=host+'tvshows', action='list_all', type='tvshows',
thumbnail= get_thumb('tvshows', auto=True)))
itemlist.append(
item.clone(title="Buscar", action="search", url=host + '?s=', thumbnail=get_thumb("search", auto=True),
extra='movie'))
autoplay.show_option(item.channel, itemlist)
return itemlist
def menu_movies(item):
logger.info()
itemlist=[]
itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'movies', action='list_all',
thumbnail=get_thumb('all', auto=True), type='movies'))
itemlist.append(Item(channel=item.channel, title='Genero', action='section',
thumbnail=get_thumb('genres', auto=True), type='movies'))
itemlist.append(Item(channel=item.channel, title='Por Año', action='section',
thumbnail=get_thumb('year', auto=True), type='movies'))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def get_language(lang_data):
logger.info()
language = []
lang_list = scrapertools.find_multiple_matches(lang_data, '/flags/(.*?).png\)')
for lang in lang_list:
if lang == 'en':
lang = 'vose'
if lang not in language:
language.append(lang)
return language
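# --- Illustrative sketch (not channel code): exercising get_language() with an
# invented CSS fragment like the ones the site embeds, then mapping the raw
# flag codes through IDIOMAS the way findvideos() does. demo_language is a
# hypothetical helper.
def demo_language():
    codes = get_language('background:url(/flags/mx.png);background:url(/flags/en.png)')
    # codes -> ['mx', 'vose']  ('en' is normalized to 'vose', duplicates dropped)
    return codes, [IDIOMAS.get(code, 'VOSE') for code in ('mx', 'es', 'en')]
    # second element -> ['Latino', 'Castellano', 'VOSE']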
def section(item):
logger.info()
itemlist=[]
duplicados=[]
data = get_source(host+item.type)
if 'Genero' in item.title:
patron = '<liclass="cat-item cat-item-\d+"><ahref=([^ ]+) .*?>(.*?)/i>'
elif 'Año' in item.title:
patron = '<li><ahref=(.*?release.*?)>([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle
plot=''
if 'Genero' in item.title:
quantity = scrapertools.find_single_match(scrapedtitle,'<i>(.*?)<')
title = scrapertools.find_single_match(scrapedtitle,'(.*?)</')
title = title
plot = '%s elementos' % quantity.replace('.','')
else:
title = scrapedtitle
if title not in duplicados:
itemlist.append(Item(channel=item.channel, url=scrapedurl, title=title, plot=plot, action='list_all',
type=item.type))
duplicados.append(title)
return itemlist
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.type == 'movies':
patron = '<articleid=post-\d+ class="item movies"><divclass=poster>.?<imgsrc=([^ ]+) alt="([^"]+)">.*?'
patron += 'quality>([^<]+)<.*?<ahref=([^>]+)>.*?<\/h3><span>([^<]+)<.*?flags(.*?)metadata'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year, lang_data in matches:
title = '%s [%s] [%s]' % (scrapedtitle, year, quality)
contentTitle = scrapedtitle
thumbnail = scrapedthumbnail
url = scrapedurl
language = get_language(lang_data)
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
language=language,
quality=quality,
infoLabels={'year':year}))
elif item.type == 'tvshows':
patron = '<articleid=post-\d+ class="item tvshows">.?<divclass=poster>.?<imgsrc=([^ ]+)'
patron += ' alt="([^"]+)">.*?<ahref=([^>]+)>.*?<\/h3>.?<span>(.*?)<\/span><\/div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, scrapedurl, year in matches:
title = scrapedtitle
contentSerieName = scrapedtitle
thumbnail = scrapedthumbnail
url = scrapedurl
itemlist.append(item.clone(action='seasons',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=contentSerieName,
infoLabels={'year':year}))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
# Paginación
url_next_page = scrapertools.find_single_match(data,'<linkrel=next href=([^>]+)>')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
return itemlist
def seasons(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron='title>Temporada.?(\d+)'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for season in matches:
season = season.lower().replace('temporada','')
infoLabels['season']=season
title = 'Temporada %s' % season
itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)
return itemlist
def episodesxseasons(item):
logger.info()
itemlist = []
data=get_source(item.url)
patron='class=numerando>%s - (\d+)</div>.?<divclass=episodiotitle>.?<ahref=([^>]+)>([^<]+)<' % item.infoLabels['season']
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedepisode, scrapedurl, scrapedtitle in matches:
infoLabels['episode'] = scrapedepisode
url = scrapedurl
title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'id=option-(\d+).*?src=([^ ]+) frameborder'
matches = re.compile(patron, re.DOTALL).findall(data)
lang=''
for option, scrapedurl in matches:
lang = scrapertools.find_single_match(data, 'href=#option-%s>.*?/flags/(.*?).png' % option)
quality = ''
if 'goo.gl' in scrapedurl:
new_data = httptools.downloadpage(scrapedurl, follow_redirects=False).headers
scrapedurl = new_data['location']
if lang not in IDIOMAS:
lang = 'en'
title = '%s %s'
itemlist.append(
Item(channel=item.channel, url=scrapedurl, title=title, action='play', quality=quality, language=IDIOMAS[lang],
infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
# Requerido para Filtrar enlaces
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
itemlist = sorted(itemlist, key=lambda it: it.language)
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
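# --- Illustrative sketch (not channel code): the goo.gl handling in
# findvideos() above resolves the shortener without downloading the target
# page: with redirects disabled, the real URL comes back in the 30x response's
# 'location' header. resolve_short is a hypothetical helper built on the same
# httptools call the channel already uses.
def resolve_short(url):
    headers = httptools.downloadpage(url, follow_redirects=False).headers
    if 'location' in headers:
        return headers['location']
    return url    # not a redirect: keep the original url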
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return search_results(item)
else:
return []
def search_results(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron = '<article>.*?<ahref=([^>]+)><imgsrc=([^ ]+) alt="([^"]+)">.*?year>([^<]+)<(.*?)<p>([^<]+)<\/p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumb, scrapedtitle, year, lang_data, scrapedplot in matches:
title = scrapedtitle
url = scrapedurl
thumbnail = scrapedthumb
plot = scrapedplot
language = get_language(lang_data)
if language:
action = 'findvideos'
else:
action = 'seasons'
new_item=Item(channel=item.channel, title=title, url=url, thumbnail=thumbnail, plot=plot,
action=action,
language=language, infoLabels={'year':year})
if new_item.action == 'findvideos':
new_item.contentTitle = new_item.title
else:
new_item.contentSerieName = new_item.title
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas']:
item.url = host + 'movies/'
elif categoria == 'infantiles':
item.url = host + 'genre/animacion/'
elif categoria == 'terror':
item.url = host + 'genre/terror/'
item.type='movies'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -1,12 +0,0 @@
{
"id": "ecarteleratrailers",
"name": "Trailers ecartelera",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "ecarteleratrailers.png",
"banner": "ecarteleratrailers.png",
"categories": [
"movie"
]
}

View File

@@ -1,85 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import scrapertools
from core.item import Item
from platformcode import logger
from core import httptools
def mainlist(item):
logger.info()
itemlist = []
if item.url == "":
item.url = "http://www.ecartelera.com/videos/"
# ------------------------------------------------------
# Descarga la página
# ------------------------------------------------------
data = httptools.downloadpage(item.url).data
# logger.info(data)
# ------------------------------------------------------
# Extrae las películas
# ------------------------------------------------------
patron = '<div class="viditem"[^<]+'
patron += '<div class="fimg"><a href="([^"]+)"><img alt="([^"]+)" src="([^"]+)"/><p class="length">([^<]+)</p></a></div[^<]+'
patron += '<div class="fcnt"[^<]+'
patron += '<h4><a[^<]+</a></h4[^<]+'
patron += '<p class="desc">([^<]+)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail, duration, scrapedplot in matches:
title = scrapedtitle + " (" + duration + ")"
url = scrapedurl
thumbnail = scrapedthumbnail
plot = scrapedplot.strip()
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail,
plot=plot,folder=False))
# ------------------------------------------------------
# Extrae la página siguiente
# ------------------------------------------------------
patron = '<a href="([^"]+)">Siguiente</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for match in matches:
scrapedtitle = "Página siguiente"
scrapedurl = match
scrapedthumbnail = ""
scrapedplot = ""
# Añade al listado de XBMC
itemlist.append(Item(channel=item.channel, action="mainlist", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, server="directo", folder=True,
viewmode="movie_with_plot"))
return itemlist
# Reproducir un vídeo
def play(item):
logger.info()
itemlist = []
# Descarga la página
data = httptools.downloadpage(item.url).data
logger.info(data)
# Extrae la URL del vídeo
patron = '<source src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) > 0:
url = urlparse.urljoin(item.url, matches[0])
logger.info("url=" + url)
itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=url, thumbnail=item.thumbnail,
plot=item.plot, server="directo", folder=False))
return itemlist

View File

@@ -1,101 +0,0 @@
{
"id": "elitetorrent",
"name": "Elite Torrent",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "elitetorrent.png",
"banner": "elitetorrent.png",
"categories": [
"torrent",
"movie",
"tvshow",
"documentary",
"vos"
],
"settings":[
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra en TMDB",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"CAST",
"LAT",
"VO",
"VOS",
"VOSE"
]
},
{
"id": "emergency_urls",
"type": "list",
"label": "Se quieren guardar Enlaces de Emergencia por si se cae la Web?",
"default": 1,
"enabled": true,
"visible": true,
"lvalues": [
"No",
"Guardar",
"Borrar",
"Actualizar"
]
},
{
"id": "emergency_urls_torrents",
"type": "bool",
"label": "Se quieren guardar Torrents de Emergencia por si se cae la Web?",
"default": true,
"enabled": true,
"visible": "!eq(-1,'No')"
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "timeout_downloadpage",
"type": "list",
"label": "Timeout (segs.) en descarga de páginas o verificación de servidores",
"default": 5,
"enabled": true,
"visible": true,
"lvalues": [
"None",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10"
]
}
]
}

View File

@@ -1,522 +0,0 @@
# -*- coding: utf-8 -*-
import re
import sys
import urllib
import urlparse
import time
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb
from lib import generictools
from channels import filtertools
from channels import autoplay
#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'}
IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['torrent']
host = 'http://www.elitetorrent.io'
channel = "elitetorrent"
categoria = channel.capitalize()
__modo_grafico__ = config.get_setting('modo_grafico', channel)
timeout = config.get_setting('timeout_downloadpage', channel)
def mainlist(item):
logger.info()
itemlist = []
thumb_pelis = get_thumb("channels_movie.png")
thumb_pelis_hd = get_thumb("channels_movie_hd.png")
thumb_series = get_thumb("channels_tvshow.png")
thumb_series_hd = get_thumb("channels_tvshow_hd.png")
thumb_buscar = get_thumb("search.png")
thumb_separador = get_thumb("next.png")
thumb_settings = get_thumb("setting_0.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host, extra="peliculas", thumbnail=thumb_pelis))
itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series", thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host, thumbnail=thumb_buscar, filter_lang=True))
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador))
itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal", thumbnail=thumb_settings))
autoplay.show_option(item.channel, itemlist) #Activamos Autoplay
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return
def submenu(item):
logger.info()
itemlist = []
item.filter_lang = True
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
except:
pass
if not data:
logger.error("ERROR 01: SUBMENU: La Web no responde o ha cambiado de URL: " + item.url)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
return itemlist #Algo no funciona, pintamos lo que tenemos
patron = '<div class="cab_menu"\s*>.*?<\/div>' #Menú principal
data1 = scrapertools.find_single_match(data, patron)
patron = '<div id="menu_langen"\s*>.*?<\/div>' #Menú de idiomas
data1 += scrapertools.find_single_match(data, patron)
patron = '<a href="(.*?)".*?title="(.*?)"' #Encontrar todos los apartados
matches = re.compile(patron, re.DOTALL).findall(data1)
if not matches:
item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada
if item.intervencion: #Sí ha sido clausurada judicialmente
for clone_inter, autoridad in item.intervencion:
thumb_intervenido = get_thumb(autoridad)
itemlist.append(item.clone(action='', title="[COLOR yellow]" + clone_inter.capitalize() + ': [/COLOR]' + intervenido_judicial + '. Reportar el problema en el foro', thumbnail=thumb_intervenido))
return itemlist #Salimos
logger.error("ERROR 02: SUBMENU: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data1)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: SUBMENU: Ha cambiado la estructura de la Web. Reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
for scrapedurl, scrapedtitle in matches:
scrapedtitle = re.sub('\r\n', '', scrapedtitle).decode('utf8').strip()
scrapedtitle = scrapedtitle.replace(" torrent", "").replace(" Torrent", "").replace("Series y ", "").title()
if "castellano" in scrapedtitle.lower(): #Evita la entrada de peliculas castellano del menú de idiomas
continue
if item.extra == "series": #Tratamos Series
if not "/serie" in scrapedurl:
continue
else: #Tratamos Películas
if "/serie" in scrapedurl:
continue
if 'subtitulado' in scrapedtitle.lower() or 'latino' in scrapedtitle.lower() or 'original' in scrapedtitle.lower():
item.filter_lang = False
itemlist.append(item.clone(action="listado", title=scrapedtitle, url=scrapedurl))
if item.extra == "series": #Añadimos Series VOSE que está fuera del menú principal
itemlist.append(item.clone(action="listado", title="Series VOSE", url=host + "/series-vose/", filter_lang=False))
return itemlist
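# --- Illustrative sketch (not channel code): the emergency-URL round trip used
# by findvideos() further down. When the videolibrary is built, the channel is
# called with videolibray_emergency_urls set and returns the item carrying the
# scraped .torrent/.magnet links; if the site later stops responding, those
# saved links are restored and item.armagedon marks the degraded mode.
# stash_links and restore_links are hypothetical names.
def stash_links(item, link_torrent, link_magnet):
    item.emergency_urls = [[link_torrent], [link_magnet]]
    return item

def restore_links(item):
    link_torrent = item.emergency_urls[0][0]
    link_magnet = item.emergency_urls[1][0]
    item.armagedon = True    # catastrophic: the web did not respond
    return link_torrent, link_magnet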
def listado(item):
logger.info()
itemlist = []
inicio = time.time() # Controlaremos que el proceso no exceda de un tiempo razonable
fin = inicio + 5 # Después de este tiempo pintamos (segundos)
timeout_search = timeout # Timeout para descargas
if item.extra == 'search':
timeout_search = timeout * 2 # Timeout un poco más largo para las búsquedas
if timeout_search < 5:
timeout_search = 5 # Timeout un poco más largo para las búsquedas
# Descarga la página
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout_search).data)
except:
pass
if not data: #Si la web está caída salimos sin dar error
logger.error("ERROR 01: LISTADO: La Web no responde o ha cambiado de URL: " + item.url + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: LISTADO: La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
patron = '<div id="principal">.*?<\/nav><\/div><\/div>'
data = scrapertools.find_single_match(data, patron)
patron = '<li>\s*<div\s*class="[^"]+">\s*<a href="([^"]+)"\s*' #url
patron += 'title="([^"]+)"\s*(?:alt="[^"]+")?\s*>\s*' #título
patron += '<img (?:class="[^"]+")?\s*src="([^"]+)"\s*border="[^"]+"\s*' #thumb
patron += 'title="([^"]+)".*?' #categoría, idioma
patron += '<span class="[^"]+" style="[^"]+"\s*><i>(.*?)<\/i><\/span.*?' #calidad
patron += '="dig1">(.*?)<.*?' #tamaño
patron += '="dig2">(.*?)<\/span><\/div>' #tipo tamaño
matches = re.compile(patron, re.DOTALL).findall(data)
if not matches and not '<title>503 Backend fetch failed</title>' in data and not 'No se han encontrado resultados' in data: #error
item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada
if item.intervencion: #Sí ha sido clausurada judicialmente
item, itemlist = generictools.post_tmdb_listado(item, itemlist) #Llamamos al método para el pintado del error
return itemlist #Salimos
logger.error("ERROR 02: LISTADO: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: LISTADO: Ha cambiado la estructura de la Web. Reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
#logger.debug("PATRON: " + patron)
#logger.debug(matches)
#logger.debug(data)
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedcategory, scrapedcalidad, scrapedsize, scrapedsizet in matches:
item_local = item.clone() #Creamos copia de Item para trabajar
title = re.sub('\r\n', '', scrapedtitle).decode('utf8').strip()
title = title.replace(" torrent", "").replace(" Torrent", "").replace("Series y ", "")
item_local.url = urlparse.urljoin(host, scrapedurl)
item_local.thumbnail = urlparse.urljoin(host, scrapedthumbnail)
if "---" in scrapedcalidad: #limpiamos calidades
scrapedcalidad = ''
if "microhd" in title.lower():
item_local.quality = "microHD"
if not "/series-vose/" in item.url and not item_local.quality:
item_local.quality = scrapedcalidad
if scrapertools.find_single_match(item_local.quality, r'\d+\.\d+'):
item_local.quality = ''
if not item_local.quality and ("DVDRip" in title or "HDRip" in title or "BR-LINE" in title or "HDTS-SCREENER" in title or "BDRip" in title or "BR-Screener" in title or "DVDScreener" in title or "TS-Screener" in title):
item_local.quality = scrapertools.find_single_match(title, r'\((.*?)\)')
item_local.quality = item_local.quality.replace("Latino", "")
if not scrapedsizet:
scrapedsize = ''
else:
item_local.quality += ' [%s %s]' % (scrapedsize.replace(".", ","), scrapedsizet)
item_local.language = [] #Verificamos el idioma por si encontramos algo
if "latino" in scrapedcategory.lower() or "latino" in item.url or "latino" in title.lower():
item_local.language += ["LAT"]
if "ingles" in scrapedcategory.lower() or "ingles" in item.url or "vose" in scrapedurl or "vose" in item.url:
if "VOSE" in scrapedcategory.lower() or "sub" in title.lower() or "vose" in scrapedurl or "vose" in item.url:
item_local.language += ["VOS"]
else:
item_local.language += ["VO"]
if "dual" in scrapedcategory.lower() or "dual" in title.lower():
item_local.language[0:0] = ["DUAL"]
if item_local.language == []:
item_local.language = ['CAST'] #Por defecto
#Limpiamos el título de la basura innecesaria
title = title.replace("Dual", "").replace("dual", "").replace("Subtitulada", "").replace("subtitulada", "").replace("Subt", "").replace("subt", "").replace("Sub", "").replace("sub", "").replace("(Proper)", "").replace("(proper)", "").replace("Proper", "").replace("proper", "").replace("#", "").replace("(Latino)", "").replace("Latino", "")
title = title.replace("- HDRip", "").replace("(HDRip)", "").replace("- Hdrip", "").replace("(microHD)", "").replace("(DVDRip)", "").replace("(HDRip)", "").replace("(BR-LINE)", "").replace("(HDTS-SCREENER)", "").replace("(BDRip)", "").replace("(BR-Screener)", "").replace("(DVDScreener)", "").replace("TS-Screener", "").replace(" TS", "").replace(" Ts", "").replace("temporada", "").replace("Temporada", "").replace("capitulo", "").replace("Capitulo", "")
title = re.sub(r'(?:\d+)?x.?\s?\d+', '', title)
title = re.sub(r'\??\s?\d*?\&.*', '', title).title().strip()
item_local.from_title = title #Guardamos esta etiqueta para posible desambiguación de título
if item_local.extra == "peliculas": #preparamos Item para películas
if "/serie" in scrapedurl or "/serie" in item.url:
continue
if not "/serie" in scrapedurl and not "/serie" in item.url:
item_local.contentType = "movie"
item_local.contentTitle = title
item_local.extra = "peliculas"
if item_local.extra == "series": #preparamos Item para series
if not "/serie" in scrapedurl and not "/serie" in item.url:
continue
if "/serie" in scrapedurl or "/serie" in item.url:
item_local.contentType = "episode"
item_local.extra = "series"
epi_mult = scrapertools.find_single_match(item_local.url, r'cap.*?-\d+-al-(\d+)')
item_local.contentSeason = scrapertools.find_single_match(item_local.url, r'temporada-(\d+)')
item_local.contentEpisodeNumber = scrapertools.find_single_match(item_local.url, r'cap.*?-(\d+)')
if not item_local.contentSeason:
item_local.contentSeason = scrapertools.find_single_match(item_local.url, r'-(\d+)[x|X]\d+')
if not item_local.contentEpisodeNumber:
item_local.contentEpisodeNumber = scrapertools.find_single_match(item_local.url, r'-\d+[x|X](\d+)')
if not item_local.contentSeason or item_local.contentSeason < 1:
item_local.contentSeason = 0
if item_local.contentEpisodeNumber < 1:
item_local.contentEpisodeNumber = 1
item_local.contentSerieName = title
if epi_mult:
title = "%sx%s al %s -" % (item_local.contentSeason, str(item_local.contentEpisodeNumber).zfill(2), str(epi_mult).zfill(2)) #Creamos un título con el rango de episodios
else:
title = '%sx%s ' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2))
item_local.action = "findvideos"
item_local.title = title.strip()
item_local.infoLabels['year'] = "-"
#Pasamos a TMDB cada Item, para evitar el efecto memoria de tmdb
#if item.category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global, pasamos
# tmdb.set_infoLabels(item_local, True)
#Ahora se filtra por idioma, si procede, y se pinta lo que vale
if config.get_setting('filter_languages', channel) > 0 and item.filter_lang: #Si hay idioma seleccionado, se filtra
itemlist = filtertools.get_link(itemlist, item_local, list_language)
else:
itemlist.append(item_local.clone()) #Si no, pintar pantalla
#if not item.category: #Si este campo no existe es que viene de la primera pasada de una búsqueda global
# return itemlist #Retornamos sin pasar por la fase de maquillaje para ahorra tiempo
#Pasamos a TMDB la lista completa Itemlist
tmdb.set_infoLabels(itemlist, True)
#Llamamos al método para el maquillaje de los títulos obtenidos desde TMDB
item, itemlist = generictools.post_tmdb_listado(item, itemlist)
# Extrae el paginador
patron = '<div class="paginacion">.*?<span class="pagina pag_actual".*?'
patron += "<a href='([^']+)'.*?" #url siguiente página
patron += 'class="pagina">(\d+)<' #próxima página
matches = scrapertools.find_single_match(data, patron)
patron = 'class="pagina pag_sig">Siguiente.*?'
patron += "<a href='.*?\/page\/(\d+)\/" #total de páginas
last_page = scrapertools.find_single_match(data, patron)
if not last_page:
patron = '<div class="paginacion">.*?'
patron += 'class="pagina">(\d+)<\/a><\/div><\/nav><\/div><\/div>' #total de páginas
last_page = scrapertools.find_single_match(data, patron)
if matches:
scrapedurl = urlparse.urljoin(item.url, matches[0])
if last_page:
title = '[COLOR gold]Página siguiente >>[/COLOR] %s de %s' % (int(matches[1]) - 1, last_page)
else:
title = '[COLOR gold]Página siguiente >>[/COLOR] %s' % (int(matches[1]) - 1)
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=scrapedurl, extra=item.extra, filter_lang=item.filter_lang))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
itemlist_t = [] #Itemlist total de enlaces
itemlist_f = [] #Itemlist de enlaces filtrados
if not item.language:
item.language = ['CAST'] #Castellano por defecto
#Bajamos los datos de la página
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
except:
pass
if not data:
logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log'))
if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia?
link_torrent = item.emergency_urls[0][0] #Guardamos la url del .Torrent
link_magnet = item.emergency_urls[1][0] #Guardamos la url del .Magnet
item.armagedon = True #Marcamos la situación como catastrófica
else:
if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca...
return item #Devolvemos el Item de la llamada
else:
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
#data = unicode(data, "utf-8", errors="replace")
patron_t = '<div class="enlace_descarga".*?<a href="(.*?\.torrent)"'
patron_m = '<div class="enlace_descarga".*?<a href="(magnet:\?.*?)"'
if not item.armagedon: #Si es un proceso normal, seguimos
link_torrent = scrapertools.find_single_match(data, patron_t)
link_torrent = urlparse.urljoin(item.url, link_torrent)
link_torrent = link_torrent.replace(" ", "%20") #sustituimos espacios por %20, por si acaso
#logger.info("link Torrent: " + link_torrent)
link_magnet = scrapertools.find_single_match(data, patron_m)
link_magnet = urlparse.urljoin(item.url, link_magnet)
#logger.info("link Magnet: " + link_magnet)
#Si es un lookup para cargar las urls de emergencia en la Videoteca...
if (link_torrent or link_magnet) and item.videolibray_emergency_urls:
item.emergency_urls = []
item.emergency_urls.append([link_torrent]) #Salvamos el enlace de .torrent
item.emergency_urls.append([link_magnet]) #Salvamos el enlace de .magnet
return item #... y nos vamos
#Añadimos el tamaño para todos
size = scrapertools.find_single_match(item.quality, '\s\[(\d+,?\d*?\s\w\s*[b|B]s*)\]')
if size:
item.title = re.sub('\s\[\d+,?\d*?\s\w\s*[b|B]s*\]', '', item.title) #Quitamos size de título, si lo traía
item.title = '%s [%s]' % (item.title, size) #Agregamos size al final del título
item.quality = re.sub('\s\[\d+,?\d*?\s\w\s*[b|B]s*\]', '', item.quality) #Quitamos size de calidad, si lo traía
if not link_torrent and not link_magnet: #error
item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada
if item.intervencion: #Sí ha sido clausurada judicialmente
item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) #Llamamos al método para el pintado del error
else:
logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / PATRON: " + patron_t + " / " + patron_m + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web. Verificar en la Web y reportar el error con el log'))
if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia?
link_torrent = item.emergency_urls[0][0] #Guardamos la url del .Torrent
link_magnet = item.emergency_urls[1][0] #Guardamos la url del .Magnet
item.armagedon = True #Marcamos la situación como catastrófica
else:
if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca...
return item #Devolvemos el Item de la llamada
else:
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
#Llamamos al método para crear el título general del vídeo, con toda la información obtenida de TMDB
item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)
if not size and not item.armagedon:
size = generictools.get_torrent_size(link_torrent) #Buscamos el tamaño en el .torrent
if size:
item.quality = '%s [%s]' % (item.quality, size) #Agregamos size al final de calidad
item.quality = item.quality.replace("GB", "G B").replace("MB", "M B").replace("Gb", "G B").replace("Mb", "M B") #Se evita la palabra reservada en Unify
#Ahora pintamos el link del Torrent, si lo hay
if link_torrent: # Hay Torrent ?
#Generamos una copia de Item para trabajar sobre ella
item_local = item.clone()
if item_local.quality:
item_local.quality += " "
item_local.quality += "[Torrent]"
if item.armagedon: #Si es catastrófico, lo marcamos
item_local.quality = '[/COLOR][COLOR hotpink][E] [COLOR limegreen]%s' % item_local.quality
item_local.url = link_torrent
if item_local.url and item.emergency_urls and not item.armagedon:
item_local.torrent_alt = item.emergency_urls[0][0] #Guardamos la url del .Torrent ALTERNATIVA
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Torrent
#Preparamos título y calidad, quitamos etiquetas vacías
item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title)
item_local.title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.title)
item_local.title = item_local.title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
item_local.quality = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.quality)
item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.quality)
item_local.quality = item_local.quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
item_local.alive = "??" #Calidad del link sin verificar
item_local.action = "play" #Visualizar vídeo
item_local.server = "torrent" #Seridor Torrent
itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas
# Requerido para FilterTools
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío
if len(itemlist_f) > 0: #Si hay entradas filtradas...
itemlist.extend(itemlist_f) #Pintamos pantalla filtrada
else:
if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ...
thumb_separador = get_thumb("next.png") #... pintamos todo con aviso
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
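# The language-filter pattern used above, and repeated for the Magnet block below,
# reduced to a sketch (same logic, illustrative only):
#
#   itemlist_f = filtertools.get_link([], item_local, list_language)
#   if itemlist_f:                      # links in the selected language exist
#       itemlist.extend(itemlist_f)
#   else:
#       itemlist.extend(itemlist_t)     # fallback: paint everything, preceded by a warning row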
#Ahora pintamos el link del Magnet, si lo hay
itemlist_t = [] #Itemlist total de enlaces
itemlist_f = [] #Itemlist de enlaces filtrados
if link_magnet: # Hay Magnet ?
#Generamos una copia de Item para trabajar sobre ella
item_local = item.clone()
if item_local.quality:
item_local.quality += " "
item_local.quality = item_local.quality.replace("[Torrent]", "") + "[Magnet]"
if item.armagedon: #Si es catastrófico, lo marcamos
item_local.quality = '[/COLOR][COLOR hotpink][E] [COLOR limegreen]%s' % item_local.quality
item_local.url = link_magnet
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Magnet][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language)) #Preparamos título de Magnet
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\[?\]?\]\[\/COLOR\]', '', item_local.title) #Quitamos etiquetas vacías
item_local.title = re.sub(r'\s\[COLOR \w+\]\[\/COLOR\]', '', item_local.title) #Quitamos colores vacíos
item_local.alive = "??" #Calidad del link sin verificar
item_local.action = "play" #Visualizar vídeo
item_local.server = "torrent" #Seridor Torrent
itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas
# Requerido para FilterTools
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío
if len(itemlist_f) > 0: #Si hay entradas filtradas...
itemlist.extend(itemlist_f) #Pintamos pantalla filtrada
else:
if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ...
thumb_separador = get_thumb("next.png") #... pintamos todo con aviso
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
#logger.debug("TORRENT: " + link_torrent + "MAGNET: " + link_magnet + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / tamaño: " + size + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
#logger.debug(item_local)
# Requerido para AutoPlay
autoplay.start(itemlist, item) #Lanzamos Autoplay
return itemlist
def actualizar_titulos(item):
logger.info()
item = generictools.update_title(item) #Llamamos al método que actualiza el título con tmdb.find_and_set_infoLabels
#Volvemos a la siguiente acción en el canal
return item
def search(item, texto):
logger.info("search:" + texto)
# texto = texto.replace(" ", "+")
try:
item.url = host + "?s=%s&x=0&y=0" % texto
itemlist = listado(item)
return itemlist
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host
item.extra = "peliculas"
item.category_new = 'newest'
itemlist = listado(item)
if "Página siguiente >>" in itemlist[-1].title:
itemlist.pop()
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -1,68 +0,0 @@
{
"id": "estadepelis",
"name": "Estadepelis",
"active": true,
"adult": false,
"language": ["esp", "lat"],
"thumbnail": "https://s24.postimg.cc/nsgit7fhh/estadepelis.png",
"banner": "https://s28.postimg.cc/ud0l032ul/estadepelis_banner.png",
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"VOS",
"Castellano"
]
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - Terror",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,467 +0,0 @@
# -*- coding: utf-8 -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'http://www.estadepelis.com/'
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
IDIOMAS = {'Latino': 'Latino', 'Sub Español': 'VOS'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['yourupload', 'openload', 'sendvid']
vars = {
'ef5ca18f089cf01316bbc967fa10f72950790c39ef5ca18f089cf01316bbc967fa10f72950790c39': 'http://tawnestdplsnetps.pw/',
'b48699bb49d4550f27879deeb948d4f7d9c5949a8': 'embed',
'JzewJkLlrvcFnLelj2ikbA': 'php?url=',
'p889c6853a117aca83ef9d6523335dc065213ae86': 'player',
'e20fb341325556c0fc0145ce10d08a970538987': 'http://yourupload.com/embed/'
}
tgenero = {"acción": "https://s3.postimg.cc/y6o9puflv/accion.png",
"animación": "https://s13.postimg.cc/5on877l87/animacion.png",
"aventura": "https://s10.postimg.cc/6su40czih/aventura.png",
"belico": "https://s23.postimg.cc/71itp9hcr/belica.png",
"ciencia ficción": "https://s9.postimg.cc/diu70s7j3/cienciaficcion.png",
"comedia": "https://s7.postimg.cc/ne9g9zgwb/comedia.png",
"comedia romántica": "https://s21.postimg.cc/xfsj7ua0n/romantica.png",
"cortometrajes": "https://s15.postimg.cc/kluxxwg23/cortometraje.png",
"crimen": "https://s4.postimg.cc/6z27zhirx/crimen.png",
"cristianas": "https://s7.postimg.cc/llo852fwr/religiosa.png",
"deportivas": "https://s13.postimg.cc/xuxf5h06v/deporte.png",
"drama": "https://s16.postimg.cc/94sia332d/drama.png",
"familiar": "https://s7.postimg.cc/6s7vdhqrf/familiar.png",
"fantasía": "https://s13.postimg.cc/65ylohgvb/fantasia.png",
"guerra": "https://s4.postimg.cc/n1h2jp2jh/guerra.png",
"historia": "https://s15.postimg.cc/fmc050h1n/historia.png",
"intriga": "https://s27.postimg.cc/v9og43u2b/intriga.png",
"misterios": "https://s1.postimg.cc/w7fdgf2vj/misterio.png",
"musical": "https://s29.postimg.cc/bbxmdh9c7/musical.png",
"romance": "https://s15.postimg.cc/fb5j8cl63/romance.png",
"suspenso": "https://s13.postimg.cc/wmw6vl1cn/suspenso.png",
"terror": "https://s7.postimg.cc/yi0gij3gb/terror.png",
"thriller": "https://s22.postimg.cc/5y9g0jsu9/thriller.png"}
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(item.clone(title="Peliculas",
action="menupeliculas",
thumbnail=get_thumb('movies', auto=True),
fanart='https://s8.postimg.cc/6wqwy2c2t/peliculas.png'
))
itemlist.append(item.clone(title="Series",
action="lista",
thumbnail=get_thumb('tvshows', auto=True),
fanart='https://s27.postimg.cc/iahczwgrn/series.png',
url=host + 'lista-de-series/',
extra='series'
))
itemlist.append(item.clone(title="Doramas",
action="lista", thumbnail=get_thumb('doramas', auto=True),
fanart='https://s15.postimg.cc/sjcthoa6z/doramas.png',
url=host + 'lista-de-doramas/',
extra='series'
))
itemlist.append(item.clone(title="Documentales",
action="lista",
thumbnail=get_thumb('documentaries', auto=True),
fanart='https://s16.postimg.cc/7xjj4bmol/documental.png',
url=host + 'lista-de-documentales/',
extra='peliculas'
))
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + 'search?q=',
thumbnail=get_thumb('search', auto=True),
fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
))
autoplay.show_option(item.channel, itemlist)
return itemlist
def menupeliculas(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Todas",
action="lista",
thumbnail=get_thumb('all', auto=True),
fanart='https://s18.postimg.cc/fwvaeo6qh/todas.png',
url=host + 'lista-de-peliculas/',
extra='peliculas'
))
itemlist.append(item.clone(title="Ultimas",
action="lista",
thumbnail=get_thumb('last', auto=True),
fanart='https://s22.postimg.cc/cb7nmhwv5/ultimas.png',
url=host,
extra='peliculas'
))
itemlist.append(item.clone(title="Generos",
action="generos",
thumbnail=get_thumb('genres', auto=True),
fanart='https://s3.postimg.cc/5s9jg2wtf/generos.png',
url=host,
extra='peliculas'
))
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
contentSerieName = ''
patron = '<div class=movie><div class=imagen><img src=(.*?) alt=(.*?) width=.*? height=.*?\/><a href=(.*?)><span '
patron += 'class=player>.*?class=year>(.*?)<\/span>'
matches = re.compile(patron, re.DOTALL).findall(data)
if item.extra == 'peliculas':
accion = 'findvideos'
else:
accion = 'temporadas'
for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedyear in matches:
scrapedurl = scrapedurl.translate(None, '"')
scrapedurl = scrapedurl.translate(None, "'")
url = host + scrapedurl
thumbnail = scrapedthumbnail
title = scrapedtitle
year = scrapedyear
if item.extra == 'series':
contentSerieName = scrapedtitle
itemlist.append(Item(channel=item.channel,
action=accion,
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=scrapedtitle,
extra=item.extra,
contentSerieName=contentSerieName,
infoLabels={'year': year}
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# #Paginacion
if itemlist != []:
actual_page_url = item.url
next_page = scrapertools.find_single_match(data, '<div class=siguiente><a href=(.*?)>')
url = host + next_page
if next_page != '':
itemlist.append(Item(channel=item.channel,
action="lista",
title='Siguiente >>>',
url=url,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
extra=item.extra
))
return itemlist
def generos(item):
logger.info()
itemlist = []
norep = []
data = httptools.downloadpage(item.url).data
logger.debug(data)
patron = '<li class="cat-item cat-item-.*?"><a href="([^"]+)".*?>([^<]+)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = host + scrapedurl
title = scrapedtitle.lower()
if title in tgenero:
thumbnail = tgenero[title.lower()]
else:
thumbnail = ''
itemactual = Item(channel=item.channel,
action='lista',
title=title, url=url,
thumbnail=thumbnail,
extra=item.extra
)
if title not in norep:
itemlist.append(itemactual)
norep.append(itemactual.title)
return itemlist
def temporadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li class="has-sub"><a href="([^"]+)"><span><b class="icon-bars"><\/b> ([^<]+)<\/span><\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
temp = 1
infoLabels = item.infoLabels
for scrapedurl, scrapedtitle in matches:
url = scrapedurl
title = scrapedtitle.strip()
contentSeasonNumber = temp
infoLabels['season'] = contentSeasonNumber
thumbnail = item.thumbnail
plot = scrapertools.find_single_match(data, '<p>([^<]+)<\/p>')
itemlist.append(Item(channel=item.channel,
action="episodiosxtemp",
title=title,
fulltitle=item.title,
url=url,
thumbnail=thumbnail,
contentSerieName=item.contentSerieName,
contentSeasonNumber=contentSeasonNumber,
plot=plot,
infoLabels=infoLabels
))
temp = temp + 1
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library",
extra="episodios",
contentSerieName=item.contentSerieName,
extra1=item.extra1,
temp=str(temp)
))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
temp = 'temporada-' + str(item.contentSeasonNumber)
patron = '<li>.\s*<a href="(.*?)">.\s*<span.*?datex">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedepisode in matches:
url = host + scrapedurl
title = item.contentSerieName + ' ' + scrapedepisode
thumbnail = item.thumbnail
fanart = ''
itemlist.append(Item(channel=item.channel,
action="findvideos",
title=title,
fulltitle=item.fulltitle,
url=url,
thumbnail=item.thumbnail,
plot=item.plot,
extra=item.extra,
contentSerieName=item.contentSerieName
))
return itemlist
def episodiosxtemp(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
temp = 'temporada-' + str(item.contentSeasonNumber)
patron = '<li>.\s*<a href="(.*?-' + temp + '.*?)">.\s*<span.*?datex">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, scrapedepisode in matches:
url = host + scrapedurl
title = item.contentSerieName + ' ' + scrapedepisode
scrapedepisode = re.sub(r'.*?x', '', scrapedepisode)
infoLabels['episode'] = scrapedepisode
thumbnail = item.thumbnail
fanart = ''
itemlist.append(Item(channel=item.channel,
action="findvideos",
title=title,
fulltitle=item.fulltitle,
url=url,
thumbnail=item.thumbnail,
plot=item.plot,
extra=item.extra,
contentSerieName=item.contentSerieName,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def dec(encurl):
logger.info()
url = ''
encurl = encurl.translate(None, "',(,),;")
encurl = encurl.split('+')
for cod in encurl:
if cod in vars:
url = url + vars[cod]
else:
url = url + cod
return url
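# Worked example (hypothetical encoded string, using the vars table above):
#   dec("e20fb341325556c0fc0145ce10d08a970538987+'abc123'")
# deletes quotes/parens/semicolons, splits on '+', maps known tokens through vars
# and concatenates the rest, returning 'http://yourupload.com/embed/abc123'.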
def findvideos(item):
logger.info()
itemlist = []
langs = dict()
data = httptools.downloadpage(item.url).data
patron = '<a onclick="return (play\d+).*?;"> (.*?) <\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for key, value in matches:
langs[key] = value.strip()
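# e.g. langs = {'play1': 'Latino', 'play2': 'Sub Español'} (hypothetical page values);
# each key is later matched against the play\d function that sets the iframe src.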
patron = 'function (play\d).*?servidores.*?attr.*?src.*?\+([^;]+);'
matches = re.compile(patron, re.DOTALL).findall(data)
title = item.title
enlace = scrapertools.find_single_match(data,
'var e20fb341325556c0fc0145ce10d08a970538987 =.*?"\/your\.".*?"([^"]+)"')
for scrapedlang, encurl in matches:
if 'e20fb34' in encurl:
url = dec(encurl)
url = url + enlace
else:
url = dec(encurl)
title = ''
server = ''
servers = {'/opl': 'openload', '/your': 'yourupload', '/sen': 'senvid', '/face': 'netutv', '/vk': 'vk',
'/jk':'streamcherry'}
server_id = re.sub(r'.*?embed|\.php.*', '', url)
if server_id and server_id in servers:
server = servers[server_id]
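# Example of the server_id extraction above (hypothetical URL): for
#   url = 'http://example.com/embed/opl.php?url=xyz'
# re.sub strips everything up to 'embed' and from '.php' onward, leaving '/opl',
# which the servers dict maps to 'openload'.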
if (scrapedlang in langs) and langs[scrapedlang] in list_language:
language = IDIOMAS[langs[scrapedlang]]
else:
language = 'Latino'
#
# if langs[scrapedlang] == 'Latino':
# idioma = '[COLOR limegreen]LATINO[/COLOR]'
# elif langs[scrapedlang] == 'Sub Español':
# idioma = '[COLOR red]SUB[/COLOR]'
if item.extra == 'peliculas':
title = item.contentTitle + ' (' + server + ') ' + language
plot = scrapertools.find_single_match(data, '<p>([^<]+)<\/p>')
else:
title = item.contentSerieName + ' (' + server + ') ' + language
plot = item.plot
thumbnail = servertools.guess_server_thumbnail(title)
if 'player' not in url and 'php' in url:
itemlist.append(item.clone(title=title,
url=url,
action="play",
plot=plot,
thumbnail=thumbnail,
server=server,
quality='',
language=language
))
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if 'your' in item.url:
item.url = 'http://www.yourupload.com/embed/' + scrapertools.find_single_match(data, 'src=".*?code=(.*?)"')
itemlist.append(item)
else:
itemlist = servertools.find_video_items(data=data)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return lista(item)
def newest(categoria):
logger.info()
itemlist = []
item = Item()
# categoria='peliculas'
try:
if categoria in ['peliculas','latino']:
item.url = host
elif categoria == 'infantiles':
item.url = host + 'search?q=animación'
elif categoria == 'terror':
item.url = host + 'search?q=terror'
item.extra = 'peliculas'
itemlist = lista(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -1,110 +0,0 @@
{
"id": "estrenosgo",
"name": "EstrenosGo",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"fanart": "estrenosgo.png",
"thumbnail": "estrenosgo.png",
"banner": "estrenosgo.png",
"categories": [
"movie",
"tvshow",
"torrent",
"direct",
"vos"
],
"settings": [
{
"default": true,
"enabled": true,
"id": "include_in_global_search",
"label": "Incluir en busqueda global",
"type": "bool",
"visible": true
},
{
"default": true,
"enabled": true,
"id": "modo_grafico",
"label": "Buscar información extra (TMDB)",
"type": "bool",
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"CAST",
"LAT",
"VO",
"VOS",
"VOSE"
]
},
{
"id": "emergency_urls",
"type": "list",
"label": "Se quieren guardar Enlaces de Emergencia por si se cae la Web?",
"default": 1,
"enabled": true,
"visible": true,
"lvalues": [
"No",
"Guardar",
"Borrar",
"Actualizar"
]
},
{
"id": "emergency_urls_torrents",
"type": "bool",
"label": "Se quieren guardar Torrents de Emergencia por si se cae la Web?",
"default": true,
"enabled": true,
"visible": "!eq(-1,'No')"
},
{
"id": "timeout_downloadpage",
"type": "list",
"label": "Timeout (segs.) en descarga de páginas o verificación de servidores",
"default": 5,
"enabled": true,
"visible": true,
"lvalues": [
"None",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10"
]
},
{
"id": "seleccionar_ult_temporadda_activa",
"type": "bool",
"label": "Seleccionar para Videoteca si estará activa solo la última Temporada",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": false
}
]
}

File diff suppressed because it is too large

View File

@@ -1,41 +0,0 @@
{
"id": "fanpelis",
"name":"Fanpelis",
"thumbnail":"http://fanpelis.com/wp-content/uploads/2018/05/111.png",
"banner":"",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"version": 1,
"categories": [
"movie",
"tvshow"
],
"settings": [
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,300 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel Fanpelis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import urlparse
from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb
host = "https://fanpelis.com/"
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(
Item(channel=item.channel,
title="Peliculas",
action="sub_menu",
url=host + "movies/",
thumbnail=get_thumb('movies', auto=True)))
itemlist.append(
Item(channel=item.channel,
title="Series",
action="sub_menu",
url=host + "series/",
thumbnail=get_thumb('tvshows', auto=True)))
itemlist.append(
Item(channel=item.channel,
title="Buscar",
action="search",
url=host,
thumbnail=get_thumb("search", auto=True)))
return itemlist
def sub_menu(item):
logger.info()
itemlist = list()
itemlist.append(
Item(channel=item.channel,
title="Ultimas",
action="list_all",
url=item.url,
thumbnail=get_thumb("last", auto=True)))
itemlist.append(
Item(channel=item.channel,
title="Generos",
action="categories",
url=host,
thumbnail=get_thumb('genres', auto=True)
))
itemlist.append(
Item(channel=item.channel,
title="Por Año",
action="categories",
url=host,
thumbnail=get_thumb('year', auto=True)
))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def categories(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.title == 'Generos':
patron = 'menu-item-object-category menu-item-\d+"><a href="([^"]+)">([^<]+)<'
else:
patron = 'menu-item-object-release-year menu-item-\d+"><a href="([^"]+)">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, title in matches:
itemlist.append(Item(channel=item.channel,
action="list_all",
title=title,
url=url
))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
try:
if texto != '':
item.texto = texto
return list_all(item)
else:
return []
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def list_all(item):
logger.info()
itemlist = []
if item.texto != '':
url = item.url + "?s=%s" % item.texto
else:
url = item.url
try:
data = get_source(url)
except:
return itemlist
data = data.replace("'", '"')
pattern = 'class="ml-item.*?"><a href="([^"]+)".*?oldtitle="([^"]+)".*?'
pattern += '<img data-original="([^"]+)".*?<div id(.*?)/a>'
matches = scrapertools.find_multiple_matches(data, pattern)
for url, title, thumb, info in matches:
year = scrapertools.find_single_match(info, 'rel="tag">(\d{4})<')
new_item = Item(channel=item.channel,
title=title,
url=url,
thumbnail=thumb,
infoLabels = {'year': year}
)
if 'series' in url:
new_item.action = 'seasons'
new_item.contentSerieName = title
else:
new_item.action = 'findvideos'
new_item.contentTitle = title
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, seekTmdb=True)
active_page = scrapertools.find_single_match(data, '<li class="active"><a class="">(\d+)</a>')
if active_page: # avoids int('') when the page has no pagination block
if item.texto != '':
next_page = host + 'page/%s/' % (int(active_page) + 1)
else:
next_page = item.url + 'page/%s/' % (int(active_page) + 1)
url = urlparse.urljoin(host, next_page)
itemlist.append(Item(channel=item.channel,
action="list_all",
title=">> Página siguiente",
url=url,
texto=item.texto,
thumbnail=get_thumb("next.png")))
return itemlist
def seasons(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<strong>Temporada(\d+)</strong>'
matches = re.compile(patron, re.DOTALL).findall(data)
for temporada in matches:
title = 'Temporada %s' % temporada
contentSeasonNumber = temporada
item.infoLabels['season'] = contentSeasonNumber
itemlist.append(item.clone(action='episodesxseason',
title=title,
contentSeasonNumber=contentSeasonNumber
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library",
extra="episodios",
contentSerieName=item.contentSerieName,
contentSeasonNumber=contentSeasonNumber
))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseason(tempitem)
return itemlist
def episodesxseason(item):
logger.info()
itemlist = []
season = item.contentSeasonNumber
data = get_source(item.url)
data = scrapertools.find_single_match(data, '<strong>Temporada%s</strong>.*?</ul>' % season)
patron = '<a href="([^"]+)"><i class="fa fa-play"></i>([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
ep = 1
for scrapedurl, scrapedtitle in matches:
epi = str(ep)
title = season + 'x%s - Episodio %s' % (epi, epi)
url = scrapedurl
contentEpisodeNumber = epi
item.infoLabels['episode'] = contentEpisodeNumber
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
contentEpisodeNumber=contentEpisodeNumber,
))
ep += 1
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):
logger.info()
import urllib
itemlist = []
data = get_source(item.url)
player = scrapertools.find_single_match(data, "({'action': 'movie_player','foobar_id':\d+,})")
post = eval(player)
post = urllib.urlencode(post)
data = httptools.downloadpage(host+'wp-admin/admin-ajax.php', post=post, headers={'Referer':item.url}).data
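# Illustrative round trip (hypothetical id): the page embeds a dict literal such as
#   {'action': 'movie_player','foobar_id':1234,}
# which eval() turns into a dict and urlencode() into 'action=movie_player&foobar_id=1234';
# POSTing that to wp-admin/admin-ajax.php returns the <iframe src="..."> player markup.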
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = '<iframe src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for url in matches:
itemlist.append(Item(channel=item.channel, title='%s', url=url, action='play', infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))
return itemlist
def newest(category):
logger.info()
item = Item()
try:
if category == 'peliculas':
item.url = host + "movies/"
elif category == 'infantiles':
item.url = host + 'genre/animacion'
elif category == 'terror':
item.url = host + 'genre/terror'
itemlist = list_all(item)
if itemlist[-1].title == '>> Página siguiente':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return itemlist

View File

@@ -1,62 +0,0 @@
{
"id": "gmobi",
"name": "GNULA.mobi",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "https://gnula.mobi/wp-content/uploads/2018/12/gnula-logo.png",
"banner": "",
"categories": [
"movie"
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"ESP",
"VOSE"
]
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -1,159 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Alfa
# ------------------------------------------------------------
import re, urllib, urlparse
import base64
from channels import autoplay
from platformcode import config, logger, platformtools
from core.item import Item
from core import httptools, scrapertools, jsontools, tmdb
from core import servertools
from channels import filtertools
host = 'http://www.gnula.mobi'
IDIOMAS = {'Latino': 'LAT', 'Castellano': 'ESP', 'Subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_servers = ['rapidvideo', 'streamgo', 'openload']
list_quality = ['HD', 'BR-S', 'TS']
__channel__='gmobi'
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', __channel__)
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', __channel__)
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
__modo_grafico__ = True
def mainlist(item):
logger.info()
itemlist = list()
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(item.clone(title="Novedades", action="lista", url=host + "/categorias/estrenos"))
itemlist.append(item.clone(title="Categorias" , action="categorias", url= host))
itemlist.append(item.clone(title="Buscar", action="search"))
itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion", folder=False))
autoplay.show_option(item.channel, itemlist)
return itemlist
def configuracion(item):
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return lista(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data,'<a>CATEGORÍAS</a>(.*?)</ul>')
patron = '<a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append(item.clone(channel=item.channel, action="lista", title=scrapedtitle , url=scrapedurl ,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<article id="post-\d+".*?'
patron += '<a href="([^"]+)".*?'
patron += '<div class="Image">(.*?)</div>.*?'
patron += '"Title">([^"]+)</h2>.*?'
patron += '"Year">(\d+)</span>.*?'
patron += '<span class="Qlty">\w+ \(([^"]+)\)</span>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, calidad in matches:
thumbnail = scrapertools.find_single_match(scrapedthumbnail, 'src="([^"]+)"')
scrapedtitle = scrapedtitle.replace("(%s)" % scrapedyear, "")
if not config.get_setting('unify'):
title = '%s [COLOR red] %s [/COLOR] (%s)' % (scrapedtitle, calidad , scrapedyear)
else:
title = ''
if not '>TV<' in scrapedthumbnail:
itemlist.append(item.clone(action="findvideos", title=title, url=scrapedurl, thumbnail=thumbnail,
contentTitle = scrapedtitle, quality=calidad, infoLabels={'year':scrapedyear}) )
tmdb.set_infoLabels(itemlist, True)
next_page_url = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)"')
if next_page_url != "":
itemlist.append(item.clone(action="lista", title="Siguiente >>", text_color="yellow",
url=next_page_url))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '"server":"[^"]+",'
patron += '"lang":"([^"]+)",'
patron += '"quality":"\w+ \(([^"]+)\)",'
patron += '"link":"https:.*?=([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for lang, quality, url in matches:
if lang in IDIOMAS:
lang = IDIOMAS[lang]
url = base64.b64decode(url + "==")
if not config.get_setting('unify'):
title = '[COLOR red] %s [/COLOR] (%s)' % (quality , lang)
else:
title = ''
itemlist.append(item.clone(action = "play", title = '%s'+ title, url = url, language=lang, quality=quality,
fulltitle = item.title))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Requerido para Filtrar enlaces
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra !='findvideos' :
itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
extra="findvideos", contentTitle=item.contentTitle))
return itemlist
def play(item):
logger.info()
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.fulltitle
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist

View File

@@ -1,23 +0,0 @@
{
"id": "gnula",
"name": "Gnula",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "gnula.png",
"banner": "gnula.png",
"categories": [
"movie",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,193 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = "http://gnula.nu/"
host_search = "https://cse.google.com/cse/element/v1?rsz=filtered_cse&num=20&hl=es&source=gcsc&gss=.es&sig=c891f6315aacc94dc79953d1f142739e&cx=014793692610101313036:vwtjajbclpq&q=%s&safe=off&cse_tok=%s&googlehost=www.google.com&callback=google.search.Search.csqr6098&nocache=1540313852177&start=0"
item_per_page = 20
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Estrenos", action="peliculas",
url= host +"peliculas-online/lista-de-peliculas-online-parte-1/", viewmode="movie",
thumbnail=get_thumb('premieres', auto=True), first=0))
itemlist.append(
Item(channel=item.channel, title="Generos", action="generos", url= host + "generos/lista-de-generos/",
thumbnail=get_thumb('genres', auto=True),))
itemlist.append(Item(channel=item.channel, title="Recomendadas", action="peliculas",
url= host + "peliculas-online/lista-de-peliculas-recomendadas/", viewmode="movie",
thumbnail=get_thumb('recomended', auto=True), first=0))
itemlist.append(Item(channel = item.channel, action = ""))
itemlist.append(
Item(channel=item.channel, title="Buscar", action="search", url = host_search,
thumbnail=get_thumb('search', auto=True),))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
data = httptools.downloadpage(host).data
cxv = scrapertools.find_single_match(data, 'cx" value="([^"]+)"')
data = httptools.downloadpage("https://cse.google.es/cse.js?hpg=1&cx=%s" %cxv).data
cse_token = scrapertools.find_single_match(data, 'cse_token": "([^"]+)"')
item.url = host_search %(texto, cse_token)
try:
return sub_search(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def sub_search(item):
logger.info()
itemlist = []
while True:
data = httptools.downloadpage(item.url).data
if len(data) < 500 :
break
page = int(scrapertools.find_single_match(item.url, ".*?start=(\d+)")) + item_per_page
item.url = scrapertools.find_single_match(item.url, "(.*?start=)") + str(page)
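# Paging sketch (based on item_per_page = 20 above): the '...&start=N' parameter in
# host_search advances by 20 on each pass, e.g. start=0 -> start=20 -> start=40,
# until Google CSE returns an almost empty payload (< 500 bytes) and the loop breaks.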
patron = '(?s)clicktrackUrl":\s*".*?q=(.*?)".*?'
patron += 'title":\s*"([^"]+)".*?'
patron += '"src":\s*"([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedurl = scrapertools.find_single_match(scrapedurl, ".*?online/")
scrapedtitle = scrapedtitle.decode("unicode-escape").replace(" online", "").replace("<b>", "").replace("</b>", "")
if "ver-" not in scrapedurl:
continue
year = scrapertools.find_single_match(scrapedtitle, "\d{4}")
contentTitle = scrapedtitle.replace(scrapertools.find_single_match(scrapedtitle, '\[.+'), "")
contentTitle = contentTitle.replace("(%s)" % year, "").replace("Ver", "").strip()
itemlist.append(Item(action = "findvideos",
channel = item.channel,
contentTitle = contentTitle,
infoLabels = {"year":year},
title = scrapedtitle,
thumbnail = scrapedthumbnail,
url = scrapedurl,
))
tmdb.set_infoLabels_itemlist(itemlist, True)
return itemlist
def generos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<spa[^>]+>Lista de g(.*?)/table')
patron = '<strong>([^<]+)</strong> .<a href="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for genero, scrapedurl in matches:
title = scrapertools.htmlclean(genero)
if not item.url.startswith("http"): scrapedurl = item.url + scrapedurl
itemlist.append(Item(channel = item.channel,
action = 'peliculas',
title = title,
url = scrapedurl,
viewmode = "movie",
first=0))
itemlist = sorted(itemlist, key=lambda item: item.title)
return itemlist
def peliculas(item):
logger.info()
itemlist = []
next = True
data = httptools.downloadpage(item.url).data
patron = '<a class="Ntooltip" href="([^"]+)">([^<]+)<span><br[^<]+'
patron += '<img src="([^"]+)"></span></a>(.*?)<br'
matches = scrapertools.find_multiple_matches(data, patron)
first = item.first
last = first + 19
if last > len(matches):
last = len(matches)
next = False
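# Windowing example for the slice below (hypothetical state): with item.first = 0 the
# loop paints matches[0:19]; the 'Siguiente >>' item then carries first = 19, so the
# next call paints matches[19:38], and 'next' is dropped once the window passes the end.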
for scrapedurl, scrapedtitle, scrapedthumbnail, resto in matches[first:last]:
language = []
plot = scrapertools.htmlclean(resto).strip()
languages = scrapertools.find_multiple_matches(plot, r'\((V.)\)')
quality = scrapertools.find_single_match(plot, r'(?:\[.*?\].*?)\[(.*?)\]')
for lang in languages:
language.append(lang)
title = scrapedtitle + " " + plot
if not scrapedurl.startswith("http"):
scrapedurl = item.url + scrapedurl
year = scrapertools.find_single_match(scrapedurl, "\-(\d{4})\-")
contentTitle = scrapedtitle.replace(scrapertools.find_single_match(scrapedtitle, '\[.+'), "")
itemlist.append(Item(action = 'findvideos',
channel = item.channel,
contentTitle = contentTitle,
infoLabels = {"year":year},
language=language,
plot = plot,
quality=quality,
title = title,
thumbnail = scrapedthumbnail,
url = scrapedurl
))
tmdb.set_infoLabels_itemlist(itemlist, True)
#paginacion
url_next_page = item.url
first = last
if next:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='peliculas', first=first))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
#item.plot = scrapertools.find_single_match(data, '<div class="entry">(.*?)<div class="iframes">')
#item.plot = scrapertools.htmlclean(item.plot).strip()
#item.contentPlot = item.plot
patron = '<strong>Ver película online.*?>.*?>([^<]+)'
scrapedopcion = scrapertools.find_single_match(data, patron)
titulo_opcional = scrapertools.find_single_match(scrapedopcion, ".*?, (.*)").upper()
bloque = scrapertools.find_multiple_matches(data, 'contenedor_tab.*?/table')
cuenta = 0
for datos in bloque:
cuenta = cuenta + 1
patron = '<em>((?:opción|opcion) %s.*?)</em>' %cuenta
scrapedopcion = scrapertools.find_single_match(data, patron)
titulo_opcion = "(" + scrapertools.find_single_match(scrapedopcion, "op.*?, (.*)").upper() + ")"
if "TRAILER" in titulo_opcion or titulo_opcion == "()":
titulo_opcion = "(" + titulo_opcional + ")"
urls = scrapertools.find_multiple_matches(datos, '(?:src|href)="([^"]+)')
titulo = "Ver en %s " + titulo_opcion
for url in urls:
itemlist.append(item.clone(action = "play",
title = titulo,
url = url
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
#tmdb.set_infoLabels_itemlist(itemlist, True)
if itemlist:
if config.get_videolibrary_support():
itemlist.append(Item(channel = item.channel, action = ""))
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
fulltitle = item.contentTitle
))
return itemlist
def play(item):
item.thumbnail = item.contentThumbnail
return [item]

View File

@@ -1,173 +0,0 @@
{
"id": "gnula_biz",
"name": "Gnula.Biz",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "gnula_biz.png",
"banner": "gnula_biz.png",
"categories": [
"movie",
"tvshow",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE",
"VO"
]
},
{
"id": "save_last_search",
"type": "bool",
"label": "Guardar última búsqueda",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Películas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Incluir en Novedades - Series",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 2,
"enabled": true,
"visible": true,
"lvalues": [
"Perfil 3",
"Perfil 2",
"Perfil 1",
"Ninguno"
]
},
{
"id": "menu_info",
"type": "bool",
"label": "Mostrar menú intermedio película/episodio",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "last_page",
"type": "bool",
"label": "Ocultar opción elegir página en películas (Kodi)",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "filtro_defecto_peliculas",
"type": "label",
"enabled": true,
"visible": false
},
{
"id": "pers_peliculas1",
"type": "label",
"enabled": true,
"visible": false
},
{
"id": "pers_peliculas2",
"type": "label",
"enabled": true,
"visible": false
},
{
"pers_peliculas3": {
"type": "label",
"enabled": true,
"visible": false
}
},
{
"id": "filtro_defecto_series",
"type": "label",
"enabled": true,
"visible": false
},
{
"id": "pers_series1",
"type": "label",
"enabled": true,
"visible": false
},
{
"id": "pers_series2",
"type": "label",
"enabled": true,
"visible": false
},
{
"id": "pers_series3",
"type": "label",
"enabled": true,
"visible": false
},
{
"id": "last_search",
"type": "text",
"enabled": true,
"visible": false
}
]
}

View File

@@ -1,790 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
from channels import autoplay
from channels import filtertools
IDIOMAS = {'Latino': 'LAT', 'Castellano':'CAST', 'Vo':'VO', 'Vose': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'powvideo', 'rapidvideo', 'streamango', 'streamcloud', 'flashx', 'gamovideo', 'streamplay']
__modo_grafico__ = config.get_setting('modo_grafico', 'gnula_biz')
__perfil__ = int(config.get_setting('perfil', "gnula_biz"))
__menu_info__ = config.get_setting('menu_info', 'gnula_biz')
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'gnula_biz')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'gnula_biz')
# Fijar perfil de color
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08']]
if __perfil__ < 3:
color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
color1 = color2 = color3 = color4 = color5 = ""
host = "http://gnula.biz"
def mainlist(item):
logger.info()
item.text_color = color1
itemlist = []
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(item.clone(action="seccion_peliculas", title="Películas", fanart="http://i.imgur.com/PjJaW8o.png",
url=host + "/catalogue?type=peliculas", thumbnail=get_thumb('movies', auto=True)))
# Seccion series
itemlist.append(item.clone(action="seccion_series", title="Series",
url=host + "/ultimos-capitulos", fanart="http://i.imgur.com/9loVksV.png",
thumbnail=get_thumb('tvshows', auto=True)))
itemlist.append(item.clone(action="peliculas", title="Documentales", fanart="http://i.imgur.com/Q7fsFI6.png",
url=host + "/catalogue?type=peliculas&genre=documental",
thumbnail=get_thumb('documentaries', auto=True)))
if config.get_setting("adult_mode") != 0:
itemlist.append(item.clone(action="peliculas", title="Sección Adultos +18",
url=host + "/catalogue?type=adultos",
fanart="http://i.imgur.com/kIvE1Zh.png", thumbnail=get_thumb('adults', auto=True)))
itemlist.append(item.clone(title="Buscar...", action="local_search", thumbnail=get_thumb('search', auto=True)))
itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion", folder=False))
autoplay.show_option(item.channel, itemlist)
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search?q=%s" % texto
try:
return busqueda(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def local_search(item):
logger.info()
text = ""
if config.get_setting("save_last_search", item.channel):
text = config.get_setting("last_search", item.channel)
from platformcode import platformtools
texto = platformtools.dialog_input(default=text, heading="Buscar en Gnula.Biz")
if texto is None:
return
if config.get_setting("save_last_search", item.channel):
config.set_setting("last_search", texto, item.channel)
return search(item, texto)
def busqueda(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="poster-media-card">(.*?)(?:<li class="search-results-item media-item">|<footer>)'
bloque = scrapertools.find_multiple_matches(data, patron)
for match in bloque:
patron = 'href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?' \
'<p class="search-results-main-info">.*?del año (\d+).*?' \
'p class.*?>(.*?)<'
matches = scrapertools.find_multiple_matches(match, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, year, plot in matches:
scrapedtitle = scrapedtitle.capitalize()
item.infoLabels["year"] = year
plot = scrapertools.htmlclean(plot)
new_item = Item(channel=item.channel, thumbnail= scrapedthumbnail, plot=plot)
if "/serie/" in scrapedurl:
new_item.show = scrapedtitle
new_item.contentType = 'tvshow'
scrapedurl += "/episodios"
title = " [Serie]"
new_item.action = 'episodios'
elif "/pelicula/" in scrapedurl:
new_item.action = "findvideos"
filter_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w200_and_h300_bestv2", "")
filter_list = {"poster_path": filter_thumb}
filter_list = filter_list.items()
new_item.contentType = "movie"
new_item.extra='media'
new_item.contentTitle= scrapedtitle
new_item.infoLabels['filtro'] = filter_list
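# The 'filtro' infoLabel built above, with an illustrative value: stripping the TMDB
# base leaves the bare poster path, e.g. [('poster_path', '/abc123.jpg')], so the
# TMDB lookup can pick the exact match among same-named titles (inferred from usage).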
else:
continue
new_item.title = scrapedtitle + " (" + year + ")"
new_item.url = scrapedurl
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Más resultados')
if next_page != "":
next_page = urlparse.urljoin(host, next_page)
itemlist.append(Item(channel=item.channel, action="busqueda", title=">> Siguiente", url=next_page,
thumbnail=item.thumbnail, text_color=color3))
return itemlist
def filtro(item):
logger.info()
list_controls = []
valores = {}
strings = {}
# Se utilizan los valores por defecto/guardados o los del filtro personalizado
if not item.values:
valores_guardados = config.get_setting("filtro_defecto_" + item.extra, item.channel)
else:
valores_guardados = item.values
item.values = ""
if valores_guardados:
dict_values = valores_guardados
else:
dict_values = None
if dict_values:
dict_values["filtro_per"] = 0
excluidos = ['País', 'Películas', 'Series', 'Destacar']
data = httptools.downloadpage(item.url).data
matches = scrapertools.find_multiple_matches(data, '<div class="dropdown-sub[^>]+>(\S+)(.*?)</ul>')
i = 0
for filtro_title, values in matches:
if filtro_title in excluidos: continue
filtro_title = filtro_title.replace("Tendencia", "Ordenar por")
id = filtro_title.replace("Género", "genero").replace("Año", "year").replace(" ", "_").lower()
list_controls.append({'id': id, 'label': filtro_title, 'enabled': True,
'type': 'list', 'default': 0, 'visible': True})
valores[id] = []
valores[id].append('')
strings[filtro_title] = []
list_controls[i]['lvalues'] = []
if filtro_title == "Ordenar por":
list_controls[i]['lvalues'].append('Más recientes')
strings[filtro_title].append('Más recientes')
else:
list_controls[i]['lvalues'].append('Cualquiera')
strings[filtro_title].append('Cualquiera')
patron = '<li>.*?(?:genre|release|quality|language|order)=([^"]+)">([^<]+)<'
matches_v = scrapertools.find_multiple_matches(values, patron)
for value, key in matches_v:
if value == "action-adventure": continue
list_controls[i]['lvalues'].append(key)
valores[id].append(value)
strings[filtro_title].append(key)
i += 1
item.valores = valores
item.strings = strings
if "Filtro Personalizado" in item.title:
return filtrado(item, valores_guardados)
list_controls.append({'id': 'espacio', 'label': '', 'enabled': False,
'type': 'label', 'default': '', 'visible': True})
list_controls.append({'id': 'save', 'label': 'Establecer como filtro por defecto', 'enabled': True,
'type': 'bool', 'default': False, 'visible': True})
list_controls.append({'id': 'filtro_per', 'label': 'Guardar filtro en acceso directo...', 'enabled': True,
'type': 'list', 'default': 0, 'visible': True, 'lvalues': ['No guardar', 'Filtro 1',
'Filtro 2', 'Filtro 3']})
list_controls.append({'id': 'remove', 'label': 'Eliminar filtro personalizado...', 'enabled': True,
'type': 'list', 'default': 0, 'visible': True, 'lvalues': ['No eliminar', 'Filtro 1',
'Filtro 2', 'Filtro 3']})
from platformcode import platformtools
return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values,
caption="Filtra los resultados", item=item, callback='filtrado')
def filtrado(item, values):
values_copy = values.copy()
# Guarda el filtro para que sea el que se cargue por defecto
if "save" in values and values["save"]:
values_copy.pop("remove")
values_copy.pop("filtro_per")
values_copy.pop("save")
config.set_setting("filtro_defecto_" + item.extra, values_copy, item.channel)
# Elimina el filtro personalizado elegido
if "remove" in values and values["remove"] != 0:
config.set_setting("pers_" + item.extra + str(values["remove"]), "", item.channel)
values_copy = values.copy()
# Guarda el filtro en un acceso directo personalizado
if "filtro_per" in values and values["filtro_per"] != 0:
index = item.extra + str(values["filtro_per"])
values_copy.pop("filtro_per")
values_copy.pop("save")
values_copy.pop("remove")
config.set_setting("pers_" + index, values_copy, item.channel)
genero = item.valores["genero"][values["genero"]]
year = item.valores["year"][values["year"]]
calidad = item.valores["calidad"][values["calidad"]]
idioma = item.valores["idioma"][values["idioma"]]
order = item.valores["ordenar_por"][values["ordenar_por"]]
strings = []
for key, value in dict(item.strings).items():
key2 = key.replace("Género", "genero").replace("Año", "year").replace(" ", "_").lower()
strings.append(key + ": " + value[values[key2]])
item.valores = "Filtro: " + ", ".join(sorted(strings))
item.strings = ""
item.url = host + "/catalogue?type=%s&genre=%s&release=%s&quality=%s&language=%s&order=%s" % \
(item.extra, genero, year, calidad, idioma, order)
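# Resulting URL sketch (hypothetical selections): genero='terror', year='2018' and the
# rest left empty would yield
#   host + '/catalogue?type=peliculas&genre=terror&release=2018&quality=&language=&order='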
return globals()[item.extra](item)
def seccion_peliculas(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="peliculas", title="Novedades", fanart="http://i.imgur.com/PjJaW8o.png",
url=host + "/catalogue?type=peliculas", thumbnail=get_thumb('newest', auto=True)))
itemlist.append(item.clone(action="peliculas", title="Estrenos",
url=host + "/estrenos-de-cine", fanart="http://i.imgur.com/PjJaW8o.png",
thumbnail=get_thumb('premieres', auto=True)))
itemlist.append(item.clone(action="filtro", title="Filtrar películas", extra="peliculas",
url=host + "/catalogue?type=peliculas",
fanart="http://i.imgur.com/PjJaW8o.png"))
# Filtros personalizados para peliculas
for i in range(1, 4):
filtros = config.get_setting("pers_peliculas" + str(i), item.channel)
if filtros:
title = "Filtro Personalizado " + str(i)
new_item = item.clone()
new_item.values = filtros
itemlist.append(new_item.clone(action="filtro", title=title, fanart="http://i.imgur.com/PjJaW8o.png",
url=host + "/catalogue?type=peliculas", extra="peliculas"))
itemlist.append(item.clone(action="mapa", title="Mapa de películas", extra="peliculas",
url=host + "/mapa-de-peliculas",
fanart="http://i.imgur.com/PjJaW8o.png"))
return itemlist
def seccion_series(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="ultimos", title="Últimos capítulos",
url=host + "/ultimos-capitulos", fanart="http://i.imgur.com/9loVksV.png",
thumbnail=get_thumb('new episodes', auto=True)))
itemlist.append(item.clone(action="series", title="Series recientes",
url=host + "/catalogue?type=series",
fanart="http://i.imgur.com/9loVksV.png", thumbnail=get_thumb('recents', auto=True)))
itemlist.append(item.clone(action="filtro", title="Filtrar series", extra="series",
url=host + "/catalogue?type=series",
fanart="http://i.imgur.com/9loVksV.png"))
# Custom series filters
for i in range(1, 4):
filtros = config.get_setting("pers_series" + str(i), item.channel)
if filtros:
title = " Filtro Personalizado " + str(i)
new_item = item.clone()
new_item.values = filtros
itemlist.append(new_item.clone(action="filtro", title=title, fanart="http://i.imgur.com/9loVksV.png",
url=host + "/catalogue?type=series", extra="series"))
itemlist.append(item.clone(action="mapa", title="Mapa de series", extra="series",
url=host + "/mapa-de-series",
fanart="http://i.imgur.com/9loVksV.png"))
return itemlist
def mapa(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li class="sitemap-initial"> <a class="initial-link" href="([^"]+)">(.*?)</a>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
scrapedurl = host + scrapedurl
scrapedtitle = scrapedtitle.capitalize()
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action=item.extra, extra="mapa"))
return itemlist
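# Result cards mark the available audio tracks with css classes
# (medium-es/vs/la/en); they are mapped to CAST/VOSE/LAT/VO tags in the title.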
def peliculas(item):
logger.info()
itemlist = []
if "valores" in item and item.valores:
itemlist.append(item.clone(action="", title=item.valores, text_color=color4))
action = "findvideos"
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_multiple_matches(data,
'<div class="media-card "(.*?)<div class="hidden-info">')
for match in bloque:
if item.extra == "mapa":
patron = '.*?src="([^"]+)".*?href="([^"]+)">([^<]+)</a>'
matches = scrapertools.find_multiple_matches(match, patron)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(host, scrapedurl)
filter_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w200_and_h300_bestv2", "")
filter_list = {"poster_path": filter_thumb}
filter_list = filter_list.items()
itemlist.append(Item(channel=item.channel, action=action, title=scrapedtitle, url=url, extra="media",
thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, fulltitle=scrapedtitle,
text_color=color2, contentType="movie", infoLabels={'filtro':filter_list}))
else:
patron = '<div class="audio-info">(.*?)<div (class="quality.*?)src="([^"]+)".*?href="([^"]+)">([^<]+)</a>'
matches = scrapertools.find_multiple_matches(match, patron)
for idiomas, calidad, scrapedthumbnail, scrapedurl, scrapedtitle in matches:
calidad = scrapertools.find_single_match(calidad, '<div class="quality-info".*?>([^<]+)</div>')
if calidad:
calidad = calidad.capitalize().replace("Hd", "HD")
audios = []
if "medium-es" in idiomas: audios.append('CAST')
if "medium-vs" in idiomas: audios.append('VOSE')
if "medium-la" in idiomas: audios.append('LAT')
if "medium-en" in idiomas or 'medium-"' in idiomas:
audios.append('VO')
title = "%s [%s]" % (scrapedtitle, "/".join(audios))
if calidad:
title += " (%s)" % calidad
url = urlparse.urljoin(host, scrapedurl)
filter_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w200_and_h300_bestv2", "")
filter_list = {"poster_path": filter_thumb}
filter_list = filter_list.items()
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, extra="media",
thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, fulltitle=scrapedtitle,
text_color=color2, contentType="movie", quality=calidad, language=audios,
infoLabels={'filtro':filter_list}))
next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Siguiente')
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if next_page != "" and item.title != "":
itemlist.append(Item(channel=item.channel, action="peliculas", title=">> Siguiente", url=next_page,
thumbnail=item.thumbnail, extra=item.extra, text_color=color3))
if not config.get_setting("last_page", item.channel) and config.is_xbmc():
itemlist.append(Item(channel=item.channel, action="select_page", title="Ir a página...", url=next_page,
thumbnail=item.thumbnail, text_color=color5))
return itemlist
def ultimos(item):
logger.info()
item.text_color = color2
action = "episodios"
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_multiple_matches(data, '<div class="media-card "(.*?)<div class="info-availability '
'one-line">')
for match in bloque:
patron = '<div class="audio-info">(.*?)<img class.*?src="([^"]+)".*?href="([^"]+)">([^<]+)</a>'
matches = scrapertools.find_multiple_matches(match, patron)
for idiomas, scrapedthumbnail, scrapedurl, scrapedtitle in matches:
show = re.sub(r'(\s*[\d]+x[\d]+\s*)', '', scrapedtitle)
audios = []
if "medium-es" in idiomas: audios.append('CAST')
if "medium-vs" in idiomas: audios.append('VOSE')
if "medium-la" in idiomas: audios.append('LAT')
if "medium-en" in idiomas or 'medium-"' in idiomas:
audios.append('VO')
title = "%s - %s" % (show, re.sub(show, '', scrapedtitle))
if audios:
title += " [%s]" % "/".join(audios)
url = urlparse.urljoin(host, scrapedurl)
itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=scrapedthumbnail,
contentSerieName=show, fulltitle=show, show=show,
text_color=color2, extra="ultimos", contentType="tvshow"))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Siguiente')
if next_page != "":
itemlist.append(item.clone(action="ultimos", title=">> Siguiente", url=next_page, text_color=color3))
return itemlist
def series(item):
logger.info()
itemlist = []
if "valores" in item:
itemlist.append(item.clone(action="", title=item.valores, text_color=color4))
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_multiple_matches(data, '<div class="media-card "(.*?)<div class="hidden-info">')
for match in bloque:
patron = '<img class.*?src="([^"]+)".*?href="([^"]+)">([^<]+)</a>'
matches = scrapertools.find_multiple_matches(match, patron)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(host, scrapedurl + "/episodios")
itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=url,
thumbnail=scrapedthumbnail, contentSerieName=scrapedtitle, fulltitle=scrapedtitle,
show=scrapedtitle, text_color=color2, contentType="tvshow"))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
except:
pass
next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Siguiente')
if next_page != "":
title = ">> Siguiente - Página " + scrapertools.find_single_match(next_page, 'page=(\d+)')
itemlist.append(Item(channel=item.channel, action="series", title=title, url=next_page,
thumbnail=item.thumbnail, extra=item.extra, text_color=color3))
return itemlist
def menu_info(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
year = scrapertools.find_single_match(data, '<div class="media-summary">.*?release.*?>(\d+)<')
if year != "" and not "tmdb_id" in item.infoLabels:
try:
from core import tmdb
item.infoLabels["year"] = year
tmdb.set_infoLabels_item(item, __modo_grafico__)
except:
pass
if item.infoLabels["plot"] == "":
sinopsis = scrapertools.find_single_match(data, '<p id="media-plot".*?>.*?\.\.\.(.*?)Si te parece')
item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)
id = scrapertools.find_single_match(item.url, '/(\d+)/')
data_trailer = httptools.downloadpage(host + "/media/trailer?idm=%s&mediaType=1" % id, ignore_response_code=True).data
try:
trailer_url = jsontools.load(data_trailer)["video"]["url"]
if trailer_url != "": item.infoLabels["trailer"] = trailer_url
except:
pass
title = "Ver enlaces %s - [" + item.contentTitle + "]"
itemlist.append(item.clone(action="findvideos", title=title % "Online", extra="media", type="streaming"))
itemlist.append(item.clone(action="findvideos", title=title % "de Descarga", extra="media", type="download"))
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
text_color="magenta", context=""))
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library", text_color=color5,
title="Añadir película a la videoteca", url=item.url, thumbnail=item.thumbnail,
fanart=item.fanart, fulltitle=item.fulltitle,
extra="media|"))
return itemlist
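# Episode rows carry a "1x05"-style tag; season and episode are split out of
# it so tmdb can match every entry before the list is reversed.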
def episodios(item):
logger.info()
itemlist = []
if item.extra == "ultimos":
data = httptools.downloadpage(item.url).data
item.url = scrapertools.find_single_match(data, '<a href="([^"]+)" class="h1-like media-title"')
item.url += "/episodios"
data = httptools.downloadpage(item.url).data
action = "findvideos"
patron = '<div class="ep-list-number">.*?href="([^"]+)">([^<]+)</a>.*?<span class="name">([^<]+)</span>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, episode, scrapedtitle in matches:
new_item = item.clone(action=action, url=scrapedurl, text_color=color2, contentType="episode")
new_item.contentSeason = episode.split("x")[0]
new_item.contentEpisodeNumber = episode.split("x")[1]
new_item.title = episode + " - " + scrapedtitle
new_item.extra = "episode"
if "episodios" in item.extra or item.path:
new_item.extra = "episode|"
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
itemlist.reverse()
if "episodios" not in item.extra and not item.path:
id = scrapertools.find_single_match(item.url, '/(\d+)/')
data_trailer = httptools.downloadpage(host + "/media/trailer?idm=%s&mediaType=1" % id, ignore_response_code=True).data
try:
trailer_url = jsontools.load(data_trailer)["video"]["url"]
if trailer_url != "": item.infoLabels["trailer"] = trailer_url
except:
pass
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
text_color="magenta"))
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, action="add_serie_to_library", text_color=color5,
title="Añadir serie a la videoteca", show=item.show, thumbnail=item.thumbnail,
url=item.url, fulltitle=item.fulltitle, fanart=item.fanart,
extra="episodios###episodios",
contentTitle=item.fulltitle))
return itemlist
def menu_info_episode(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if item.show == "":
item.show = scrapertools.find_single_match(data, 'class="h1-like media-title".*?>([^<]+)</a>')
episode = scrapertools.find_single_match(data, '<span class="indicator">([^<]+)</span>')
item.infoLabels["season"] = episode.split("x")[0]
item.infoLabels["episode"] = episode.split("x")[1]
try:
from core import tmdb
tmdb.set_infoLabels_item(item, __modo_grafico__)
except:
pass
if item.infoLabels["plot"] == "":
sinopsis = scrapertools.find_single_match(data, 'id="episode-plot">(.*?)</p>')
if not "No hay sinopsis" in sinopsis:
item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)
title = "Ver enlaces %s - [" + item.show + " " + episode + "]"
itemlist.append(item.clone(action="findvideos", title=title % "Online", extra="episode", type="streaming"))
itemlist.append(item.clone(action="findvideos", title=title % "de Descarga", extra="episode", type="download"))
siguiente = scrapertools.find_single_match(data, '<a class="episode-nav-arrow next" href="([^"]+)" title="([^"]+)"')
if siguiente:
titulo = ">> Siguiente Episodio - [" + siguiente[1] + "]"
itemlist.append(item.clone(action="menu_info_episode", title=titulo, url=siguiente[0], extra="",
text_color=color1))
patron = '<a class="episode-nav-arrow previous" href="([^"]+)" title="([^"]+)"'
anterior = scrapertools.find_single_match(data, patron)
if anterior:
titulo = "<< Episodio Anterior - [" + anterior[1] + "]"
itemlist.append(item.clone(action="menu_info_episode", title=titulo, url=anterior[0], extra="",
text_color=color3))
url_serie = scrapertools.find_single_match(data, '<a href="([^"]+)" class="h1-like media-title"')
url_serie += "/episodios"
itemlist.append(item.clone(title="Ir a la lista de capítulos", action="episodios", url=url_serie, extra="",
text_color=color4))
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
text_color="magenta", context=""))
return itemlist
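# Resolves the numeric media id (from the url or from data-idm in the page)
# and queries /sources/list once per ordering: "streaming" and "download".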
def findvideos(item):
logger.info()
itemlist = []
if not "|" in item.extra and not __menu_info__:
data = httptools.downloadpage(item.url, add_referer=True).data
year = scrapertools.find_single_match(data, '<div class="media-summary">.*?release.*?>(\d+)<')
if year != "" and not "tmdb_id" in item.infoLabels:
try:
from core import tmdb
item.infoLabels["year"] = year
tmdb.set_infoLabels_item(item, __modo_grafico__)
except:
pass
if item.infoLabels["plot"] == "":
sinopsis = scrapertools.find_single_match(data, '<p id="media-plot".*?>.*?\.\.\.(.*?)Si te parece')
item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)
id = scrapertools.find_single_match(item.url, '/(\d+)/')
if id == "":
data = httptools.downloadpage(item.url, add_referer=True).data
id = scrapertools.find_single_match(str(data), 'data-idm="(.*?)"')
if "|" in item.extra or not __menu_info__:
extra = item.extra
if "|" in item.extra:
extra = item.extra[:-1]
url = host + "/sources/list?id=%s&type=%s&order=%s" % (id, extra, "streaming")
itemlist=(get_enlaces(item, url, "Online"))
url = host + "/sources/list?id=%s&type=%s&order=%s" % (id, extra, "download")
itemlist.extend(get_enlaces(item, url, "de Descarga"))
if extra == "media":
data_trailer = httptools.downloadpage(host + "/media/trailer?idm=%s&mediaType=1" % id, ignore_response_code=True).data
try:
trailer_url = jsontools.load(data_trailer)["video"]["url"]
if trailer_url != "": item.infoLabels["trailer"] = trailer_url
except:
pass
title = "Ver enlaces %s - [" + item.contentTitle + "]"
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
text_color="magenta", context=""))
if config.get_videolibrary_support() and not "|" in item.extra:
itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library", text_color=color5,
title="Añadir película a la videoteca", url=item.url, thumbnail=item.thumbnail,
fanart=item.fanart, fulltitle=item.fulltitle,
extra="media|"))
else:
url = host + "/sources/list?id=%s&type=%s&order=%s" % (id, item.extra, item.type)
type = item.type.replace("streaming", "Online").replace("download", "de Descarga")
itemlist.extend(get_enlaces(item, url, type))
if __comprueba_enlaces__:
for it in itemlist:
if it.server != '' and it.url != '':
it.url = normalizar_url(it.url, it.server)
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
return itemlist
def normalizar_url(url, server):
# Run the url through findvideosbyserver to rebuild it from the pattern/url
# entries of the server json files. Exceptions copied from the play function.
url = url.replace("http://miracine.tv/n/?etu=", "http://hqq.tv/player/embed_player.php?vid=")
url = url.replace("streamcloud.eu/embed-", "streamcloud.eu/")
enlaces = servertools.findvideosbyserver(url, server)[0]
if enlaces[1] != '':
return enlaces[1]
return url
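# Builds the playable links for one ordering. Gvideo embeds live in the page
# itself, so the "Online" branch re-downloads item.url and scrapes the player
# tabs; every other hoster comes from the /sources/list response.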
def get_enlaces(item, url, type):
itemlist = []
data = httptools.downloadpage(url, add_referer=True).data
if type == "Online":
gg = httptools.downloadpage(item.url, add_referer=True).data
bloque = scrapertools.find_single_match(gg, 'class="tab".*?button show')
patron = 'a href="#([^"]+)'
patron += '.*?language-ES-medium ([^"]+)'
patron += '.*?</i>([^<]+)'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedopcion, scrapedlanguage, scrapedcalidad in matches:
google_url = scrapertools.find_single_match(bloque, 'id="%s.*?src="([^"]+)' % scrapedopcion)
if "medium-es" in scrapedlanguage: language = "CAST"
if "medium-en" in scrapedlanguage: language = "VO"
if "medium-vs" in scrapedlanguage: language = "VOSE"
if "medium-la" in scrapedlanguage: language = "LAT"
titulo = " [%s/%s]" % (language, scrapedcalidad.strip())
itemlist.append(
item.clone(action="play", url=google_url, title=" Ver en Gvideo" + titulo, text_color=color2,
extra="", server="gvideo", language=language, quality=scrapedcalidad.strip()))
patron = '<div class="available-source".*?data-url="([^"]+)".*?class="language.*?title="([^"]+)"' \
'.*?class="source-name.*?>\s*([^<]+)<.*?<span class="quality-text">([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
if matches:
for scrapedurl, idioma, server, calidad in matches:
if server == "streamin": server = "streaminto"
if server == "waaw" or server == "miracine": server = "netutv"
if server == "ul": server = "uploadedto"
if server == "player": server = "vimpleru"
if servertools.is_server_enabled(server):
scrapedtitle = " Ver en " + server.capitalize() + " [" + idioma + "/" + calidad + "]"
itemlist.append(item.clone(action="play", url=scrapedurl, title=scrapedtitle, text_color=color2,
extra="", server=server, language=IDIOMAS[idioma]))
if len(itemlist) == 1:
itemlist.append(item.clone(title=" No hay enlaces disponibles", action="", text_color=color2))
return itemlist
def play(item):
logger.info()
itemlist = []
if item.extra != "" and "google" not in item.url:
post = "id=%s" % item.extra
data = httptools.downloadpage(host + "/goto/", post=post, add_referer=True).data
item.url = scrapertools.find_single_match(data, 'document.location\s*=\s*"([^"]+)"')
url = item.url.replace("http://miracine.tv/n/?etu=", "http://hqq.tv/player/embed_player.php?vid=")
url = url.replace("streamcloud.eu/embed-", "streamcloud.eu/")
if item.server:
enlaces = servertools.findvideosbyserver(url, item.server)[0]
else:
enlaces = servertools.findvideos(url)[0]
itemlist.append(item.clone(url=enlaces[1], server=enlaces[2]))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == "peliculas":
item.url = host + "/catalogue?type=peliculas"
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
if categoria == "series":
item.url = host + "/ultimos-capitulos"
item.action = "ultimos"
itemlist = ultimos(item)
if itemlist[-1].action == "ultimos":
itemlist.pop()
# Catch the exception so a failing channel does not break the "Novedades" channel
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def select_page(item):
import xbmcgui
dialog = xbmcgui.Dialog()
number = dialog.numeric(0, "Introduce el número de página")
if number != "":
item.url = re.sub(r'page=(\d+)', "page=" + number, item.url)
return peliculas(item)

View File

@@ -1,79 +0,0 @@
{
"id": "goovie",
"name": "Goovie",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "https://s15.postimg.cc/n9wp4nnzv/goovie.png",
"banner": "",
"categories": [
"movie",
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Castellano",
"VOSE",
"VO"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -1,311 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel Goovie -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger
IDIOMAS = {'EspaL':'Cast', 'LatinoL':'Lat', 'SubL':'VOSE', 'OriL':'VO'}
list_language = IDIOMAS.values()
CALIDADES = {'1080p':'1080','720p':'720','480p':'480','360p':'360'}
list_quality = ['1080', '720', '480', '360']
list_servers = [
'powvideo'
]
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'goovie')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'goovie')
host = 'https://goovie.co/'
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title='Peliculas', action='sub_menu', type='peliculas',
thumbnail= get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, title='Series', action='sub_menu', type='series',
thumbnail= get_thumb('tvshows', auto=True)))
itemlist.append(Item(channel=item.channel, title='Colecciones', action='list_collections',
url= host+'listas=populares', thumbnail=get_thumb('colections', auto=True)))
itemlist.append(
item.clone(title="Buscar", action="search", url=host + 'search?go=', thumbnail=get_thumb("search", auto=True),
extra='movie'))
autoplay.show_option(item.channel, itemlist)
return itemlist
def sub_menu(item):
logger.info()
itemlist=[]
itemlist.append(Item(channel=item.channel, title='Todas', url=host + item.type, action='list_all',
thumbnail=get_thumb('all', auto=True), type=item.type))
itemlist.append(Item(channel=item.channel, title='Genero', action='section',
thumbnail=get_thumb('genres', auto=True), type=item.type))
itemlist.append(Item(channel=item.channel, title='Por Año', action='section',
thumbnail=get_thumb('year', auto=True), type=item.type))
return itemlist
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def get_language(lang_data):
logger.info()
language = []
lang_list = scrapertools.find_multiple_matches(lang_data, '/flags/(.*?).png\)')
for lang in lang_list:
if lang == 'en':
lang = 'vose'
if lang not in language:
language.append(lang)
return language
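# Filter urls look like "<host><type>/filtro/<genero>,/<year>,"; numeric
# entries (years) go in the second slot, genres in the first.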
def section(item):
logger.info()
itemlist=[]
data = get_source(host+item.type)
if 'Genero' in item.title:
data = scrapertools.find_single_match(data, 'Generos.*?</ul>')
elif 'Año' in item.title:
data = scrapertools.find_single_match(data, 'Años.*?</ul>')
patron = '<li onclick="filter\(this, \'([^\']+)\', \d+\);">'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle in matches:
title = scrapedtitle
if re.search(r'\d+', scrapedtitle):  # numeric entries are years
url = '%s%s/filtro/,/%s,' % (host, item.type, title)
else:
url = '%s%s/filtro/%s,/,' % (host, item.type, title)
itemlist.append(Item(channel=item.channel, url=url, title=title, action='list_all',
type=item.type))
return itemlist
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<article class="Item"><a href="([^>]+)"><div class="Poster"><img src="([^"]+)".*?'
patron += '<h2>([^>]+)</h2>.*?</article>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
title = scrapedtitle
thumbnail = scrapedthumbnail.strip()
url = scrapedurl
filter_thumb = thumbnail.replace("https://image.tmdb.org/t/p/w154", "")
filter_list = {"poster_path": filter_thumb}
filter_list = filter_list.items()
new_item = Item(channel=item.channel,
title=title,
url=url,
thumbnail=thumbnail,
infoLabels={'filtro':filter_list})
if item.type == 'peliculas' or 'peliculas' in url:
new_item.action = 'findvideos'
new_item.contentTitle = scrapedtitle
else:
new_item.action = 'seasons'
new_item.contentSerieName = scrapedtitle
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, seekTmdb=True)
# Pagination
url_next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)"')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
return itemlist
def list_collections(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<li><a href="([^"]+)">.*?"first-lIMG"><img src="([^"]+)">.*?<h2>([^<]+)</h2>.*?Fichas:?\s(\d+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, thumb, title, cant in matches:
plot = 'Contiene %s elementos' % cant
itemlist.append(Item(channel=item.channel, action='list_all', title=title, url=url, thumbnail=thumb, plot=plot))
url_next_page = scrapertools.find_single_match(data, 'class="PageActiva">\d+</a><a href="([^"]+)"')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_collections'))
return itemlist
def seasons(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron='<div class="season temporada-(\d+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for season in matches:
season = season.lower().replace('temporada','')
infoLabels['season']=season
title = 'Temporada %s' % season
itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)
return itemlist
def episodesxseasons(item):
logger.info()
itemlist = []
data=get_source(item.url)
patron= '<li><a href="([^"]+)"><b>%s - (\d+)</b><h2 class="eTitle">([^>]+)</h2>' % item.infoLabels['season']
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for url, scrapedepisode, scrapedtitle in matches:
infoLabels['episode'] = scrapedepisode
title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos',
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
data = data.replace('"', "'")
patron = "onclick='clickLink\(this, '([^']+)', '([^']+)', '([^']+)'\);'>.*?<b>([a-zA-Z]+)"
matches = re.compile(patron, re.DOTALL).findall(data)
headers = {'referer': item.url}
for url, quality, language, server in matches:
if url != '':
language = IDIOMAS.get(language, language)
if quality.lower() == 'premium':
quality = '720p'
quality = CALIDADES.get(quality, quality)
title = ' [%s] [%s]' % (language, quality)
if 'visor/vdz' in url:
server = 'powvideo'
itemlist.append(Item(channel=item.channel, title='%s' + title, url=url, action='play', language=language,
quality=quality, server=server, headers=headers, infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
return sorted(itemlist, key=lambda i: i.language)
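# The site fronts its hosters with two redirectors: visor/vdz returns a page
# with an IFRAME whose SRC is the real url, while visor/if answers with a
# redirect whose Location header is read directly (follow_redirects=False).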
def play(item):
from lib.generictools import privatedecrypt
logger.info()
itemlist = []
url = ''
item.server = ''
data = httptools.downloadpage(item.url, headers=item.headers, follow_redirects=False)
if 'visor/vdz' in item.url:
url = scrapertools.find_single_match(data.data, 'IFRAME SRC="([^"]+)"')
elif 'visor/if' in item.url:
url = data.headers['location']
itemlist.append(Item(channel=item.channel, url=url, action='play', server=item.server,
infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
item.type = 'peliculas'
if texto != '':
return list_all(item)
else:
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas']:
item.url = host + 'peliculas'
elif categoria == 'infantiles':
item.url = host + 'peliculas/filtro/Animación,/,'
elif categoria == 'terror':
item.url = host + 'peliculas/filtro/Terror,/,'
item.type='peliculas'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -1,126 +0,0 @@
{
"id": "grantorrent",
"name": "GranTorrent",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "grantorrent.jpg",
"banner": "grantorrent.png",
"fanart": "grantorrent.png",
"categories": [
"torrent",
"movie",
"tvshow",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra (TMDB)",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"CAST",
"LAT",
"VO",
"VOS",
"VOSE"
]
},
{
"id": "emergency_urls",
"type": "list",
"label": "Se quieren guardar Enlaces de Emergencia por si se cae la Web?",
"default": 1,
"enabled": true,
"visible": true,
"lvalues": [
"No",
"Guardar",
"Borrar",
"Actualizar"
]
},
{
"id": "emergency_urls_torrents",
"type": "bool",
"label": "Se quieren guardar Torrents de Emergencia por si se cae la Web?",
"default": true,
"enabled": true,
"visible": "!eq(-1,'No')"
},
{
"id": "seleccionar_serie_temporada",
"type": "list",
"label": "Seleccionar agrupar por Serie o Temporada",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"Temporada",
"Serie"
]
},
{
"id": "seleccionar_ult_temporadda_activa",
"type": "bool",
"label": "Seleccionar para Videoteca si estará activa solo la última Temporada",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "timeout_downloadpage",
"type": "list",
"label": "Timeout (segs.) en descarga de páginas o verificación de servidores",
"default": 15,
"enabled": true,
"visible": true,
"lvalues": [
"None",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10",
"11",
"12",
"13",
"14",
"15"
]
},
{
"id": "include_in_newest_4k",
"type": "bool",
"label": "Incluir en Novedades - 4K",
"default": true,
"enabled": true,
"visible": true
}
]
}

File diff suppressed because it is too large

View File

@@ -1,62 +0,0 @@
{
"id": "hdfilmologia",
"name": "HDFilmologia",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast", "vose"],
"fanart": "https://i.postimg.cc/qvFCZNKT/Alpha-652355392-large.jpg",
"thumbnail": "https://hdfilmologia.com/templates/hdfilmologia/images/logo.png",
"banner": "",
"categories": [
"movie",
"vos"
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Castellano",
"English"
]
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 5",
"Perfil 4",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
},
{
"id": "orden_episodios",
"type": "bool",
"label": "Mostrar los episodios de las series en orden descendente",
"default": false,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,270 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel HDFilmologia -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import sys
import urllib
import urlparse
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import channeltools
from core import tmdb
from platformcode import config, logger
from channelselector import get_thumb
__channel__ = "hdfilmologia"
host = "https://hdfilmologia.com/"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
__perfil__ = int(config.get_setting('perfil', __channel__))
except:
__modo_grafico__ = True
__perfil__ = 0
# Set the colour profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
color1 = color2 = color3 = color4 = color5 = ""
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'openload', 'streamcherry']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(item.clone(title="Últimas Agregadas", action="movies", thumbnail=get_thumb('last', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'index.php?do=lastnews', viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Estrenos", action="movies", thumbnail=get_thumb('premieres', auto=True),
text_blod=True, page=0, viewcontent='movies', url=host + 'estrenos',
viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Más Vistas", action="movies", thumbnail=get_thumb('more watched', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host + 'mas-vistas/', viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Películas Por País", action="countriesYears", thumbnail=get_thumb('country',
auto=True), text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Películas Por Año", action="countriesYears", thumbnail=get_thumb('year', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Géneros", action="genres", thumbnail=get_thumb('genres', auto=True),
text_blod=True, page=0, viewcontent='movies',
url=host, viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
text_blod=True, url=host, page=0))
autoplay.show_option(item.channel, itemlist)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = urlparse.urljoin(item.url, "?do=search&mode=advanced&subaction=search&story={0}".format(texto))
# 'https://hdfilmologia.com/?do=search&mode=advanced&subaction=search&story=la+sombra'
try:
return sub_search(item)
# Catch the exception so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
def sub_search(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class="sres-wrap clearfix" href="([^"]+)">' # url
patron += '<div class="sres-img"><img src="/([^"]+)" alt="([^"]+)" />.*?' # img, title
patron += '<div class="sres-desc">(.*?)</div>' # plot
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, plot in matches:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, contentTitle=scrapedtitle,
action="findvideos", text_color=color3, page=0, plot=plot,
thumbnail=host + scrapedthumbnail))
pagination = scrapertools.find_single_match(data, 'class="pnext"><a href="([^"]+)">')
if pagination:
itemlist.append(Item(channel=__channel__, action="sub_search",
title="» Siguiente »", url=pagination))
tmdb.set_infoLabels(itemlist)
return itemlist
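# Listings are paged locally 25 cards at a time through item.page and only
# fall back to the site's "pnext" pagination when the current page runs out.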
def movies(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
patron = '<div class="kino-item ignore-select">.*?<a href="([^"]+)" class="kino-h"><h2>([^<]+)</h2>.*?' # url, title
patron += '<img src="([^"]+)".*?' # img
patron += '<div class="k-meta qual-mark">([^<]+)</div>.*?' # quality
patron += '<strong>Año:</strong></div>([^<]+)</li>' # year
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, quality, year in matches[item.page:item.page + 25]:
scrapedthumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
title = "%s [COLOR yellow][%s][/COLOR]" % (scrapedtitle, quality)
itemlist.append(Item(channel=__channel__, action="findvideos", text_color=color3,
url=scrapedurl, infoLabels={'year': year.strip()},
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail,
title=title, context="buscar_trailer"))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if item.page + 25 < len(matches):
itemlist.append(item.clone(page=item.page + 25,
title="» Siguiente »", text_color=color3))
else:
next_page = scrapertools.find_single_match(
data, 'class="pnext"><a href="([^"]+)">')
if next_page:
itemlist.append(item.clone(url=next_page, page=0,
title="» Siguiente »", text_color=color3))
return itemlist
def genres(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<li class="myli"><a href="/([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(channel=__channel__, action="movies", title=scrapedtitle,
url=host + scrapedurl, text_color=color3, viewmode="movie_with_plot"))
return itemlist
def countriesYears(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
if item.title == "Películas Por País":
patron_todas = 'Por País</option>(.*?)</option></select>'
else:
patron_todas = 'Por Año</option>(.*?)<option value="/">Peliculas'
data = scrapertools.find_single_match(data, patron_todas)
patron = '<option value="/([^"]+)">([^<]+)</option>' # url, title
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(title=scrapedtitle, url=host + scrapedurl, action="movies"))
return itemlist
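# Some sources need post-processing: Google Drive links hide the player in an
# inner iframe, and ultrapeliculashd pages pack a Dropbox key that is turned
# into a direct ?dl=1 download url.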
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}|&nbsp;", "", data)
patron = '>([^<]+)</a></li><li><a class="src_tab" id="[^"]+" data-src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for lang, url in matches:
lang = re.sub(r"1|2|3|4", "", lang)
server = servertools.get_server_from_url(url)
if 'dropbox' in url:
server = 'dropbox'
if '/drive/' in url:
data = httptools.downloadpage(url).data
url = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
server = 'gdrive'
if 'ultrapeliculashd' in url:
data = httptools.downloadpage(url).data
# logger.info(data)
patron = "\|s\|(\w+)\|"
matches = re.compile(patron, re.DOTALL).findall(data)
for key in matches:
url = 'https://www.dropbox.com/s/%s?dl=1' % (key)
server = 'dropbox'
languages = {'Latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
'Castellano': '[COLOR green](CAST)[/COLOR]',
'Subtitulado': '[COLOR red](VOS)[/COLOR]'}
if lang in languages:
lang = languages[lang]
title = "Ver en: [COLOR yellow](%s)[/COLOR] [COLOR yellowgreen]%s[/COLOR]" % (server.title(), lang)
if 'youtube' not in server:
itemlist.append(item.clone(action='play', url=url, title=title, language=lang,
text_color=color3))
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist.sort(key=lambda it: it.language, reverse=False)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
thumbnail=thumbnail_host, contentTitle=item.contentTitle))
return itemlist

View File

@@ -1,38 +0,0 @@
{
"id": "hdfull",
"name": "HDFull",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "hdfull.png",
"banner": "hdfull.png",
"categories": [
"movie",
"tvshow"
],
"settings": [
{
"id": "hdfulluser",
"type": "text",
"label": "@30014",
"enabled": true,
"visible": true
},
{
"id": "hdfullpassword",
"type": "text",
"label": "@30015",
"hidden": true,
"enabled": "!eq(-1,'')",
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,726 +0,0 @@
# -*- coding: utf-8 -*-
import base64
import re
import urllib
import urlparse
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
from channelselector import get_thumb
host = "https://hdfull.me"
if config.get_setting('hdfulluser', 'hdfull'):
account = True
else:
account = False
def settingCanal(item):
return platformtools.show_channel_settings()
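# The login form is protected by a CSRF token (__csrf_magic); it is scraped
# from the landing page and posted back together with the credentials.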
def login():
logger.info()
data = agrupa_datos(httptools.downloadpage(host).data)
patron = "<input type='hidden' name='__csrf_magic' value=\"([^\"]+)\" />"
sid = scrapertools.find_single_match(data, patron)
post = urllib.urlencode({'__csrf_magic': sid}) + "&username=" + config.get_setting('hdfulluser',
'hdfull') + "&password=" + config.get_setting(
'hdfullpassword', 'hdfull') + "&action=login"
httptools.downloadpage(host, post=post)
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="menupeliculas", title="Películas", url=host, folder=True,
thumbnail=get_thumb('movies', auto=True),))
itemlist.append(Item(channel=item.channel, action="menuseries", title="Series", url=host, folder=True,
thumbnail=get_thumb('tvshows', auto=True),))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar...",
thumbnail=get_thumb('search', auto=True),))
if not account:
itemlist.append(Item(channel=item.channel, title="[COLOR orange][B]Habilita tu cuenta para activar los items de usuario...[/B][/COLOR]",
action="settingCanal", url=""))
else:
login()
itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url=""))
return itemlist
def menupeliculas(item):
logger.info()
itemlist = []
if account:
itemlist.append(Item(channel=item.channel, action="items_usuario",
title="[COLOR orange][B]Favoritos[/B][/COLOR]",
url=host + "/a/my?target=movies&action=favorite&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="items_usuario",
title="[COLOR orange][B]Pendientes[/B][/COLOR]",
url=host + "/a/my?target=movies&action=pending&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="fichas", title="ABC", url=host + "/peliculas/abc", folder=True))
itemlist.append(
Item(channel=item.channel, action="fichas", title="Últimas películas", url=host + "/peliculas", folder=True))
itemlist.append(
Item(channel=item.channel, action="fichas", title="Películas Estreno", url=host + "/peliculas-estreno",
folder=True))
itemlist.append(Item(channel=item.channel, action="fichas", title="Películas Actualizadas",
url=host + "/peliculas-actualizadas", folder=True))
itemlist.append(
Item(channel=item.channel, action="fichas", title="Rating IMDB", url=host + "/peliculas/imdb_rating",
folder=True))
itemlist.append(Item(channel=item.channel, action="generos", title="Películas por Género", url=host, folder=True))
if account:
itemlist.append(Item(channel=item.channel, action="items_usuario",
title="[COLOR orange][B]Vistas[/B][/COLOR]",
url=host + "/a/my?target=movies&action=seen&start=-28&limit=28", folder=True))
return itemlist
def menuseries(item):
logger.info()
itemlist = []
if account:
itemlist.append(Item(channel=item.channel, action="items_usuario",
title="[COLOR orange][B]Siguiendo[/B][/COLOR]",
url=host + "/a/my?target=shows&action=following&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="items_usuario",
title="[COLOR orange][B]Para Ver[/B][/COLOR]",
url=host + "/a/my?target=shows&action=watch&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="series_abc", title="A-Z", folder=True))
itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos Emitidos",
url=host + "/a/episodes?action=latest&start=-24&limit=24&elang=ALL", folder=True))
itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Episodios Estreno",
url=host + "/a/episodes?action=premiere&start=-24&limit=24&elang=ALL", folder=True))
itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Episodios Actualizados",
url=host + "/a/episodes?action=updated&start=-24&limit=24&elang=ALL", folder=True))
itemlist.append(
Item(channel=item.channel, action="fichas", title="Últimas series", url=host + "/series", folder=True))
itemlist.append(
Item(channel=item.channel, action="fichas", title="Rating IMDB", url=host + "/series/imdb_rating", folder=True))
itemlist.append(
Item(channel=item.channel, action="generos_series", title="Series por Género", url=host, folder=True))
itemlist.append(Item(channel=item.channel, action="listado_series", title="Listado de todas las series",
url=host + "/series/list", folder=True))
if account:
itemlist.append(Item(channel=item.channel, action="items_usuario",
title="[COLOR orange][B]Favoritas[/B][/COLOR]",
url=host + "/a/my?target=shows&action=favorite&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="items_usuario",
title="[COLOR orange][B]Pendientes[/B][/COLOR]",
url=host + "/a/my?target=shows&action=pending&start=-28&limit=28", folder=True))
itemlist.append(Item(channel=item.channel, action="items_usuario",
title="[COLOR orange][B]Vistas[/B][/COLOR]",
url=host + "/a/my?target=shows&action=seen&start=-28&limit=28", folder=True))
return itemlist
def search(item, texto):
logger.info()
data = agrupa_datos(httptools.downloadpage(host).data)
sid = scrapertools.find_single_match(data, '.__csrf_magic. value="(sid:[^"]+)"')
item.extra = urllib.urlencode({'__csrf_magic': sid}) + '&menu=search&query=' + texto
item.title = "Buscar..."
item.url = host + "/buscar"
item.texto = texto
try:
return fichas(item)
# Catch the exception so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def series_abc(item):
logger.info()
itemlist = []
az = "ABCDEFGHIJKLMNOPQRSTUVWXYZ#"
for l in az:
itemlist.append(
Item(channel=item.channel, action='fichas', title=l, url=host + "/series/abc/" + l.replace('#', '9')))
return itemlist
def items_usuario(item):
logger.info()
itemlist = []
## Load watch statuses
status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
## User cards
url = item.url.split("?")[0]
post = item.url.split("?")[1]
old_start = scrapertools.find_single_match(post, 'start=([^&]+)&')
limit = scrapertools.find_single_match(post, 'limit=(\d+)')
start = "%s" % (int(old_start) + int(limit))
post = post.replace("start=" + old_start, "start=" + start)
next_page = url + "?" + post
## Fetch the user's cards
data = httptools.downloadpage(url, post=post).data
fichas_usuario = jsontools.load(data)
for ficha in fichas_usuario:
try:
title = ficha['title']['es'].strip()
except:
title = ficha['title']['en'].strip()
try:
title = title.encode('utf-8')
except:
pass
show = title
try:
thumbnail = host + "/thumbs/" + ficha['thumbnail']
except:
thumbnail = host + "/thumbs/" + ficha['thumb']
try:
url = urlparse.urljoin(host, '/serie/' + ficha['permalink']) + "###" + ficha['id'] + ";1"
action = "episodios"
str = get_status(status, 'shows', ficha['id'])
if "show_title" in ficha:
action = "findvideos"
try:
serie = ficha['show_title']['es'].strip()
except:
serie = ficha['show_title']['en'].strip()
temporada = ficha['season']
episodio = ficha['episode']
serie = "[COLOR whitesmoke][B]" + serie + "[/B][/COLOR]"
if len(episodio) == 1: episodio = '0' + episodio
try:
title = temporada + "x" + episodio + " - " + serie + ": " + title
except:
title = temporada + "x" + episodio + " - " + serie.decode('iso-8859-1') + ": " + title.decode(
'iso-8859-1')
url = urlparse.urljoin(host, '/serie/' + ficha[
'permalink'] + '/temporada-' + temporada + '/episodio-' + episodio) + "###" + ficha['id'] + ";3"
except:
url = urlparse.urljoin(host, '/pelicula/' + ficha['perma']) + "###" + ficha['id'] + ";2"
action = "findvideos"
str = get_status(status, 'movies', ficha['id'])
if str != "": title += str
itemlist.append(
Item(channel=item.channel, action=action, title=title, fulltitle=title, url=url, thumbnail=thumbnail,
show=show, folder=True))
if len(itemlist) == int(limit):
itemlist.append(
Item(channel=item.channel, action="items_usuario", title=">> Página siguiente", url=next_page, folder=True))
return itemlist
def listado_series(item):
logger.info()
itemlist = []
data = agrupa_datos(httptools.downloadpage(item.url).data)
patron = '<div class="list-item"><a href="([^"]+)"[^>]+>([^<]+)</a></div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = scrapedurl + "###0;1"
itemlist.append(
Item(channel=item.channel, action="episodios", title=scrapedtitle, fulltitle=scrapedtitle, url=url,
show=scrapedtitle, contentType="tvshow"))
return itemlist
def fichas(item):
logger.info()
itemlist = []
textoidiomas=''
infoLabels=dict()
## Load watch statuses
status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
if item.title == "Buscar...":
data = agrupa_datos(httptools.downloadpage(item.url, post=item.extra).data)
s_p = scrapertools.find_single_match(data, '<h3 class="section-title">(.*?)<div id="footer-wrapper">').split(
'<h3 class="section-title">')
if len(s_p) == 1:
data = s_p[0]
if 'Lo sentimos</h3>' in s_p[0]:
return [Item(channel=item.channel, title="[COLOR gold][B]HDFull:[/B][/COLOR] [COLOR blue]" + item.texto.replace('%20',
' ') + "[/COLOR] sin resultados")]
else:
data = s_p[0] + s_p[1]
else:
data = agrupa_datos(httptools.downloadpage(item.url).data)
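# Rewrite every result card into one "'key':'value';" token line so a single
# pattern can pull url, image, languages, rating, title and id per card.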
data = re.sub(
r'<div class="span-6[^<]+<div class="item"[^<]+' + \
'<a href="([^"]+)"[^<]+' + \
'<img.*?src="([^"]+)".*?' + \
'<div class="left"(.*?)</div>' + \
'<div class="right"(.*?)</div>.*?' + \
'title="([^"]+)".*?' + \
'onclick="setFavorite.\d, (\d+),',
r"'url':'\1';'image':'\2';'langs':'\3';'rating':'\4';'title':\5;'id':'\6';",
data
)
patron = "'url':'([^']+)';'image':'([^']+)';'langs':'([^']+)';'rating':'([^']+)';'title':([^;]+);'id':'([^']+)';"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedlangs, scrapedrating, scrapedtitle, scrapedid in matches:
thumbnail = scrapedthumbnail
language = ''
title = scrapedtitle.strip()
show = title
contentTitle = scrapedtitle.strip()
if scrapedlangs != ">":
textoidiomas, language = extrae_idiomas(scrapedlangs)
# TODO: strip the language tag from the title
title += " ( [COLOR teal][B]" + textoidiomas + "[/B][/COLOR])"
if scrapedrating != ">":
valoracion = re.sub(r'><[^>]+>(\d+)<b class="dec">(\d+)</b>', r'\1,\2', scrapedrating)
infoLabels['rating']=valoracion
title += " ([COLOR orange]" + valoracion + "[/COLOR])"
url = urlparse.urljoin(item.url, scrapedurl)
if "/serie" in url or "/tags-tv" in url:
action = "episodios"
url += "###" + scrapedid + ";1"
type = "shows"
contentType = "tvshow"
else:
action = "findvideos"
url += "###" + scrapedid + ";2"
type = "movies"
contentType = "movie"
str = get_status(status, type, scrapedid)
if str != "": title += str
if item.title == "Buscar...":
bus = host[-4:]
tag_type = scrapertools.find_single_match(url, '%s/([^/]+)/' %bus)
title += " - [COLOR blue]" + tag_type.capitalize() + "[/COLOR]"
if "/serie" in url or "/tags-tv" in url:
itemlist.append(
Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
contentSerieName=show, folder=True, contentType=contentType,
language =language, infoLabels=infoLabels))
else:
itemlist.append(
Item(channel=item.channel, action=action, title=title, url=url, fulltitle=contentTitle, thumbnail=thumbnail,
folder=True, contentType=contentType, contentTitle=contentTitle,
language =language, infoLabels=infoLabels))
## Pagination
next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)">.raquo;</a>')
if next_page_url != "":
itemlist.append(Item(channel=item.channel, action="fichas", title=">> Página siguiente",
url=urlparse.urljoin(item.url, next_page_url), folder=True))
return itemlist
def episodios(item):
logger.info()
id = "0"
itemlist = []
## Load watch statuses
status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
url_targets = item.url
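# Item urls carry a "###<id>;<type>" suffix (1=show, 2=movie, 3=episode) so
# the ficha id is available without re-scraping the page.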
if "###" in item.url:
id = item.url.split("###")[1].split(";")[0]
type = item.url.split("###")[1].split(";")[1]
item.url = item.url.split("###")[0]
## Seasons
data = agrupa_datos(httptools.downloadpage(item.url).data)
if id == "0":
## Take the show id from the page when coming from listado_series
id = scrapertools.find_single_match(data, "<script>var sid = '([^']+)';</script>")
url_targets = url_targets.replace('###0', '###' + id)
str = get_status(status, "shows", id)
if str != "" and account and item.category != "Series" and "XBMC" not in item.title:
if config.get_videolibrary_support():
title = " ( [COLOR gray][B]" + item.contentSerieName + "[/B][/COLOR] )"
itemlist.append(
Item(channel=item.channel, action="episodios", title=title, url=url_targets,
thumbnail=item.thumbnail, contentSerieName=item.contentSerieName, folder=False))
title = str.replace('green', 'red').replace('Siguiendo', 'Abandonar')
itemlist.append(Item(channel=item.channel, action="set_status", title=title, url=url_targets,
thumbnail=item.thumbnail, contentSerieName=item.contentSerieName, folder=True))
elif account and item.category != "Series" and "XBMC" not in item.title:
if config.get_videolibrary_support():
title = " ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )"
itemlist.append(
Item(channel=item.channel, action="episodios", title=title, url=url_targets,
thumbnail=item.thumbnail, contentSerieName=item.contentSerieName, folder=False))
title = " ( [COLOR orange][B]Seguir[/B][/COLOR] )"
itemlist.append(Item(channel=item.channel, action="set_status", title=title, url=url_targets,
thumbnail=item.thumbnail, contentSerieName=item.contentSerieName, folder=True))
patron = "<li><a href='([^']+)'>[^<]+</a></li>"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl in matches:
data = agrupa_datos(httptools.downloadpage(scrapedurl).data)
sid = scrapertools.find_single_match(data, "<script>var sid = '(\d+)'")
ssid = scrapertools.find_single_match(scrapedurl, "temporada-(\d+)")
post = "action=season&start=0&limit=0&show=%s&season=%s" % (sid, ssid)
url = host + "/a/episodes"
data = httptools.downloadpage(url, post=post).data
episodes = jsontools.load(data)
for episode in episodes:
thumbnail = host + "/thumbs/" + episode['thumbnail']
language = episode['languages']
temporada = episode['season']
episodio = episode['episode']
if len(episodio) == 1: episodio = '0' + episodio
if episode['languages'] != "[]":
idiomas = "( [COLOR teal][B]"
for idioma in episode['languages']: idiomas += idioma + " "
idiomas += "[/B][/COLOR])"
else:
idiomas = ""
if episode['title']:
try:
title = episode['title']['es'].strip()
except:
title = episode['title']['en'].strip()
if len(title) == 0: title = "Temporada " + temporada + " Episodio " + episodio
try:
title = temporada + "x" + episodio + " - " + title.decode('utf-8') + ' ' + idiomas
except:
title = temporada + "x" + episodio + " - " + title.decode('iso-8859-1') + ' ' + idiomas
str = get_status(status, 'episodes', episode['id'])
if str != "": title += str
try:
title = title.encode('utf-8')
except:
title = title.encode('iso-8859-1')
url = urlparse.urljoin(scrapedurl, 'temporada-' + temporada + '/episodio-' + episodio) + "###" + episode[
'id'] + ";3"
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
thumbnail=thumbnail, contentSerieName=item.contentSerieName, folder=True, contentType="episode",
language=language))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=url_targets,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
itemlist.append(Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=url_targets,
action="download_all_episodes", extra="episodios"))
return itemlist
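# Hedged sketch: the JSON call episodios() builds above, isolated for clarity.
# Endpoint, POST fields and response keys are taken verbatim from that code;
# the ids passed in are hypothetical.
def _demo_fetch_season(show_id, season_number):
    post = "action=season&start=0&limit=0&show=%s&season=%s" % (show_id, season_number)
    data = httptools.downloadpage(host + "/a/episodes", post=post).data
    # each entry exposes 'season', 'episode', 'title', 'languages', 'thumbnail' and 'id'
    return jsontools.load(data)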
def novedades_episodios(item):
logger.info()
itemlist = []
## Load user status flags
status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
## Episodes
url = item.url.split("?")[0]
post = item.url.split("?")[1]
old_start = scrapertools.find_single_match(post, 'start=([^&]+)&')
start = "%s" % (int(old_start) + 24)
post = post.replace("start=" + old_start, "start=" + start)
next_page = url + "?" + post
data = httptools.downloadpage(url, post=post).data
episodes = jsontools.load(data)
for episode in episodes:
thumbnail = host + "/thumbs/" + episode['thumbnail']
temporada = episode['season']
episodio = episode['episode']
if len(episodio) == 1: episodio = '0' + episodio
if episode['languages'] != "[]":
idiomas = "( [COLOR teal][B]"
for idioma in episode['languages']: idiomas += idioma + " "
idiomas += "[/B][/COLOR])"
else:
idiomas = ""
try:
contentSerieName = episode['show']['title']['es'].strip()
except:
contentSerieName = episode['show']['title']['en'].strip()
show = "[COLOR whitesmoke][B]" + contentSerieName + "[/B][/COLOR]"
# default so title is always defined, even when the episode carries none
title = ""
if episode['title']:
try:
title = episode['title']['es'].strip()
except:
title = episode['title']['en'].strip()
if len(title) == 0: title = "Temporada " + temporada + " Episodio " + episodio
try:
title = temporada + "x" + episodio + " - " + show.decode('utf-8') + ": " + title.decode(
'utf-8') + ' ' + idiomas
except:
title = temporada + "x" + episodio + " - " + show.decode('iso-8859-1') + ": " + title.decode(
'iso-8859-1') + ' ' + idiomas
status_str = get_status(status, 'episodes', episode['id'])
if status_str != "": title += status_str
try:
title = title.encode('utf-8')
except:
title = title.encode('iso-8859-1')
url = urlparse.urljoin(host, '/serie/' + episode[
'permalink'] + '/temporada-' + temporada + '/episodio-' + episodio) + "###" + episode['id'] + ";3"
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, contentSerieName=contentSerieName, url=url, thumbnail=thumbnail,
folder=True, contentType="episode"))
if len(itemlist) == 24:
itemlist.append(
Item(channel=item.channel, action="novedades_episodios", title=">> Página siguiente", url=next_page,
folder=True))
return itemlist
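# Hedged sketch of the pagination trick above: the "next page" is the same POST
# body with 'start' advanced by one screenful (24 items). Only the start/limit
# handling is real; the example body is made up.
def _demo_next_page(url_with_post, page_size=24):
    url, post = url_with_post.split("?")  # e.g. ".../a/episodes?action=latest&start=0&limit=24"
    old_start = scrapertools.find_single_match(post, 'start=([^&]+)&')
    post = post.replace("start=" + old_start, "start=%d" % (int(old_start) + page_size))
    return url + "?" + post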
def generos(item):
logger.info()
itemlist = []
data = agrupa_datos(httptools.downloadpage(item.url).data)
data = scrapertools.find_single_match(data, '<li class="dropdown"><a href="%s/peliculas"(.*?)</ul>' % host)
patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.strip()
url = urlparse.urljoin(item.url, scrapedurl)
itemlist.append(Item(channel=item.channel, action="fichas", title=title, url=url, folder=True))
return itemlist
def generos_series(item):
logger.info()
itemlist = []
data = agrupa_datos(httptools.downloadpage(item.url).data)
data = scrapertools.find_single_match(data, '<li class="dropdown"><a href="%s/series"(.*?)</ul>' % host)
patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.strip()
url = urlparse.urljoin(item.url, scrapedurl)
itemlist.append(Item(channel=item.channel, action="fichas", title=title, url=url, folder=True))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
it1 = []
it2 = []
## Load user status flags
status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
url_targets = item.url
## Video links
id = ""
type = ""
if "###" in item.url:
id = item.url.split("###")[1].split(";")[0]
type = item.url.split("###")[1].split(";")[1]
item.url = item.url.split("###")[0]
if type == "2" and account and item.category != "Cine":
title = " ( [COLOR orange][B]Agregar a Favoritos[/B][/COLOR] )"
if "Favorito" in item.title:
title = " ( [COLOR red][B]Quitar de Favoritos[/B][/COLOR] )"
if config.get_videolibrary_support():
title_label = " ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )"
it1.append(Item(channel=item.channel, action="findvideos", title=title_label, fulltitle=title_label,
url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=False))
title_label = " ( [COLOR green][B]Tráiler[/B][/COLOR] )"
it1.append(
Item(channel="trailertools", action="buscartrailer", title=title_label, contentTitle=item.show, url=item.url,
thumbnail=item.thumbnail, show=item.show))
it1.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
thumbnail=item.thumbnail, show=item.show, language=item.language, folder=True))
data_js = httptools.downloadpage("%s/templates/hdfull/js/jquery.hdfull.view.min.js" % host).data
key = scrapertools.find_single_match(data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')
data_js = httptools.downloadpage("%s/js/providers.js" % host).data
decoded = jhexdecode(data_js)
providers_pattern = 'p\[(\d+)\]= {"t":"([^"]+)","d":".*?","e":.function.*?,"l":.function.*?return "([^"]+)".*?};'
providers = scrapertools.find_multiple_matches (decoded, providers_pattern)
provs = {}
for provider, e, l in providers:
provs[provider]=[e,l]
data = agrupa_datos(httptools.downloadpage(item.url).data)
data_obf = scrapertools.find_single_match(data, "var ad\s*=\s*'([^']+)'")
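## 'ad' ships as base64 of a Caesar-rotated JSON list; rotating again by
## 126 - key undoes it (see obfs() at the bottom of this file)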
data_decrypt = jsontools.load(obfs(base64.b64decode(data_obf), 126 - int(key)))
infolabels = {}
year = scrapertools.find_single_match(data, '<span>A&ntilde;o:\s*</span>.*?(\d{4})')
infolabels["year"] = year
matches = []
for match in data_decrypt:
if match['provider'] in provs:
try:
embed = provs[match['provider']][0]
url = provs[match['provider']][1]+match['code']
matches.append([match['lang'], match['quality'], url, embed])
except:
pass
for idioma, calidad, url, embed in matches:
if embed == 'd':
option = "Descargar"
option1 = 2
else:
option = "Ver"
option1 = 1
calidad = unicode(calidad, "utf8").upper().encode("utf8")
title = option + ": %s (" + calidad + ")" + " (" + idioma + ")"
thumbnail = item.thumbnail
plot = item.title + "\n\n" + scrapertools.find_single_match(data,
'<meta property="og:description" content="([^"]+)"')
plot = scrapertools.htmlclean(plot)
fanart = scrapertools.find_single_match(data, '<div style="background-image.url. ([^\s]+)')
if account:
url += "###" + id + ";" + type
it2.append(
Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, fanart=fanart, show=item.show, folder=True, infoLabels=infolabels, language=idioma,
contentTitle=item.contentTitle, contentType=item.contentType, tipo=option, tipo1=option1, idioma=idioma))
it2 = servertools.get_servers_itemlist(it2, lambda i: i.title % i.server.capitalize())
it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server))
for video_item in it2:
if "###" not in video_item.url:
video_item.url += "###" + id + ";" + type
itemlist.extend(it1)
itemlist.extend(it2)
## type 2 = movie
if type == "2" and item.category != "Cine":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=url_targets, thumbnail = item.thumbnail,
fulltitle = item.contentTitle
))
return itemlist
def play(item):
if "###" in item.url:
id = item.url.split("###")[1].split(";")[0]
type = item.url.split("###")[1].split(";")[1]
item.url = item.url.split("###")[0]
post = "target_id=%s&target_type=%s&target_status=1" % (id, type)
data = httptools.downloadpage(host + "/a/status", post=post).data
devuelve = servertools.findvideosbyserver(item.url, item.server)
if devuelve:
item.url = devuelve[0][1]
else:
devuelve = servertools.findvideos(item.url, True)
if devuelve:
item.url = devuelve[0][1]
item.server = devuelve[0][2]
item.thumbnail = item.contentThumbnail
item.fulltitle = item.contentTitle
return [item]
def agrupa_datos(data):
## Normalize the HTML into one single-spaced line
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|<!--.*?-->', '', data)
data = re.sub(r'\s+', ' ', data)
data = re.sub(r'>\s<', '><', data)
return data
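# Example: agrupa_datos('<div>\n  <a href="x">  link </a>\n</div>') returns
# '<div><a href="x"> link </a></div>', which keeps the single-line regexes
# used throughout this channel simple.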
def extrae_idiomas(bloqueidiomas):
logger.info()
language=[]
textoidiomas = ''
patronidiomas = '([a-z0-9]+).png"'
idiomas = re.compile(patronidiomas, re.DOTALL).findall(bloqueidiomas)
for idioma in idiomas:
# TODO: drop this concatenated string...
textoidiomas = textoidiomas + idioma +" "
# TODO: ...and keep only the list
language.append(idioma)
return textoidiomas, language
## --------------------------------------------------------------------------------
def set_status(item):
if "###" in item.url:
id = item.url.split("###")[1].split(";")[0]
type = item.url.split("###")[1].split(";")[1]
# item.url = item.url.split("###")[0]
if "Abandonar" in item.title:
path = "/a/status"
post = "target_id=" + id + "&target_type=" + type + "&target_status=0"
elif "Seguir" in item.title:
path = "/a/status"
post = "target_id=" + id + "&target_type=" + type + "&target_status=3"
elif "Agregar a Favoritos" in item.title:
path = "/a/favorite"
post = "like_id=" + id + "&like_type=" + type + "&like_comment=&vote=1"
elif "Quitar de Favoritos" in item.title:
path = "/a/favorite"
post = "like_id=" + id + "&like_type=" + type + "&like_comment=&vote=-1"
data = httptools.downloadpage(host + path, post=post).data
title = "[COLOR green][B]OK[/B][/COLOR]"
return [Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=item.url,
thumbnail=item.thumbnail, show=item.show, folder=False)]
def get_status(status, type, id):
if type == 'shows':
state = {'0': '', '1': 'Finalizada', '2': 'Pendiente', '3': 'Siguiendo'}
else:
state = {'0': '', '1': 'Visto', '2': 'Pendiente'}
status_str = ""
str1 = ""
str2 = ""
try:
if id in status['favorites'][type]:
str1 = " [COLOR orange][B]Favorito[/B][/COLOR]"
except:
str1 = ""
try:
if id in status['status'][type]:
str2 = state[status['status'][type][id]]
if str2 != "": str2 = "[COLOR green][B]" + state[status['status'][type][id]] + "[/B][/COLOR]"
except:
str2 = ""
if str1 != "" or str2 != "":
status_str = " (" + str1 + str2 + " )"
return status_str
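# e.g. for a show that is both a favourite and followed, get_status() yields
# " ( [COLOR orange][B]Favorito[/B][/COLOR][COLOR green][B]Siguiendo[/B][/COLOR] )"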
## --------------------------------------------------------------------------------
## --------------------------------------------------------------------------------
def jhexdecode(t):
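# Reverses the \xNN hex-escaping and numeric variable aliasing that the site's
# obfuscated providers.js uses, returning readable JS source; e.g. the escaped
# token '\x68\x64' decodes to 'hd', and codes outside 20..159 are dropped.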
r = re.sub(r'_\d+x\w+x(\d+)', 'var_' + r'\1', t)
r = re.sub(r'_\d+x\w+', 'var_0', r)
def to_hx(c):
h = int("%s" % c.groups(0), 16)
if 19 < h < 160:
return chr(h)
else:
return ""
r = re.sub(r'(?:\\|)x(\w{2})', to_hx, r).replace('var ', '')
f = eval(scrapertools.find_single_match(r, '\s*var_0\s*=\s*([^;]+);'))
for i, v in enumerate(f):
r = r.replace('[[var_0[%s]]' % i, "." + f[i])
r = r.replace(':var_0[%s]' % i, ":\"" + f[i] + "\"")
r = r.replace(' var_0[%s]' % i, " \"" + f[i] + "\"")
r = r.replace('(var_0[%s]' % i, "(\"" + f[i] + "\"")
r = r.replace('[var_0[%s]]' % i, "." + f[i])
if v == "": r = r.replace('var_0[%s]' % i, '""')
r = re.sub(r':(function.*?\})', r":'\g<1>'", r)
r = re.sub(r':(var[^,]+),', r":'\g<1>',", r)
return r
def obfs(data, key, n=126):
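# Caesar-style rotation: every code point <= n is shifted by key, wrapping at n.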
chars = list(data)
for i in range(0, len(chars)):
c = ord(chars[i])
if c <= n:
number = (ord(chars[i]) + key) % n
chars[i] = chr(number)
return "".join(chars)
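# Hedged round-trip sketch of the rotation above: encoding with some key and
# decoding with (n - key) are inverses, which is why findvideos() decodes the
# 'ad' blob with obfs(..., 126 - int(key)). The sample string is made up.
def _demo_obfs_roundtrip():
    plain = '[{"provider":"9","code":"abc"}]'
    scrambled = obfs(plain, 13)
    return obfs(scrambled, 126 - 13) == plain  # True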

View File

@@ -1,70 +0,0 @@
{
"id": "homecine",
"name": "HomeCine",
"active": true,
"adult": false,
"language": ["lat","cast"],
"thumbnail": "https://homecine.net/wp-content/uploads/2018/05/homedark-1-3.png",
"banner": "",
"version": 1,
"categories": [
"movie",
"direct"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,358 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urllib
import urlparse
from channels import autoplay
from channels import filtertools
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST', 'Subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = ['HD 720p', 'HD 1080p', '480p', '360p']
list_servers = ['cinemaupload']
host = 'https://homecine.net'
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title="Ultimas",
action="list_all",
thumbnail=get_thumb('last', auto=True),
url='%s%s' % (host, '/release-year/2019'),
first=0
))
itemlist.append(Item(channel=item.channel,title="Películas",
action="sub_menu",
thumbnail=get_thumb('movies', auto=True),
))
itemlist.append(Item(channel=item.channel,title="Series",
action="list_all",
thumbnail=get_thumb('tvshows', auto=True),
url='%s%s'%(host,'/series/'),
first=0
))
itemlist.append(Item(channel=item.channel, title="Documentales",
action="list_all",
thumbnail=get_thumb('documentaries', auto=True),
url='%s%s' % (host, '/documentales/'),
first=0
))
itemlist.append(Item(channel=item.channel,title="Buscar",
action="search",
url=host+'/?s=',
thumbnail=get_thumb('search', auto=True),
))
autoplay.show_option(item.channel, itemlist)
return itemlist
def sub_menu(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel,title="Todas",
action="list_all",
thumbnail=get_thumb('all', auto=True),
url='%s%s' % (host, '/peliculas/'),
first=0
))
itemlist.append(Item(channel=item.channel, title="Mas vistas",
action="list_all",
thumbnail=get_thumb('more watched', auto=True),
url='%s%s' % (host, '/most-viewed/'),
first=0
))
itemlist.append(Item(channel=item.channel,title="Generos",
action="seccion",
thumbnail=get_thumb('genres', auto=True),
fanart='https://s3.postimg.cc/5s9jg2wtf/generos.png',
url=host,
))
return itemlist
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
itemlist = []
end_of_page = False
data = get_source(item.url)
patron = 'movie-id="\d+".*?<a href="([^"]+)".*?<.*?original="([^"]+)".*?<h2>([^<]+)</h2>.*?jtip(.*?)clearfix'
matches = re.compile(patron, re.DOTALL).findall(data)
first = item.first
last = first + 19
if last > len(matches):
last = len(matches)
end_of_page = True
for scrapedurl, scrapedthumbnail, scrapedtitle, extra_info in matches[first:last]:
year = scrapertools.find_single_match(extra_info, '"tag">(\d{4})<')
url = host+scrapedurl
thumbnail = host+scrapedthumbnail.strip()
title = scrapedtitle
new_item = Item(channel=item.channel,
title=title,
url=url,
thumbnail=thumbnail,
infoLabels = {'year': year}
)
if 'series' in scrapedurl:
new_item.action = 'seasons'
new_item.contentSerieName = title
else:
new_item.action = 'findvideos'
new_item.contentTitle = title
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
if not end_of_page:
url_next_page = item.url
first = last
else:
url_next_page = scrapertools.find_single_match(data, "<li class='active'>.*?class='page larger' href='([^']+)'")
url_next_page = host+url_next_page
first = 0
if url_next_page:
itemlist.append(Item(channel=item.channel,title="Siguiente >>", url=url_next_page, action='list_all',
first=first))
return itemlist
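# Hedged sketch of the paging above: a scraped page holds more entries than one
# Kodi screen, so items are served in windows of 19 and 'first' rides along on
# the "Siguiente >>" item until the page is exhausted.
def _demo_window(matches, first, page_size=19):
    last = min(first + page_size, len(matches))
    return matches[first:last], last  # slice to render, offset for the next call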
def seccion(item):
logger.info()
itemlist = []
duplicado = []
data = get_source(item.url)
patron = 'menu-item-object-category menu-item-\d+"><a href="([^"]+)">([^<]+)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = host+scrapedurl
title = scrapedtitle
thumbnail = ''
if url not in duplicado:
itemlist.append(Item(channel=item.channel,
action='list_all',
title=title,
url=url,
thumbnail=thumbnail,
first=0
))
return itemlist
def seasons(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<strong>Season (\d+)</strong>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedseason in matches:
contentSeasonNumber = scrapedseason
title = 'Temporada %s' % scrapedseason
# copy the labels per season so items do not share one mutable dict
infoLabels = dict(item.infoLabels)
infoLabels['season'] = contentSeasonNumber
itemlist.append(Item(channel=item.channel,
action='episodesxseason',
url=item.url,
title=title,
contentSeasonNumber=contentSeasonNumber,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library",
extra="episodios",
contentSerieName=item.contentSerieName,
extra1='library'
))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseason(tempitem)
return itemlist
def episodesxseason(item):
logger.info()
itemlist = []
season = item.contentSeasonNumber
data = get_source(item.url)
data = scrapertools.find_single_match(data, '<strong>Season %s</strong>.*?class="les-content"(.*?)</div>' % season)
patron = '<a href="([^"]+)">Episode (\d+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, dataep in matches:
url = host+scrapedurl
contentEpisodeNumber = dataep
title = '%sx%s - Episodio %s' % (season, dataep, dataep)
# copy the labels per episode so items do not share one mutable dict
infoLabels = dict(item.infoLabels)
infoLabels['episode'] = dataep
itemlist.append(Item(channel=item.channel,
action="findvideos",
title=title,
url=url,
contentEpisodeNumber=contentEpisodeNumber,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
item.first=0
if texto != '':
return list_all(item)
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas']:
item.url = host +'/peliculas'
elif categoria == 'infantiles':
item.url = host + '/animacion/'
elif categoria == 'terror':
item.url = host + '/terror/'
item.first=0
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<div id="tab(\d+)".*?<iframe.*?src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, url in matches:
extra_info = scrapertools.find_single_match(data, '<a href="#tab%s">(.*?)<' % option)
if '-' in extra_info:
quality, language = scrapertools.find_single_match(extra_info, '(.*?) - (.*)')
else:
language = ''
quality = extra_info
if 'https:' not in url:
url = 'https:'+url
title = ''
if not config.get_setting('unify'):
if language != '':
title += ' [%s]' % IDIOMAS[language]
if quality != '':
title += ' [%s]' % quality
new_item = Item(channel=item.channel,
url=url,
title= '%s'+ title,
contentTitle=item.title,
action='play',
infoLabels = item.infoLabels
)
if language != '':
new_item.language = IDIOMAS[language]
if quality != '':
new_item.quality = quality
itemlist.append(new_item)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle,
))
return itemlist
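# Note on the '%s' left in each title above: get_servers_itemlist() detects the
# server for every url and formats the title with it, e.g.
# '%s [LAT] [HD 720p]' % 'Openload' -> 'Openload [LAT] [HD 720p]'.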

View File

@@ -1,114 +0,0 @@
{
"id": "inkapelis",
"name": "Inkapelis",
"active": true,
"adult": false,
"language": ["esp", "lat", "cast"],
"thumbnail": "https://www.inkapelis.com/wp-content/uploads/2016/07/logitoinkapelis-min.png",
"banner": "inkapelis.png",
"categories": [
"movie",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Películas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE"
]
},
{
"id": "filtro_defecto_peliculas",
"type": "label",
"enabled": true,
"visible": false
},
{
"id": "pers_peliculas1",
"type": "label",
"enabled": true,
"visible": false
},
{
"id": "pers_peliculas2",
"type": "label",
"enabled": true,
"visible": false
},
{
"id": "pers_peliculas3",
"type": "label",
"enabled": true,
"visible": false
}
]
}

View File

@@ -1,462 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
from channels import filtertools
from channels import autoplay
__modo_grafico__ = config.get_setting("modo_grafico", "inkapelis")
__perfil__ = config.get_setting("perfil", "inkapelis")
# Colour profile presets
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E']]
color1, color2, color3, color4 = perfil[__perfil__]
IDIOMAS = {'Latino': 'LAT', 'Español':'CAST', 'Subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = ['Cam', 'TSHQ', 'Dvdrip', 'Blurayrip', 'HD Rip 320p', 'hd rip 320p', 'HD Real 720p', 'Full HD 1080p']
list_servers = ['openload', 'gamovideo', 'streamplay', 'streamango', 'vidoza']
host = 'https://www.inkapelis.to/'
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title="Novedades", action="entradas", url=host,
extra="Novedades", text_color=color1, thumbnail=get_thumb('newest', auto=True)))
#itemlist.append(Item(channel=item.channel, title="Estrenos", action="entradas", url="http://www.inkapelis.com/genero/estrenos/",
# text_color=color1, thumbnail=get_thumb('premieres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Castellano", action="entradas",
url=host+"?anio=&genero=&calidad=&idioma=Castellano&s=",
extra="Buscar", text_color=color1, thumbnail=get_thumb('espanolas', auto=True)))
itemlist.append(Item(channel=item.channel, title="Latino", action="entradas",
url=host+"?anio=&genero=&calidad=&idioma=Latino&s=",
extra="Buscar", text_color=color1, thumbnail=get_thumb('latino', auto=True)))
itemlist.append(Item(channel=item.channel, title="VOSE", action="entradas",
url=host+"?anio=&genero=&calidad=&idioma=Subtitulada&s=",
extra="Buscar", text_color=color1, thumbnail=get_thumb('newest', auto=True)))
itemlist.append(Item(channel=item.channel, title="Géneros", action="generos", url=host, text_color=color1,
thumbnail=get_thumb('genres', auto=True),))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url=host+"?s=", text_color=color1))
itemlist.append(Item(channel=item.channel, action="", title=""))
itemlist.append(
Item(channel=item.channel, action="filtro", title="Filtrar películas", url=host+"?s=", text_color=color1))
# Custom saved filters for movies
for i in range(1, 4):
filtros = config.get_setting("pers_peliculas" + str(i), item.channel)
if filtros:
title = "Filtro Personalizado " + str(i)
new_item = item.clone()
new_item.values = filtros
itemlist.append(
new_item.clone(action="filtro", title=title, url=host+"?s=", text_color=color2))
itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
autoplay.show_option(item.channel, itemlist)
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == "peliculas":
item.url = host
item.action = "entradas"
item.extra = "Novedades"
if categoria == "terror":
item.url = host+"genero/terror/"
item.action = "entradas"
if categoria == "castellano":
item.url = host+"?anio=&genero=&calidad=&idioma=Castellano&s="
item.extra = "Buscar"
item.action = "entradas"
if categoria == "latino":
item.url = host+"?anio=&genero=&calidad=&idioma=Latino&s="
item.extra = "Buscar"
item.action = "entradas"
itemlist = entradas(item)
if itemlist[-1].action == "entradas":
itemlist.pop()
# Catch the exception so the "novedades" section is not interrupted if one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def search(item, texto):
logger.info()
itemlist = []
item.extra = "Buscar"
item.url = host+"?s=%s" % texto
try:
return entradas(item)
# Catch the exception so the global search is not interrupted if one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def generos(item):
logger.info()
itemlist = []
item.text_color = color1
data = httptools.downloadpage(item.url).data
matches = scrapertools.find_multiple_matches(data, '<li class="cat-item cat-item-.*?><a href="([^"]+)".*?>(.*?)<b>')
for scrapedurl, scrapedtitle in matches:
if scrapedtitle == "Eroticas +18 " and config.get_setting("adult_mode") != 0:
itemlist.append(item.clone(action="eroticas", title=scrapedtitle, url=scrapedurl))
elif (scrapedtitle != "Estrenos ") and (scrapedtitle != "Próximos Estrenos "):
itemlist.append(item.clone(action="entradas", title=scrapedtitle, url=scrapedurl))
return itemlist
def filtro(item):
logger.info()
list_controls = []
valores = {}
strings = {}
# Use the default/saved values or the ones from a custom filter
if not item.values:
valores_guardados = config.get_setting("filtro_defecto_peliculas", item.channel)
else:
valores_guardados = item.values
item.values = ""
if valores_guardados:
dict_values = valores_guardados
else:
dict_values = None
if dict_values:
dict_values["filtro_per"] = 0
list_controls.append({'id': 'texto', 'label': 'Cadena de búsqueda', 'enabled': True,
'type': 'text', 'default': '', 'visible': True})
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
matches = scrapertools.find_multiple_matches(data, 'option value="">([^<]+)</option>(.*?)</select>')
i = 1
for filtro_title, values in matches:
id = filtro_title.replace("A\xc3\xb1o", "year").lower()
filtro_title = filtro_title.replace("A\xc3\xb1o", "Año")
list_controls.append({'id': id, 'label': filtro_title, 'enabled': True,
'type': 'list', 'default': 0, 'visible': True})
valores[id] = []
valores[id].append('')
strings[filtro_title] = []
list_controls[i]['lvalues'] = []
list_controls[i]['lvalues'].append('Cualquiera')
strings[filtro_title].append('Cualquiera')
patron = '<option value="([^"]+)">([^<]+)</option>'
matches_v = scrapertools.find_multiple_matches(values, patron)
for value, key in matches_v:
list_controls[i]['lvalues'].append(key)
valores[id].append(value)
strings[filtro_title].append(key)
i += 1
item.valores = valores
item.strings = strings
if "Filtro Personalizado" in item.title:
return filtrado(item, valores_guardados)
list_controls.append({'id': 'espacio', 'label': '', 'enabled': False,
'type': 'label', 'default': '', 'visible': True})
list_controls.append({'id': 'save', 'label': 'Establecer como filtro por defecto', 'enabled': True,
'type': 'bool', 'default': False, 'visible': True})
list_controls.append({'id': 'filtro_per', 'label': 'Guardar filtro en acceso directo...', 'enabled': True,
'type': 'list', 'default': 0, 'visible': True, 'lvalues': ['No guardar', 'Filtro 1',
'Filtro 2', 'Filtro 3']})
list_controls.append({'id': 'remove', 'label': 'Eliminar filtro personalizado...', 'enabled': True,
'type': 'list', 'default': 0, 'visible': True, 'lvalues': ['No eliminar', 'Filtro 1',
'Filtro 2', 'Filtro 3']})
from platformcode import platformtools
return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values,
caption="Filtra los resultados", item=item, callback='filtrado')
def filtrado(item, values):
values_copy = values.copy()
# Save the filter so it becomes the default one loaded
if "save" in values and values["save"]:
values_copy.pop("remove")
values_copy.pop("filtro_per")
values_copy.pop("save")
config.set_setting("filtro_defecto_peliculas", values_copy, item.channel)
# Remove the chosen custom filter
if "remove" in values and values["remove"] != 0:
config.set_setting("pers_peliculas" + str(values["remove"]), "", item.channel)
values_copy = values.copy()
# Save the filter under a custom shortcut
if "filtro_per" in values and values["filtro_per"] != 0:
index = "peliculas" + str(values["filtro_per"])
values_copy.pop("filtro_per")
values_copy.pop("save")
values_copy.pop("remove")
config.set_setting("pers_" + index, values_copy, item.channel)
genero = item.valores["genero"][values["genero"]]
year = item.valores["year"][values["year"]]
calidad = item.valores["calidad"][values["calidad"]]
idioma = item.valores["idioma"][values["idioma"]]
texto = values["texto"].replace(" ", "+")
strings = []
for key, value in dict(item.strings).items():
key2 = key.replace("Año", "year").lower()
strings.append(key + ": " + value[values[key2]])
strings.append("Texto: " + texto)
item.valores = "Filtro: " + ", ".join(sorted(strings))
item.strings = ""
item.url = host+"?anio=%s&genero=%s&calidad=%s&idioma=%s&s=%s" % \
(year, genero, calidad, idioma, texto)
item.extra = "Buscar"
return entradas(item)
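# Hedged sketch of the index-to-value mapping above: the settings dialog returns
# list positions and item.valores turns them back into query values. The lists
# here are made up for illustration.
def _demo_map_filter():
    valores = {"year": ["", "2018", "2019"], "genero": ["", "accion"]}
    values = {"year": 2, "genero": 1}  # positions picked in the dialog
    return valores["year"][values["year"]], valores["genero"][values["genero"]]  # ('2019', 'accion')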
def entradas(item):
logger.info()
itemlist = []
item.text_color = color2
# Download the page
data = httptools.downloadpage(item.url).data
data = re.sub("\n", "", data)
if "valores" in item and item.valores:
itemlist.append(item.clone(action="", title=item.valores, text_color=color4))
# Search branch
if item.extra == "Buscar":
# Extract the entries
entradas = scrapertools.find_multiple_matches(data, '<div class="col-mt-5 postsh">(.*?)</div></div></div>')
patron = '<div class="poster-media-card([^"]+)">.*?<a href="([^"]+)" title="([^"]+)">' \
'.*?<img.*?src="([^"]+)"'
for match in entradas:
matches = scrapertools.find_multiple_matches(match, patron)
for calidad, scrapedurl, scrapedtitle, scrapedthumbnail in matches:
thumbnail = scrapedthumbnail.replace("w185", "original")
title = scrapedtitle
calidad = calidad.strip()
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl, thumbnail=thumbnail,
contentTitle=scrapedtitle, fulltitle=scrapedtitle,
context=["buscar_trailer"],
contentType="movie"))
else:
# Extract the entries
if item.extra == "Novedades":
data2 = data.split("<h2>Últimas Películas Agregadas y Actualizadas</h2>", 1)[1]
entradas = scrapertools.find_multiple_matches(data2, '<div class="col-mt-5 postsh">(.*?)</div></div></div>')
else:
entradas = scrapertools.find_multiple_matches(data, '<div class="col-mt-5 postsh">(.*?)</div></div></div>')
patron = '<div class="poster-media-card([^"]+)">.*?<a href="([^"]+)" title="([^"]+)">' \
'.*?<div class="idiomes"><div class="(.*?)">.*?' \
'<img.*?src="([^"]+)".*?<span class="under-title">(.*?)</span>'
for match in entradas:
matches = scrapertools.find_multiple_matches(match, patron)
for calidad, url, scrapedtitle, idioma, scrapedthumbnail, category in matches:
# Skip adult entries
if category == "Eroticas +18":
continue
idioma = idioma.strip()
if idioma in IDIOMAS:
idioma = IDIOMAS[idioma]
else:
idioma = IDIOMAS['Subtitulado']
calidad = calidad.strip()
scrapedtitle = scrapedtitle.replace("Ver Pelicula ", "")
title = scrapedtitle
if idioma:
title += " [" + idioma + "]"
if calidad:
title += " [" + calidad + "]"
if 'class="proximamente"' in match:
title += " [Próximamente]"
thumbnail = scrapedthumbnail.replace("w185", "original")
filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w185", "")
filtro_list = {"poster_path": filtro_thumb}
filtro_list = filtro_list.items()
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, contentTitle=scrapedtitle,
fulltitle=scrapedtitle, thumbnail=thumbnail, context=["buscar_trailer"],
contentType="movie", infoLabels={'filtro': filtro_list}))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
# Extract the next-page marker
next_page = scrapertools.find_single_match(data, '<span class="current">.*?<\/span><a href="([^"]+)"')
if next_page:
if item.extra == "Buscar":
next_page = next_page.replace('&#038;', '&')
itemlist.append(Item(channel=item.channel, action="entradas", title="Siguiente", url=next_page, text_color=color3))
return itemlist
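# Hedged note: the 'filtro' entry built above pins the TMDB lookup to the exact
# poster the site shows (e.g. {"poster_path": "/abc123.jpg"}.items()), so
# set_infoLabels_itemlist() can pick the right movie among namesakes.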
def eroticas(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
# Extract the entries
entradas = scrapertools.find_multiple_matches(data, '<div class="col-mt-5 postsh">(.*?)</div></div></div>')
patron = '<div class="poster-media-card([^"]+)">.*?<a href="([^"]+)" title="([^"]+)">' \
'.*?<div class="idiomes"><div class="(.*?)">.*?' \
'<img.*?src="([^"]+)"'
for match in entradas:
matches = scrapertools.find_multiple_matches(match, patron)
for calidad, url, scrapedtitle, idioma, scrapedthumbnail in matches:
title = scrapedtitle + " [" + idioma + "] [" + calidad + "]"
thumbnail = scrapedthumbnail.replace("w185", "original")
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
extra="eroticas"))
# Extract the next-page marker
next_page = scrapertools.find_single_match(data, '<span class="current">.*?<\/span><a href="([^"]+)"')
if next_page:
itemlist.append(Item(channel=item.channel, action="entradas", title="Siguiente", url=next_page))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
item.text_color = color2
# Download the page
data = httptools.downloadpage(item.url).data
data = re.sub("\n", "", data)
sinopsis = scrapertools.find_single_match(data, '<h2>Sinopsis</h2>.*?>(.*?)</p>')
item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)
# Look it up on tmdb if not done before
if item.extra != "eroticas":
if item.extra != "library":
year = scrapertools.find_single_match(data, 'Año de lanzamiento.*?"ab">(\d+)')
if year:
try:
item.infoLabels['year'] = year
# Fetch the basic data for all movies using multiple threads
tmdb.set_infoLabels(item, __modo_grafico__)
except:
pass
trailer_url = scrapertools.find_single_match(data, 'id="trailerpro">.*?src="([^"]+)"')
item.infoLabels["trailer"] = trailer_url
patron = '<td><a href="([^"]+)".*?title="([^"]+)".*?<td>([^"]+)<\/td><td>([^"]+)<\/td>'
matches = scrapertools.find_multiple_matches(data, patron)
for url, server, idioma, calidad in matches:
if idioma in IDIOMAS:
idioma= IDIOMAS[idioma]
else:
idioma = IDIOMAS['Subtitulado']
if server == "Embed":
server = "Nowvideo"
if server == "Ul":
server = "Uploaded"
title = "%s [%s][%s]" % (server, idioma, calidad)
itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, language=idioma,
quality=calidad, server=server, infoLabels=item.infoLabels))
patron = 'id="(embed[0-9]*)".*?<div class="calishow">(.*?)<.*?src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for id_embed, calidad, url in matches:
title = scrapertools.find_single_match(url, "(?:http://|https://|//)(.*?)(?:embed.|videoembed|)/")
if re.search(r"(?i)inkapelis|goo.gl", title):
title = "Directo"
idioma = scrapertools.find_single_match(data, 'href="#%s".*?>([^<]+)<' % id_embed)
title = "%s [%s][%s]" % (title.capitalize(), idioma, calidad)
# no server here: the stale value from the previous loop was wrong, and
# play() resolves the real server via servertools
itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, language=idioma,
quality=calidad))
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
if itemlist:
if not config.get_setting('menu_trailer', item.channel):
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
text_color="magenta", context=""))
if item.extra != "library":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca",
action="add_pelicula_to_library", url=item.url, fulltitle=item.fulltitle,
infoLabels={'title': item.fulltitle}, text_color="green", extra="library"))
return itemlist
def play(item):
logger.info()
itemlist = []
if "drive.php?v=" in item.url or "//goo.gl/" in item.url:
data = httptools.downloadpage(item.url).data.replace("\\", "")
matches = scrapertools.find_multiple_matches(data, '"label":(.*?),.*?type":".*?/([^"]+)".*?file":"([^"]+)"')
for calidad, ext, url in matches:
title = ".%s %s [directo]" % (ext, calidad)
itemlist.insert(0, [title, url])
else:
itemlist = servertools.find_video_items(data=item.url)
for videoitem in itemlist:
videoitem.infoLabels=item.infoLabels
return itemlist

Some files were not shown because too many files have changed in this diff