Merge remote-tracking branch 'alfa-addon/master' into Fixes

unknown
2017-10-26 08:02:22 -03:00
17 changed files with 839 additions and 228 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.2.4" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.3.0" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,12 +19,14 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» playmax » playpornx
» canalporno » divxatope
» flashx » verpeliculasnuevas
» animeflv_me » hdfull
» pelismundo » downace
» gamovideo ¤ arreglos internos
» cartoonlatino » serieslan
» pelisplus » pedropolis
» flashx » cinetux
» animeflv_ru » streamixcloud
» estrenosgo » animemovil
» allpeliculas » pelismundo
¤ arreglos internos
[COLOR green]Gracias a [COLOR yellow]Danielr460[/COLOR] por su colaboración en esta versión[/COLOR]
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -35,12 +35,62 @@ def mainlist(item):
url= host + "movies/newmovies?page=1", extra1 = 0))
itemlist.append(item.clone(title="Por genero", action="generos", fanart="http://i.imgur.com/c3HS8kj.png",
url= host + "movies/getGanres"))
itemlist.append(item.clone(title="Colecciones", action="colecciones", fanart="http://i.imgur.com/c3HS8kj.png",
url= host))
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(title="Buscar...", action="search"))
return itemlist
def colecciones(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'href="(/peliculas[^"]+).*?'
patron += 'title_geo"><span>([^<]+).*?'
patron += 'title_eng"><span>([^<]+).*?'
patron += 'src="([^"]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedcantidad, scrapedthumbnail in matches:
if scrapedtitle == "LGTB" and config.get_setting("adult_mode") == 0:
continue
title = scrapedtitle.capitalize() + " (" + scrapedcantidad + ")"
itemlist.append(Item(channel = item.channel,
action = "listado_colecciones",
thumbnail = host + scrapedthumbnail,
title = title,
url = host + scrapedurl
))
return itemlist
def listado_colecciones(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data_url = scrapertools.find_single_match(data, "data_url: '([^']+)")
post = "page=1"
data = httptools.downloadpage(host + data_url, post=post).data
patron = 'a href="(/peli[^"]+).*?'
patron += 'src="([^"]+).*?'
patron += 'class="c_fichas_title">([^<]+).*?'
patron += 'Año:.*?href="">([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
item.infoLabels['year'] = scrapedyear
itemlist.append(item.clone(channel = item.channel,
action = "findvideos",
contentTitle = scrapedtitle,
thumbnail = scrapedthumbnail,
title = scrapedtitle,
url = host + scrapedurl
))
tmdb.set_infoLabels(itemlist)
return itemlist
def generos(item):
logger.info()
itemlist = []
@@ -61,6 +111,9 @@ def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if "Próximamente" in data:
itemlist.append(Item(channel = item.channel, title = "Próximamente"))
return itemlist
patron = 'data-link="([^"]+).*?'
patron += '>([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
@@ -137,7 +190,7 @@ def lista(item):
def search(item, texto):
logger.info()
if texto != "":
texto = texto.replace(" ", "+")
texto = texto.replace(" ", "%20")
item.url = host + "/movies/search/" + texto
item.extra = "busqueda"
try:
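
The search fix above replaces spaces with "%20" by hand. A more general sketch (not part of this commit) percent-encodes the whole query with Python 2's urllib.quote, which also escapes other reserved characters:

import urllib
texto = urllib.quote(texto)  # " " becomes %20; &, ? and # are escaped too
item.url = host + "/movies/search/" + texto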

View File

@@ -162,27 +162,20 @@ def novedades_anime(item):
def listado(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
# logger.debug("datito %s" % data)
url_pagination = scrapertools.find_single_match(data, '<li class="current">.*?</li>[\s]<li><a href="([^"]+)">')
data = scrapertools.find_single_match(data, '</div><div class="full">(.*?)<div class="pagination')
matches = re.compile('<img.+?src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>.+?'
'<div class="full item_info genres_info">(.*?)</div>.+?class="full">(.*?)</p>',
re.DOTALL).findall(data)
itemlist = []
for thumbnail, url, title, genres, plot in matches:
title = clean_title(title)
url = urlparse.urljoin(HOST, url)
thumbnail = urlparse.urljoin(HOST, thumbnail)
new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
fulltitle=title, plot=plot)
@@ -192,28 +185,22 @@ def listado(item):
else:
new_item.show = title
new_item.context = renumbertools.context(item)
itemlist.append(new_item)
if url_pagination:
url = urlparse.urljoin(HOST, url_pagination)
title = ">> Pagina Siguiente"
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
if item.plot == "":
item.plot = scrapertools.find_single_match(data, 'Description[^>]+><p>(.*?)</p>')
data = scrapertools.find_single_match(data, '<div class="Sect Episodes full">(.*?)</div>')
matches = re.compile('<a href="([^"]+)"[^>]+>(.+?)</a', re.DOTALL).findall(data)
@@ -221,7 +208,6 @@ def episodios(item):
title = title.strip()
url = urlparse.urljoin(item.url, url)
thumbnail = item.thumbnail
try:
episode = int(scrapertools.find_single_match(title, "Episodio (\d+)"))
except ValueError:
@@ -229,42 +215,36 @@ def episodios(item):
episode = 1
else:
season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode)
title = "%s: %sx%s" % (item.title, season, str(episode).zfill(2))
itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumbnail, fulltitle=title,
fanart=thumbnail, contentType="episode"))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
_id = scrapertools.find_single_match(item.url, 'https://animeflv.ru/ver/([^/]+)/')
post = "embed_id=%s" % _id
data = httptools.downloadpage("https://animeflv.ru/get_video_info", post=post).data
dict_data = jsontools.load(data)
headers = dict()
headers["Referer"] = item.url
data = httptools.downloadpage("https:" + dict_data["value"], headers=headers).data
dict_data = jsontools.load(data)
list_videos = dict_data["playlist"][0]["sources"]
if not dict_data:
return itemlist
list_videos = dict_data["playlist"][0]
if isinstance(list_videos, list):
for video in list_videos:
itemlist.append(Item(channel=item.channel, action="play", url=video["file"], show=re.escape(item.show),
title="Ver en calidad [%s]" % video["label"], plot=item.plot, fulltitle=item.title,
itemlist.append(Item(channel=item.channel, action="play", url=video["file"],
show=re.escape(item.show),
title=item.title, plot=item.plot, fulltitle=item.title,
thumbnail=item.thumbnail))
else:
for video in list_videos.values():
itemlist.append(Item(channel=item.channel, action="play", url=video["file"], show=re.escape(item.show),
title="Ver en calidad [%s]" % video["label"], plot=item.plot, fulltitle=item.title,
video += "|User-Agent=Mozilla/5.0"
itemlist.append(Item(channel=item.channel, action="play", url=video, show=re.escape(item.show),
title=item.title, plot=item.plot, fulltitle=item.title,
thumbnail=item.thumbnail))
return itemlist
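
The video += "|User-Agent=Mozilla/5.0" line above uses Kodi's convention of appending HTTP headers to a media URL after a pipe; the player sends everything after "|" as request headers. A minimal sketch (hypothetical URL):

url = "https://example.com/stream.mp4"  # hypothetical
url += "|User-Agent=Mozilla/5.0&Referer=https://animeflv.ru/"  # several headers joined with &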

View File

@@ -0,0 +1,50 @@
{
"id": "animemovil",
"name": "Animemovil",
"active": true,
"adult": false,
"language": ["*"],
"thumbnail": "https://s1.postimg.org/92ji7stii7/animemovil1.png",
"banner": "",
"version": 1,
"changes": [
{
"date": "24/10/2017",
"description": "Primera version"
}
],
"categories": [
"anime"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}

View File

@@ -0,0 +1,406 @@
# -*- coding: utf-8 -*-
import re
from channels import renumbertools
from core import httptools
from core import jsontools
from core import scrapertools
from core.item import Item
from platformcode import platformtools, config, logger
__modo_grafico__ = config.get_setting('modo_grafico', 'animemovil')
__perfil__ = int(config.get_setting('perfil', "animemovil"))
# Fijar perfil de color
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
color1 = color2 = color3 = color4 = color5 = ""
host = "http://animemovil.com"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="recientes", title="Episodios Recientes", thumbnail=item.thumbnail,
url=host, text_color=color1, contentType="tvshow", extra="recientes"))
itemlist.append(Item(channel=item.channel, action="listado", title="Animes", thumbnail=item.thumbnail,
url="%s/_API/?src=animesRecientes&offset=0" % host, text_color=color1))
itemlist.append(Item(channel=item.channel, action="emision", title="En emisión", thumbnail=item.thumbnail,
url="%s/anime/emision" % host, text_color=color2, contentType="tvshow"))
itemlist.append(Item(channel=item.channel, action="indices", title="Índices", thumbnail=item.thumbnail,
text_color=color2))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar...",
thumbnail=item.thumbnail, text_color=color3))
itemlist.append(item.clone(title="Configurar canal", action="openconfig", text_color=color5, folder=False))
if renumbertools.context:
itemlist = renumbertools.show_option(item.channel, itemlist)
return itemlist
def openconfig(item):
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
item.url = "%s/?s=%s" % (host, texto.replace(" ", "+"))
try:
return recientes(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def recientes(item):
logger.info()
item.contentType = "tvshow"
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<ul class="emision"(.*?)</ul>')
patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, title, thumb in matches:
url = host + url
try:
contentTitle = re.split(r"(?i) \d+ (?:Sub Español|Audio Español|Español Latino)", title)[0]
except:
contentTitle = ""
contentTitle = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", contentTitle)
tipo = "tvshow"
show = contentTitle
action = "episodios"
context = renumbertools.context
if item.extra == "recientes":
action = "findvideos"
context = ""
if not item.extra and (url.endswith("-pelicula/") or url.endswith("-pelicula")):
tipo = "movie"
show = ""
action = "peliculas"
if not thumb.startswith("http"):
thumb = "http:%s" % thumb
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=thumb, text_color=color3,
contentTitle=contentTitle, contentSerieName=show, infoLabels=infoLabels,
thumb_=thumb, contentType=tipo, context=context))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if item.extra and itemlist:
for it in itemlist:
it.thumbnail = it.thumb_
except:
pass
return itemlist
def listado(item):
logger.info()
itemlist = []
data = jsontools.load(httptools.downloadpage(item.url).data)
for it in data.get("items", []):
scrapedtitle = it["title"]
url = "%s/%s" % (host, it["url"])
thumb = "http://img.animemovil.com/w440-h250-c/%s" % it["img"]
title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", scrapedtitle)
tipo = "tvshow"
show = title
action = "episodios"
if url.endswith("-pelicula/") or url.endswith("-pelicula"):
tipo = "movie"
show = ""
action = "peliculas"
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(item.clone(action=action, title=scrapedtitle, url=url, thumbnail=thumb, text_color=color3,
contentTitle=title, contentSerieName=show, infoLabels=infoLabels,
context=renumbertools.context, contentType=tipo))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
if data["buttom"] and itemlist:
offset = int(scrapertools.find_single_match(item.url, 'offset=(\d+)')) + 1
url = re.sub(r'offset=\d+', 'offset=%s' % offset, item.url)
itemlist.append(Item(channel=item.channel, action="listado", url=url, title=">> Página Siguiente",
thumbnail=item.thumbnail, text_color=color2))
return itemlist
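
listado pages the site's _API endpoint by rewriting the offset query parameter in place. The same pattern as a standalone sketch (hypothetical helper name):

import re

def next_page_url(url):
    # read the current offset and write it back incremented; the API above advances one page per offset step
    offset = int(re.search(r'offset=(\d+)', url).group(1)) + 1
    return re.sub(r'offset=\d+', 'offset=%d' % offset, url)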
def indices(item):
logger.info()
itemlist = []
if "Índices" in item.title:
itemlist.append(item.clone(title="Por Género", url="%s/anime/generos/" % host))
itemlist.append(item.clone(title="Por Letra", url="%s/anime/" % host))
itemlist.append(item.clone(action="completo", title="Lista completa de Animes",
url="%s/anime/lista/" % host))
else:
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<div class="letras">(.*?)</div>')
patron = '<a title="([^"]+)"'
matches = scrapertools.find_multiple_matches(bloque, patron)
for title in matches:
if "Letra" in item.title:
url = "%s/_API/?src=animesLetra&offset=0&letra=%s" % (host, title)
else:
url = "%s/_API/?src=animesGenero&offset=0&genero=%s" % (host, title)
itemlist.append(item.clone(action="listado", url=url, title=title))
return itemlist
def completo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<ul class="listadoAnime">(.*?)</ul>')
patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, title, thumb in matches:
url = host + url
scrapedtitle = title
thumb = thumb.replace("s90-c", "w440-h250-c")
title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub Español| Peliculas", "", scrapedtitle)
tipo = "tvshow"
show = title
action = "episodios"
if url.endswith("-pelicula/") or url.endswith("-pelicula"):
tipo = "movie"
show = ""
action = "peliculas"
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(Item(channel=item.channel, action=action, title=scrapedtitle, url=url, thumbnail=thumb,
text_color=color3, contentTitle=title, contentSerieName=show, extra="completo",
context=renumbertools.context, contentType=tipo, infoLabels=infoLabels))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
show = scrapertools.find_single_match(data, '<title>\s*([^<]+)\s*</title>')
show = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", show)
if not item.infoLabels["plot"]:
item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="InfoSipnosis">.*?<p>(.*?)</p>')
bloque = scrapertools.find_single_match(data, 'ul class="lista"(.*?)</ul>')
matches = scrapertools.find_multiple_matches(bloque, '<li><a href="([^"]+)" title="([^"]+)"')
for url, title in matches:
url = host + url
epi = scrapertools.find_single_match(title, '(?i)%s.*? (\d+) (?:Sub|Audio|Español)' % item.contentSerieName)
new_item = item.clone(action="findvideos", url=url, title=title, extra="", context=renumbertools.context)
if epi:
season, episode = renumbertools.numbered_for_tratk(
item.channel, show, 1, int(epi))
new_item.infoLabels["episode"] = episode
new_item.infoLabels["season"] = season
new_item.title = "%sx%s %s" % (season, episode, title)
itemlist.append(new_item)
if item.infoLabels.get("tmdb_id") or item.extra == "recientes" or item.extra == "completo":
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
if config.get_videolibrary_support() and itemlist:
itemlist.append(Item(channel=item.channel, title="Añadir serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", contentTitle=item.contentTitle,
contentSerieName=item.contentSerieName, text_color=color4, fanart=item.fanart,
thumbnail=item.thumbnail))
return itemlist
def peliculas(item):
logger.info()
itemlist = []
if item.extra == "completo":
try:
from core import tmdb
tmdb.set_infoLabels_item(item, __modo_grafico__)
except:
pass
data = httptools.downloadpage(item.url).data
if not item.infoLabels["plot"]:
item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="InfoSipnosis">.*?<p>(.*?)</p>')
bloque = scrapertools.find_single_match(data, 'ul class="lista"(.*?)</ul>')
matches = scrapertools.find_multiple_matches(bloque, '<li><a href="([^"]+)" title="([^"]+)"')
if len(matches) == 1:
item.url = host + matches[0][0]
itemlist = findvideos(item)
else:
for url, title in matches:
itemlist.append(item.clone(action="findvideos", title=title, url=url, extra=""))
return itemlist
def emision(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloques = scrapertools.find_multiple_matches(data, '<div class="horario">.*?</i>\s*(.*?)</span>(.*?)</ul>')
patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
for dia, b in bloques:
matches = scrapertools.find_multiple_matches(b, patron)
if matches:
itemlist.append(item.clone(action="", title=dia, text_color=color1))
for url, title, thumb in matches:
url = host + url
scrapedtitle = " %s" % title
title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub Español| Peliculas", "", title)
if not thumb.startswith("http"):
thumb = "http:%s" % thumb
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(item.clone(action="episodios", title=scrapedtitle, url=url, thumbnail=thumb, text_color=color3,
contentTitle=title, contentSerieName=title, extra="recientes",
context=renumbertools.context, infoLabels=infoLabels))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
id = scrapertools.find_single_match(data, '"id":"([^"]+)"')
bloque = scrapertools.find_single_match(data, 'ul class="controles">(.*?)</ul>')
patron = '<li title="([^"]+)" id="[^"]*" host="([^"]+)">'
matches = scrapertools.find_multiple_matches(data, patron)
for title, server in matches:
if title == "Vizard":
continue
title = "%s - %s" % (title, item.title)
post = "host=%s&id=%s" % (server, id)
itemlist.append(item.clone(action="play", url="http://server-2-stream.animemovil.com/V2/", title=title,
post=post))
downl = scrapertools.find_single_match(data, '<div class="descargarCap">.*?<a href="([^"]+)"')
if downl:
downl = downl.replace("&amp;", "&")
itemlist.append(item.clone(action="play", title="Descarga - %s" % item.title, url=downl, server="directo"))
if not itemlist:
itemlist.append(Item(channel=item.channel, title="No hay vídeos disponibles", action=""))
if item.extra == "recientes":
url = scrapertools.find_single_match(data, '<a class="CapList".*?href="([^"]+)"')
if url:
url = host + url
itemlist.append(item.clone(action="episodios", title="Ir a lista de capítulos", url=url, text_color=color1))
elif item.contentType == "movie" and config.get_library_support():
if "No hay vídeos disponibles" not in itemlist[0].title:
itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca", url=item.url,
action="add_pelicula_to_library", contentTitle=item.contentTitle, text_color=color4,
thumbnail=item.thumbnail, fanart=item.fanart))
return itemlist
def play(item):
logger.info()
if item.server:
return [item]
itemlist = []
data = jsontools.load(httptools.downloadpage(item.url, item.post).data)
if data["jwplayer"] == False:
content = data["eval"]["contenido"]
urls = scrapertools.find_multiple_matches(content, 'file\s*:\s*"([^"]+)"')
if not urls:
urls = scrapertools.find_multiple_matches(content, '"GET","([^"]+)"')
for url in urls:
if "mediafire" in url:
data_mf = httptools.downloadpage(url).data
url = scrapertools.find_single_match(data_mf, 'kNO\s*=\s*"([^"]+)"')
ext = url[-4:]
itemlist.insert(0, ["%s [directo]" % ext, url])
else:
if data["jwplayer"].get("sources"):
for source in data["jwplayer"]["sources"]:
label = source.get("label", "")
ext = source.get("type", "")
if ext and "/" in ext:
ext = ".%s " % ext.rsplit("/", 1)[1]
url = source.get("file")
if "server-3-stream" in url:
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location")
itemlist.insert(0, ["%s%s [directo]" % (ext, label), url])
elif data["jwplayer"].get("file"):
label = data["jwplayer"].get("label", "")
url = data["jwplayer"]["file"]
ext = data["jwplayer"].get("type", "")
if ext and "/" in ext:
ext = "%s " % ext.rsplit("/", 1)[1]
if "server-3-stream" in url:
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location")
itemlist.insert(0, [".%s%s [directo]" % (ext, label), url])
return itemlist
def newest(categoria):
logger.info()
item = Item()
try:
item.url = "http://skanime.net/"
item.extra = "novedades"
itemlist = recientes(item)
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return itemlist

View File

@@ -1,8 +1,7 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
from channels import renumbertools
from channelselector import get_thumb
from core import httptools
from core import scrapertools
@@ -33,7 +32,6 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=host,
thumbnail=thumb_series))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -71,7 +69,7 @@ def lista_gen(item):
title = scrapedtitle + " [ " + scrapedlang + "]"
itemlist.append(
Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail, action="episodios",
show=scrapedtitle, context=renumbertools.context(item)))
show=scrapedtitle))
tmdb.set_infoLabels(itemlist)
# Paginacion
patron_pag = '<a class="nextpostslink" rel="next" href="([^"]+)">'
@@ -98,7 +96,7 @@ def lista(item):
for link, name in matches:
title = name + " [Latino]"
url = link
context1=[renumbertools.context(item), autoplay.context]
context1=[autoplay.context]
itemlist.append(
item.clone(title=title, url=url, plot=title, action="episodios", show=title,
context=context1))
@@ -129,31 +127,23 @@ def episodios(item):
number = 0
ncap = 0
A = 1
tempo=1
for temp, link, name in matches:
if A != temp:
if A != temp and "Ranma" not in show:
number = 0
number = number + 1
if "Ranma" in show:
number = int(temp)
temp = str(1)
else:
number = number + 1
if number < 10:
capi = "0" + str(number)
else:
capi = str(number)
number,tempo=renumerar_ranma(number,tempo,18+1,1)
number,tempo=renumerar_ranma(number,tempo,22+1,2)
number,tempo=renumerar_ranma(number,tempo,24+1,3)
number,tempo=renumerar_ranma(number,tempo,24+1,4)
number,tempo=renumerar_ranma(number,tempo,24+1,5)
number,tempo=renumerar_ranma(number,tempo,24+1,6)
capi=str(number).zfill(2)
if "Ranma" in show:
season = 1
episode = number
season, episode = renumbertools.numbered_for_tratk(
item.channel, item.show, season, episode)
date = name
if episode < 10:
capi = "0" + str(episode)
else:
capi = episode
title = str(season) + "x" + str(capi) + " - " + name # "{0}x{1} - ({2})".format(season, episode, date)
title = "{0}x{1} - ({2})".format(str(tempo), capi, name)
else:
title = str(temp) + "x" + capi + " - " + name
title = "{0}x{1} - ({2})".format(str(temp), capi, name)
url = link
A = temp
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, show=show))
@@ -165,6 +155,11 @@ def episodios(item):
return itemlist
def renumerar_ranma(number,tempo,final,actual):
if number==final and tempo==actual:
tempo=tempo+1
number=1
return number, tempo
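
renumerar_ranma rolls the running episode counter over into the next season once it passes that season's last episode; the chained calls above encode season lengths of 18, 22, 24, 24, 24 and 24 (each call passes last+1 as the rollover point). An equivalent table-driven sketch (hypothetical names):

SEASON_LENGTHS = [18, 22, 24, 24, 24, 24]

def renumber(absolute_episode):
    # walk the cumulative season lengths to map an absolute episode number to (season, episode)
    season = 1
    for length in SEASON_LENGTHS:
        if absolute_episode <= length:
            break
        absolute_episode -= length
        season += 1
    return season, absolute_episode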
def findvideos(item):
logger.info()

View File

@@ -30,7 +30,7 @@ def mainlist(item):
data = httptools.downloadpage(CHANNEL_HOST).data
total = scrapertools.find_single_match(data, "TENEMOS\s<b>(.*?)</b>")
titulo = "Peliculas (%s)" % total
titulo = "Peliculas"
itemlist.append(item.clone(title=titulo, text_color=color2, action="", text_bold=True))
itemlist.append(item.clone(action="peliculas", title=" Novedades", url=CHANNEL_HOST + "pelicula",
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres"
@@ -283,7 +283,7 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
if type == "descarga": t_tipo = "Descargar"
data = data.replace("\n", "")
if type == "online":
patron = '(?is)class="playex.*?visualizaciones'
patron = '(?is)class="playex.*?sheader'
bloque1 = scrapertools.find_single_match(data, patron)
patron = '(?is)#(option-[^"]+).*?png">([^<]+)'
match = scrapertools.find_multiple_matches(data, patron)
@@ -303,7 +303,7 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
bloque2 = scrapertools.find_single_match(data, '(?s)box_links.*?dt_social_single')
bloque2 = bloque2.replace("\t", "").replace("\r", "")
patron = '(?s)optn" href="([^"]+)'
patron += '.*?title="([^\.]+)'
patron += '.*?alt="([^\.]+)'
patron += '.*?src.*?src="[^>]+"?/>([^<]+)'
patron += '.*?src="[^>]+"?/>([^<]+)'
patron += '.*?/span>([^<]+)'

View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
from channelselector import get_thumb
@@ -53,8 +53,7 @@ def listado(item):
patron += '<b>Categoria:\s*</b>([^&]+)&raquo;\s*([^<]+).*?'
patron += '<div class="OpcionesDescargasMini">(.*?)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = scrapertools.find_multiple_matches(data, patron)
for thumbnail, title, cat_padres, cat_hijos, opciones in matches:
# logger.debug(thumbnail + "\n" + title + "\n" + cat_padres + "\n" + cat_hijos + "\n" + opciones)
# Obtenemos el año del titulo y eliminamos lo q sobre
@@ -70,7 +69,7 @@ def listado(item):
thumbnail = HOST + thumbnail[:-5] + 'b' + thumbnail[-4:]
# Buscamos opcion de ver online
patron = '<a href="http://estrenosly.org/ver-online-([^"]+)'
patron = '<a href="http://estrenos.*?/ver-online-([^"]+)'
url_ver = scrapertools.find_single_match(opciones, patron)
if url_ver:
new_item = Item(channel=item.channel, action="findvideos", title=title,

View File

@@ -98,10 +98,11 @@ def peliculas(item):
url_next_page = ''
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
# logger.info(data)
patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?' # img, title
patron += '<div class="rating"><span class="[^"]+"></span>([^<]+).*?' # rating
patron += '<span class="quality">([^<]+)</span><a href="([^"]+)">.*?' # calidad, url
patron += '<span class="quality">([^<]+)</span></div><a href="([^"]+)">.*?' # calidad, url
patron += '<span>([^<]+)</span>' # year
matches = scrapertools.find_multiple_matches(data, patron)

View File

@@ -124,7 +124,7 @@ def filtro(item):
patron += '</span>([^<]+)</a>'
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, title in matches:
if "eroti33cas" in title and config.get_setting("adult_mode") == 0:
if "eroticas" in title and config.get_setting("adult_mode") == 0:
continue
itemlist.append(item.clone(action = "peliculas",
title = title.title(),

View File

@@ -55,6 +55,7 @@ def lista(item):
# Paginacion
num_items_x_pagina = 30
min = item.page * num_items_x_pagina
min=min-item.page
max = min + num_items_x_pagina - 1
for link, img, name in matches[min:max]:
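
Because the slice matches[min:max] excludes its upper bound, each page renders 29 items rather than 30; subtracting item.page from min keeps consecutive pages contiguous instead of skipping one entry per page. A worked check with the values above:

# page 0: min = 0*30 - 0 = 0,  max = 0 + 30 - 1 = 29  -> matches[0:29],  items 0..28
# page 1: min = 1*30 - 1 = 29, max = 29 + 30 - 1 = 58 -> matches[29:58], items 29..57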

View File

@@ -2,11 +2,15 @@
import copy
import re
import sqlite3
import time
from core import filetools
from core import httptools
from core import jsontools
from core import scrapertools
from core.item import InfoLabels
from platformcode import config
from platformcode import logger
# -----------------------------------------------------------------------------------------------------------
@@ -61,6 +65,123 @@ from platformcode import logger
# --------------------------------------------------------------------------------------------------------------
otmdb_global = None
fname = filetools.join(config.get_data_path(), "alfa_db.sqlite")
def create_bd():
conn = sqlite3.connect(fname)
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS tmdb_cache (url TEXT PRIMARY KEY, response TEXT, added TEXT)')
conn.commit()
conn.close()
def drop_bd():
conn = sqlite3.connect(fname)
c = conn.cursor()
c.execute('DROP TABLE IF EXISTS tmdb_cache')
conn.commit()
conn.close()
return True
create_bd()
# El nombre de la funcion es el nombre del decorador y recibe la funcion que decora.
def cache_response(fn):
logger.info()
# import time
# start_time = time.time()
def wrapper(*args):
import base64
def check_expired(ts):
import datetime
valided = False
cache_expire = config.get_setting("tmdb_cache_expire", default=0)
saved_date = datetime.datetime.fromtimestamp(ts)
current_date = datetime.datetime.fromtimestamp(time.time())
elapsed = current_date - saved_date
# 1 day
if cache_expire == 0:
if elapsed > datetime.timedelta(days=1):
valided = False
else:
valided = True
# 7 days
elif cache_expire == 1:
if elapsed > datetime.timedelta(days=7):
valided = False
else:
valided = True
# 15 days
elif cache_expire == 2:
if elapsed > datetime.timedelta(days=15):
valided = False
else:
valided = True
# 1 month - 30 days
elif cache_expire == 3:
# no tenemos en cuenta febrero o meses con 31 días
if elapsed > datetime.timedelta(days=30):
valided = False
else:
valided = True
# no expire
elif cache_expire == 4:
valided = True
return valided
result = {}
try:
# no está activa la cache
if not config.get_setting("tmdb_cache", default=False):
result = fn(*args)
else:
conn = sqlite3.connect(fname)
c = conn.cursor()
url_base64 = base64.b64encode(args[0])
c.execute("SELECT response, added FROM tmdb_cache WHERE url=?", (url_base64,))
row = c.fetchone()
if row and check_expired(float(row[1])):
result = eval(base64.b64decode(row[0]))
# si no se ha obtenido información, llamamos a la funcion
if not result:
result = fn(*args)
result_base64 = base64.b64encode(str(result))
c.execute("INSERT OR REPLACE INTO tmdb_cache (url, response, added) VALUES (?, ?, ?)",
(url_base64, result_base64, time.time()))
conn.commit()
conn.close()
# elapsed_time = time.time() - start_time
# logger.debug("TARDADO %s" % elapsed_time)
# error al obtener los datos
except Exception, ex:
message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
logger.error("error en: %s" % message)
return result
return wrapper
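
cache_response is applied below to Tmdb.get_json. As a minimal usage sketch (hypothetical function): the first positional argument must be the URL, because the wrapper base64-encodes args[0] as the sqlite key, and the return value should be a plain Python literal (dict/list), since cached entries round-trip through str() on write and eval() on read:

@cache_response
def fetch_json(url):  # hypothetical
    return jsontools.load(httptools.downloadpage(url).data)  # a dict survives the str()/eval() round-trip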
def set_infoLabels(source, seekTmdb=True, idioma_busqueda='es'):
@@ -78,6 +199,7 @@ def set_infoLabels(source, seekTmdb=True, idioma_busqueda='es'):
@return: un numero o lista de numeros con el resultado de las llamadas a set_infoLabels_item
@rtype: int, list
"""
start_time = time.time()
if type(source) == list:
ret = set_infoLabels_itemlist(source, seekTmdb, idioma_busqueda)
@@ -95,34 +217,35 @@ def set_infoLabels_itemlist(item_list, seekTmdb=False, idioma_busqueda='es'):
La API tiene un limite de 40 peticiones por IP cada 10'' y por eso la lista no deberia tener mas de 30 items
para asegurar un buen funcionamiento de esta funcion.
:param item_list: listado de objetos Item que representan peliculas, series o capitulos. El atributo
@param item_list: listado de objetos Item que representan peliculas, series o capitulos. El atributo
infoLabels de cada objeto Item sera modificado incluyendo los datos extras localizados.
:type item_list: list
:param seekTmdb: Si es True hace una busqueda en www.themoviedb.org para obtener los datos, en caso contrario
@type item_list: list
@param seekTmdb: Si es True hace una busqueda en www.themoviedb.org para obtener los datos, en caso contrario
obtiene los datos del propio Item si existen.
:type seekTmdb: bool
:param idioma_busqueda: Codigo del idioma segun ISO 639-1, en caso de busqueda en www.themoviedb.org.
:type idioma_busqueda: str
@type seekTmdb: bool
@param idioma_busqueda: Codigo del idioma segun ISO 639-1, en caso de busqueda en www.themoviedb.org.
@type idioma_busqueda: str
:return: Una lista de numeros cuyo valor absoluto representa la cantidad de elementos incluidos en el atributo
@return: Una lista de numeros cuyo valor absoluto representa la cantidad de elementos incluidos en el atributo
infoLabels de cada Item. Este numero sera positivo si los datos se han obtenido de www.themoviedb.org y
negativo en caso contrario.
:rtype: list
@rtype: list
"""
import threading
semaforo = threading.Semaphore(20)
threads_num = config.get_setting("tmdb_threads", default=20)
semaforo = threading.Semaphore(threads_num)
lock = threading.Lock()
r_list = list()
i = 0
l_hilo = list()
def sub_thread(item, _i, _seekTmdb):
def sub_thread(_item, _i, _seekTmdb):
semaforo.acquire()
ret = set_infoLabels_item(item, _seekTmdb, idioma_busqueda, lock)
# logger.debug(str(ret) + "item: " + item.tostring())
ret = set_infoLabels_item(_item, _seekTmdb, idioma_busqueda, lock)
# logger.debug(str(ret) + "item: " + _item.tostring())
semaforo.release()
r_list.append((_i, item, ret))
r_list.append((_i, _item, ret))
for item in item_list:
t = threading.Thread(target=sub_thread, args=(item, i, seekTmdb))
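
The semaphore bounds how many lookups run at once (now configurable via tmdb_threads) to respect the per-IP limit described in the docstring. A variant sketch of the same pattern with try/finally, so an exception inside the lookup cannot leak a semaphore slot (the code above releases manually):

def sub_thread(_item, _i, _seekTmdb):
    semaforo.acquire()
    try:
        ret = set_infoLabels_item(_item, _seekTmdb, idioma_busqueda, lock)
        r_list.append((_i, _item, ret))
    finally:
        semaforo.release()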
@@ -142,21 +265,22 @@ def set_infoLabels_itemlist(item_list, seekTmdb=False, idioma_busqueda='es'):
def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
# -----------------------------------------------------------------------------------------------------------
# Obtiene y fija (item.infoLabels) los datos extras de una serie, capitulo o pelicula.
#
# Parametros:
# item: (Item) Objeto Item que representa un pelicula, serie o capitulo. El atributo infoLabels sera
# modificado incluyendo los datos extras localizados.
# (opcional) seekTmdb: (bool) Si es True hace una busqueda en www.themoviedb.org para obtener los datos,
# en caso contrario obtiene los datos del propio Item si existen.
# (opcional) idioma_busqueda: (str) Codigo del idioma segun ISO 639-1, en caso de busqueda en
# www.themoviedb.org.
# Retorna:
# Un numero cuyo valor absoluto representa la cantidad de elementos incluidos en el atributo
# item.infoLabels.
# Este numero sera positivo si los datos se han obtenido de www.themoviedb.org y negativo en caso contrario.
# ---------------------------------------------------------------------------------------------------------
"""
Obtiene y fija (item.infoLabels) los datos extras de una serie, capitulo o pelicula.
@param item: Objeto Item que representa un pelicula, serie o capitulo. El atributo infoLabels sera modificado
incluyendo los datos extras localizados.
@type item: Item
@param seekTmdb: Si es True hace una busqueda en www.themoviedb.org para obtener los datos, en caso contrario
obtiene los datos del propio Item si existen.
@type seekTmdb: bool
@param idioma_busqueda: Codigo del idioma segun ISO 639-1, en caso de busqueda en www.themoviedb.org.
@type idioma_busqueda: str
@param lock: para uso de threads cuando es llamado del metodo 'set_infoLabels_itemlist'
@return: Un numero cuyo valor absoluto representa la cantidad de elementos incluidos en el atributo item.infoLabels.
Este numero sera positivo si los datos se han obtenido de www.themoviedb.org y negativo en caso contrario.
@rtype: int
"""
global otmdb_global
def __leer_datos(otmdb_aux):
@@ -183,10 +307,9 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
if lock:
lock.acquire()
if not otmdb_global or (item.infoLabels['tmdb_id'] and
str(otmdb_global.result.get("id")) != item.infoLabels['tmdb_id']) \
or (otmdb_global.texto_buscado and
otmdb_global.texto_buscado != item.infoLabels['tvshowtitle']):
if not otmdb_global or (item.infoLabels['tmdb_id']
and str(otmdb_global.result.get("id")) != item.infoLabels['tmdb_id']) \
or (otmdb_global.texto_buscado and otmdb_global.texto_buscado != item.infoLabels['tvshowtitle']):
if item.infoLabels['tmdb_id']:
otmdb_global = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=tipo_busqueda,
idioma_busqueda=idioma_busqueda)
@@ -196,8 +319,6 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
__leer_datos(otmdb_global)
temporada = otmdb_global.get_temporada(numtemporada)
if lock:
lock.release()
@@ -230,7 +351,6 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
return len(item.infoLabels)
else:
# Tenemos numero de temporada valido pero no numero de episodio...
# ... buscar datos temporada
@@ -254,7 +374,6 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
# Buscar...
else:
otmdb = copy.copy(otmdb_global)
# if otmdb is None: # Se elimina por q sino falla al añadir series por falta de imdb, pero por contra provoca mas llamadas
# Busquedas por ID...
if item.infoLabels['tmdb_id']:
# ...Busqueda por tmdb_id
@@ -270,8 +389,7 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
elif tipo_busqueda == 'tv': # buscar con otros codigos
if item.infoLabels['tvdb_id']:
# ...Busqueda por tvdb_id
otmdb = Tmdb(external_id=item.infoLabels['tvdb_id'], external_source="tvdb_id",
tipo=tipo_busqueda,
otmdb = Tmdb(external_id=item.infoLabels['tvdb_id'], external_source="tvdb_id", tipo=tipo_busqueda,
idioma_busqueda=idioma_busqueda)
elif item.infoLabels['freebase_mid']:
# ...Busqueda por freebase_mid
@@ -303,16 +421,16 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
else:
titulo_buscado = item.fulltitle
otmdb = Tmdb(texto_buscado=titulo_buscado, tipo=tipo_busqueda,
idioma_busqueda=idioma_busqueda,
filtro=item.infoLabels.get('filtro', {}),
year=item.infoLabels['year'])
otmdb = Tmdb(texto_buscado=titulo_buscado, tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda,
filtro=item.infoLabels.get('filtro', {}), year=item.infoLabels['year'])
if otmdb.get_id() and not lock:
if otmdb.get_id() and config.get_setting("tmdb_plus_info", default=False):
# Si la busqueda ha dado resultado y no se esta buscando una lista de items,
# realizar otra busqueda para ampliar la informacion
otmdb = Tmdb(id_Tmdb=otmdb.result.get("id"), tipo=tipo_busqueda,
idioma_busqueda=idioma_busqueda)
otmdb = Tmdb(id_Tmdb=otmdb.result.get("id"), tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda)
if lock and lock.locked():
lock.release()
if otmdb is not None and otmdb.get_id():
# La busqueda ha encontrado un resultado valido
@@ -386,8 +504,8 @@ def find_and_set_infoLabels(item):
def get_nfo(item):
"""
Devuelve la información necesaria para que se scrapee el resultado en la videoteca de kodi,
para tmdb funciona solo pasandole la url
Devuelve la información necesaria para que se scrapee el resultado en la videoteca de kodi, para tmdb funciona
solo pasandole la url.
@param item: elemento que contiene los datos necesarios para generar la info
@type item: Item
@rtype: str
@@ -427,9 +545,9 @@ class ResultDictDefault(dict):
return self.__missing__(key)
def __missing__(self, key):
'''
"""
valores por defecto en caso de que la clave solicitada no exista
'''
"""
if key in ['genre_ids', 'genre', 'genres']:
return list()
elif key == 'images_posters':
@@ -677,14 +795,44 @@ class Tmdb(object):
else:
logger.debug("Creado objeto vacio")
@staticmethod
@cache_response
def get_json(url):
try:
result = httptools.downloadpage(url, cookies=False)
res_headers = result.headers
# logger.debug("res_headers es %s" % res_headers)
dict_data = jsontools.load(result.data)
# logger.debug("result_data es %s" % dict_data)
if "status_code" in dict_data:
logger.debug("\nError de tmdb: %s %s" % (dict_data["status_code"], dict_data["status_message"]))
if dict_data["status_code"] == 25:
while "status_code" in dict_data and dict_data["status_code"] == 25:
wait = int(res_headers['retry-after'])
logger.debug("Limite alcanzado, esperamos para volver a llamar en ...%s" % wait)
time.sleep(wait)
# logger.debug("RE Llamada #%s" % d)
result = httptools.downloadpage(url, cookies=False)
res_headers = result.headers
# logger.debug("res_headers es %s" % res_headers)
dict_data = jsontools.load(result.data)
# logger.debug("result_data es %s" % dict_data)
# error al obtener los datos
except Exception, ex:
message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
logger.error("error en: %s" % message)
dict_data = {}
return dict_data
@classmethod
def rellenar_dic_generos(cls, tipo='movie', idioma='es'):
resultado = {}
# Si se busca en idioma catalán, se cambia a español para el diccionario de géneros
if idioma == "ca":
idioma = "es"
# Rellenar diccionario de generos del tipo e idioma pasados como parametros
if idioma not in cls.dic_generos:
cls.dic_generos[idioma] = {}
@@ -695,21 +843,16 @@ class Tmdb(object):
% (tipo, idioma))
try:
logger.info("[Tmdb.py] Rellenando dicionario de generos")
resultado = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
resultado = cls.get_json(url)
lista_generos = resultado["genres"]
for i in lista_generos:
cls.dic_generos[idioma][tipo][str(i["id"])] = i["name"]
except:
pass
if "status_code" in resultado:
msg = "Error de tmdb: %s %s" % (resultado["status_code"], resultado["status_message"])
logger.error(msg)
logger.error("Error generando diccionarios")
def __by_id(self, source='tmdb'):
resultado = {}
buscando = ""
if self.busqueda_id:
if source == "tmdb":
@@ -728,31 +871,26 @@ class Tmdb(object):
buscando = "%s: %s" % (source.capitalize(), self.busqueda_id)
logger.info("[Tmdb.py] Buscando %s:\n%s" % (buscando, url))
resultado = self.get_json(url)
try:
resultado = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
if resultado:
if source != "tmdb":
if self.busqueda_tipo == "movie":
resultado = resultado["movie_results"][0]
else:
resultado = resultado["tv_results"][0]
except:
resultado = {}
if resultado and not "status_code" in resultado:
self.results = [resultado]
self.total_results = 1
self.total_pages = 1
self.result = ResultDictDefault(resultado)
else:
# No hay resultados de la busqueda
msg = "La busqueda de %s no dio resultados." % buscando
if "status_code" in resultado:
msg += "\nError de tmdb: %s %s" % (resultado["status_code"], resultado["status_message"])
logger.debug(msg)
self.results = [resultado]
self.total_results = 1
self.total_pages = 1
self.result = ResultDictDefault(resultado)
else:
# No hay resultados de la busqueda
msg = "La busqueda de %s no dio resultados." % buscando
logger.debug(msg)
def __search(self, index_results=0, page=1):
resultado = {}
self.result = ResultDictDefault()
results = []
total_results = 0
@@ -767,17 +905,14 @@ class Tmdb(object):
self.busqueda_idioma, self.busqueda_include_adult, page))
if self.busqueda_year:
url += '&year=%s' % (self.busqueda_year)
url += '&year=%s' % self.busqueda_year
buscando = self.busqueda_texto.capitalize()
logger.info("[Tmdb.py] Buscando %s en pagina %s:\n%s" % (buscando, page, url))
resultado = self.get_json(url)
try:
resultado = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
total_results = resultado["total_results"]
total_pages = resultado["total_pages"]
except:
total_results = 0
total_results = resultado.get("total_results", 0)
total_pages = resultado.get("total_pages", 0)
if total_results > 0:
results = resultado["results"]
@@ -808,13 +943,10 @@ class Tmdb(object):
else:
# No hay resultados de la busqueda
msg = "La busqueda de '%s' no dio resultados para la pagina %s" % (buscando, page)
if "status_code" in resultado:
msg += "\nError de tmdb: %s %s" % (resultado["status_code"], resultado["status_message"])
logger.error(msg)
return 0
def __discover(self, index_results=0):
resultado = {}
self.result = ResultDictDefault()
results = []
total_results = 0
@@ -834,17 +966,10 @@ class Tmdb(object):
% (type_search, "&".join(params)))
logger.info("[Tmdb.py] Buscando %s:\n%s" % (type_search, url))
resultado = self.get_json(url)
try:
resultado = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
total_results = resultado["total_results"]
total_pages = resultado["total_pages"]
except:
if resultado and not "status_code" in resultado:
total_results = -1
total_pages = 1
else:
total_results = 0
total_results = resultado.get("total_results", -1)
total_pages = resultado.get("total_pages", 1)
if total_results > 0:
results = resultado["results"]
@@ -979,7 +1104,6 @@ class Tmdb(object):
:return: Devuelve la sinopsis de una pelicula o serie
:rtype: str
"""
resultado = {}
ret = ""
if 'id' in self.result:
@@ -994,19 +1118,13 @@ class Tmdb(object):
url = ('http://api.themoviedb.org/3/%s/%s?api_key=6889f6089877fd092454d00edb44a84d&language=%s' %
(self.busqueda_tipo, self.busqueda_id, self.busqueda_idioma))
try:
resultado = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
except:
pass
resultado = self.get_json(url)
if 'overview' in resultado:
self.result['overview'] = resultado['overview']
ret = self.result['overview']
if "status_code" in resultado:
msg = "Error de tmdb: %s %s" % (resultado["status_code"], resultado["status_message"])
logger.debug(msg)
return ret
def get_poster(self, tipo_respuesta="str", size="original"):
@@ -1133,18 +1251,22 @@ class Tmdb(object):
buscando = "id_Tmdb: " + str(self.result["id"]) + " temporada: " + str(numtemporada) + "\nURL: " + url
logger.info("[Tmdb.py] Buscando " + buscando)
try:
self.temporada[numtemporada] = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
except:
self.temporada[numtemporada] = {"status_code": 15, "status_message": "Failed"}
# self.temporada[numtemporada] = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
self.temporada[numtemporada] = self.get_json(url)
if "status_code" in self.temporada[numtemporada]:
# Se ha producido un error
msg = "La busqueda de " + buscando + " no dio resultados."
msg += "\nError de tmdb: %s %s" % (
self.temporada[numtemporada]["status_code"], self.temporada[numtemporada]["status_message"])
logger.debug(msg)
except:
logger.error("No se ha podido obtener la temporada")
self.temporada[numtemporada] = {"status_code": 15, "status_message": "Failed"}
self.temporada[numtemporada] = {"episodes": {}}
# if "status_code" in self.temporada[numtemporada]:
# # Se ha producido un error
# msg = "La busqueda de " + buscando + " no dio resultados."
# msg += "\nError de tmdb: %s %s" % (
# self.temporada[numtemporada]["status_code"], self.temporada[numtemporada]["status_message"])
# logger.debug(msg)
# self.temporada[numtemporada] = {"episodes": {}}
return self.temporada[numtemporada]
def get_episodio(self, numtemporada=1, capitulo=1):
@@ -1242,10 +1364,8 @@ class Tmdb(object):
# Primera búsqueda de videos en el idioma de busqueda
url = "http://api.themoviedb.org/3/%s/%s/videos?api_key=6889f6089877fd092454d00edb44a84d&language=%s" \
% (self.busqueda_tipo, self.result['id'], self.busqueda_idioma)
try:
dict_videos = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
except:
pass
dict_videos = self.get_json(url)
if dict_videos['results']:
dict_videos['results'] = sorted(dict_videos['results'], key=lambda x: (x['type'], x['size']))
@@ -1255,19 +1375,13 @@ class Tmdb(object):
if self.busqueda_idioma != 'en':
url = "http://api.themoviedb.org/3/%s/%s/videos?api_key=6889f6089877fd092454d00edb44a84d" \
% (self.busqueda_tipo, self.result['id'])
try:
dict_videos = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
except:
pass
dict_videos = self.get_json(url)
if dict_videos['results']:
dict_videos['results'] = sorted(dict_videos['results'], key=lambda x: (x['type'], x['size']))
self.result["videos"].extend(dict_videos['results'])
if "status_code" in dict_videos:
msg = "Error de tmdb: %s %s" % (dict_videos["status_code"], dict_videos["status_message"])
logger.debug(msg)
# Si las busqueda han obtenido resultados devolver un listado de objetos
for i in self.result['videos']:
if i['site'] == "YouTube":
@@ -1316,7 +1430,8 @@ class Tmdb(object):
if ret_infoLabels['season'] and self.temporada.get(ret_infoLabels['season']):
# Si hay datos cargados de la temporada indicada
episodio = -1
if ret_infoLabels['episode']: episodio = ret_infoLabels['episode']
if ret_infoLabels['episode']:
episodio = ret_infoLabels['episode']
items.extend(self.get_episodio(ret_infoLabels['season'], episodio).items())
@@ -1371,8 +1486,10 @@ class Tmdb(object):
ret_infoLabels['imdb_id'] = v
elif k == 'external_ids':
if 'tvdb_id' in v: ret_infoLabels['tvdb_id'] = v['tvdb_id']
if 'imdb_id' in v: ret_infoLabels['imdb_id'] = v['imdb_id']
if 'tvdb_id' in v:
ret_infoLabels['tvdb_id'] = v['tvdb_id']
if 'imdb_id' in v:
ret_infoLabels['imdb_id'] = v['imdb_id']
elif k in ['genres', "genre_ids", "genre"]:
ret_infoLabels['genre'] = self.get_generos(origen)
@@ -1405,7 +1522,7 @@ class Tmdb(object):
elif isinstance(v[0], dict):
# {'iso_3166_1': 'FR', 'name':'France'}
for i in v:
if i.has_key('iso_3166_1'):
if 'iso_3166_1' in i:
pais = Tmdb.dic_country.get(i['iso_3166_1'], i['iso_3166_1'])
l_country = list(set(l_country + [pais]))
@@ -1421,7 +1538,6 @@ class Tmdb(object):
for crew in v:
l_writer = list(set(l_writer + [crew['name']]))
elif isinstance(v, str) or isinstance(v, int) or isinstance(v, float):
ret_infoLabels[k] = v

View File

@@ -19,12 +19,7 @@ from platformcode import platformtools
HOST = "https://api.thetvdb.com"
HOST_IMAGE = "http://thetvdb.com/banners/"
# comprobación tras el cambio de tipos en config.get_setting
if config.get_setting("tvdb_token") is not None:
TOKEN = config.get_setting("tvdb_token")
else:
TOKEN = ""
TOKEN = config.get_setting("tvdb_token", default="")
DEFAULT_LANG = "es"
DEFAULT_HEADERS = {
'Content-Type': 'application/json',
@@ -97,7 +92,7 @@ def find_and_set_infoLabels(item):
otvdb_global = Tvdb(imdb_id=item.infoLabels.get("imdb_id"))
elif not otvdb_global or otvdb_global.get_id() != item.infoLabels['tvdb_id']:
otvdb_global = Tvdb(tvdb_id=item.infoLabels['tvdb_id']) # , tipo=tipo_busqueda, idioma_busqueda="es")
otvdb_global = Tvdb(tvdb_id=item.infoLabels['tvdb_id'])
if not item.contentSeason:
p_dialog.update(50, "Buscando información de la serie", "Obteniendo resultados...")

View File

@@ -127,6 +127,11 @@ def run(item=None):
else:
return keymaptools.set_key()
elif item.action == "script":
from core import tmdb
if tmdb.drop_bd():
platformtools.dialog_notification("Alfa", "caché eliminada", time=2000, sound=False)
# Action in certain channel specified in "action" and "channel" parameters
else:

View File

@@ -48,6 +48,15 @@
<setting label="Botones/Teclas de acceso (Cambios requieren reiniciar Kodi)" type="lsep"/>
<setting id="shortcut_key" type="action" label="30999" action="RunPlugin(plugin://plugin.video.alfa/?ew0KICAgICJhY3Rpb24iOiAia2V5bWFwIg0KfQ==)" />
<setting type="sep"/>
<setting label="TheMovieDB (obtiene datos de las películas o series)" type="lsep"/>
<setting id="tmdb_threads" type="labelenum" values="5|10|15|20|25|30" label="Búsquedas simultáneas (puede causar inestabilidad)" default="20"/>
<setting id="tmdb_plus_info" type="bool" label="Buscar información extendida (datos de actores) Aumenta el tiempo de búsqueda" default="false"/>
<setting id="tmdb_cache" type="bool" label="Usar caché (mejora las búsquedas recurrentes)" default="true"/>
<setting id="tmdb_cache_expire" type="enum" lvalues="cada 1 día|cada 7 días|cada 15 días|cada 30 días|No" label="¿Renovar caché?" enable="eq(-1,true)" default="4"/>
<setting id="tmdb_clean_db_cache" type="action" label="Pulse para 'Borrar caché' guardada" action="RunPlugin(plugin://plugin.video.alfa/?ew0KICAgICJhY3Rpb24iOiAic2NyaXB0Ig0KfQ==)" />
</category>
</settings>
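
The new setting ids above are read back in tmdb.py with config.get_setting, as shown earlier in this diff:

threads_num = config.get_setting("tmdb_threads", default=20)       # labelenum value
use_cache = config.get_setting("tmdb_cache", default=False)        # bool
cache_expire = config.get_setting("tmdb_cache_expire", default=0)  # enum: index into lvalues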

View File

@@ -32,13 +32,18 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
'Accept-Encoding': 'gzip, deflate, br', 'Connection': 'keep-alive', 'Upgrade-Insecure-Requests': '1',
'Cookie': ''}
data = httptools.downloadpage(page_url, headers=headers, replace_headers=True).data
data = data.replace("\n","")
cgi_counter = scrapertools.find_single_match(data, '(?s)SRC="(https://www.flashx.tv/counter.cgi\?fx=[^"]+)')
cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
playnow = scrapertools.find_single_match(data, 'https://www.flashx.tv/dl[^"]+')
# Para obtener el f y el fxfx
js_fxfx = scrapertools.find_single_match(data, 'src="(https://www.flashx.tv/js/code.js\?cache=[0-9]+)')
js_fxfx = scrapertools.find_single_match(data, 'src="(https://www.flashx.tv/js/code.js.*?cache=[0-9]+)')
data_fxfx = httptools.downloadpage(js_fxfx).data
mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
for f, v in matches:
pfxfx += f + "=" + v + "&"
coding_url = 'https://www.flashx.tv/flashx.php?%s' %pfxfx
# {f: 'y', fxfx: '6'}
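# Worked trace of the parameter building above, assuming code.js returns the sample payload
# in the comment and pfxfx starts empty (it is initialised above this hunk):
# mfxfx -> "{f:y,fxfx:6}", matches -> [("f","y"), ("fxfx","6")],
# pfxfx -> "f=y&fxfx=6&", coding_url -> "https://www.flashx.tv/flashx.php?f=y&fxfx=6&"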
flashx_id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')
fname = scrapertools.find_single_match(data, 'name="fname" value="([^"]+)"')
@@ -51,10 +56,11 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
headers['Referer'] = "https://www.flashx.tv/"
headers['Accept'] = "*/*"
headers['Host'] = "www.flashx.tv"
coding_url = 'https://www.flashx.tv/flashx.php?%s' %pfxfx
headers['X-Requested-With'] = 'XMLHttpRequest'
httptools.downloadpage(coding_url, headers=headers)
# Obligatorio descargar estos 2 archivos, porque si no, muestra error
httptools.downloadpage(coding_url, headers=headers, replace_headers=True)
httptools.downloadpage(cgi_counter, headers=headers, replace_headers=True)
try:
time.sleep(int(wait_time) + 1)
@@ -63,7 +69,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
headers.pop('X-Requested-With')
headers['Content-Type'] = 'application/x-www-form-urlencoded'
data = httptools.downloadpage('https://www.flashx.tv/dl?playitnow', post, headers, replace_headers=True).data
data = httptools.downloadpage(playnow, post, headers, replace_headers=True).data
# Si salta aviso, se carga la pagina de comprobacion y luego la inicial
# LICENSE GPL3, de alfa-addon: https://github.com/alfa-addon/ ES OBLIGATORIO AÑADIR ESTAS LÍNEAS
@@ -71,7 +77,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
url_reload = scrapertools.find_single_match(data, 'try to reload the page.*?href="([^"]+)"')
try:
data = httptools.downloadpage(url_reload, cookies=False).data
data = httptools.downloadpage('https://www.flashx.tv/dl?playitnow', post, headers, replace_headers=True).data
data = httptools.downloadpage(playnow, post, headers, replace_headers=True).data
# LICENSE GPL3, de alfa-addon: https://github.com/alfa-addon/ ES OBLIGATORIO AÑADIR ESTAS LÍNEAS
except:
pass

View File

@@ -11,27 +11,20 @@ def test_video_exists(page_url):
data = httptools.downloadpage(page_url).data
if "Not Found" in data:
return False, "[streamixcloud] El archivo no existe o ha sido borrado"
return False, "[streamixcloud] El archivo no existe o ha sido borrado"
if "Video is processing" in data:
return False, "[streamixcloud] El video se está procesando, inténtelo mas tarde"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
video_urls = []
packed = scrapertools.find_single_match(data,
"<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script")
data = jsunpack.unpack(packed)
media_url = scrapertools.find_multiple_matches(data, '\{file:"([^"]+)",')
# thumb = scrapertools.find_single_match(data, '\],image:"([^"]+)"')
ext = scrapertools.get_filename_from_url(media_url[0])[-4:]
for url in media_url:
video_urls.append(["%s [streamixcloud]" % ext, url])
return video_urls
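
media_url[0] assumes the unpacked script always yields at least one file entry; a defensive sketch (not part of this commit):

if not media_url:
    return []  # no playable sources found in the unpacked script
ext = scrapertools.get_filename_from_url(media_url[0])[-4:]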