25 Commits

Author SHA1 Message Date
alfa-addon
d8889b1592 v2.3.0 2017-10-25 18:54:03 -04:00
alfa-addon
410d947e4b fixed 2017-10-25 18:53:49 -04:00
Alfa
a1339a5545 Merge pull request #137 from Intel11/patch-1
Updated
2017-10-26 00:56:56 +02:00
Intel1
a7e18ef813 allpeliculas: Added "Colecciones" section 2017-10-25 17:34:11 -05:00
Alfa
15e06d4386 Merge pull request #138 from Alfa-beto/Fixes
Various fixes
2017-10-26 00:24:59 +02:00
Alfa
574279c2da Merge pull request #140 from danielr460/master
Minor fixes
2017-10-26 00:24:44 +02:00
Alfa
2a1c1fb081 Merge pull request #141 from alfa-jor/master
cache tmdb
2017-10-26 00:24:29 +02:00
alfa_addon_10
df1fbe3b47 fix 2017-10-25 19:48:10 +02:00
Intel1
52344e42cc pelismundo: fix adult genre filtering 2017-10-25 10:37:17 -05:00
Intel1
d725443479 Update animemovil.json 2017-10-25 08:19:37 -05:00
Intel1
c70f107ff1 animemovil updated for Alfa 2017-10-24 13:28:05 -05:00
alfa_addon_10
f29911cd52 human readable text 2017-10-24 20:00:11 +02:00
alfa_addon_10
90c335df63 split options, human readability 2017-10-24 19:29:10 +02:00
alfa_addon_10
cfc8b41a5a Merge branch 'master' of https://github.com/alfa-addon/addon 2017-10-24 18:48:12 +02:00
alfa_addon_10
5a332243e0 tmdb cache and configuration 2017-10-24 18:47:02 +02:00
Intel1
9fc9bc1fd5 estrenosgo: updated video URL 2017-10-24 10:06:27 -05:00
unknown
c91ae53fba Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-10-24 08:25:11 -03:00
danielr460
5f5888a539 The first series on each page were being dropped 2017-10-23 14:32:07 -05:00
danielr460
597fa9a7e0 Removed Renumber tools because it was unnecessary, and added renumbering to the only series that lacked it (Ranma 1/2) 2017-10-23 12:22:51 -05:00
Intel1
6f0680219f streamixcloud: fix test_video_exists 2017-10-23 12:22:33 -05:00
Intel1
b863f0ea20 animeflv.ru: updated findvideos 2017-10-23 12:11:16 -05:00
danielr460
4dcc6395be Minor fixes 2017-10-23 11:37:53 -05:00
Intel1
107262cef3 cinetux: updated pattern 2017-10-23 10:38:23 -05:00
Unknown
b9b1cc6945 Improved Pelisplus code 2017-10-21 14:23:43 -03:00
Intel1
5fa341950c flashx: fix again 2017-10-21 12:12:34 -05:00
18 changed files with 851 additions and 233 deletions

View File

@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="plugin.video.alfa" name="Alfa" version="2.2.4" provider-name="Alfa Addon">
+<addon id="plugin.video.alfa" name="Alfa" version="2.3.0" provider-name="Alfa Addon">
     <requires>
         <import addon="xbmc.python" version="2.1.0"/>
         <import addon="script.module.libtorrent" optional="true"/>
@@ -19,12 +19,14 @@
     </assets>
     <news>[B]Estos son los cambios para esta versión:[/B]
 [COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
-» playmax » playpornx
-» canalporno » divxatope
-» flashx » verpeliculasnuevas
-» animeflv_me » hdfull
-» pelismundo » downace
-» gamovideo ¤ arreglos internos
+» cartoonlatino » serieslan
+» pelisplus » pedropolis
+» flashx » cinetux
+» animeflv_ru » streamixcloud
+» estrenosgo » animemovil
+» allpeliculas » pelismundo
+¤ arreglos internos
+[COLOR green]Gracias a [COLOR yellow]Danielr460[/COLOR] por su colaboración en esta versión[/COLOR]
     </news>
     <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
     <summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -35,12 +35,62 @@ def mainlist(item):
                                url= host + "movies/newmovies?page=1", extra1 = 0))
     itemlist.append(item.clone(title="Por genero", action="generos", fanart="http://i.imgur.com/c3HS8kj.png",
                                url= host + "movies/getGanres"))
+    itemlist.append(item.clone(title="Colecciones", action="colecciones", fanart="http://i.imgur.com/c3HS8kj.png",
+                               url= host))
     itemlist.append(item.clone(title="", action=""))
     itemlist.append(item.clone(title="Buscar...", action="search"))
     return itemlist
+
+
+def colecciones(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    patron = 'href="(/peliculas[^"]+).*?'
+    patron += 'title_geo"><span>([^<]+).*?'
+    patron += 'title_eng"><span>([^<]+).*?'
+    patron += 'src="([^"]+)'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedurl, scrapedtitle, scrapedcantidad, scrapedthumbnail in matches:
+        if scrapedtitle == "LGTB" and config.get_setting("adult_mode") == 0:
+            continue
+        title = scrapedtitle.capitalize() + " (" + scrapedcantidad + ")"
+        itemlist.append(Item(channel = item.channel,
+                             action = "listado_colecciones",
+                             thumbnail = host + scrapedthumbnail,
+                             title = title,
+                             url = host + scrapedurl
+                             ))
+    return itemlist
+
+
+def listado_colecciones(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    data_url = scrapertools.find_single_match(data, "data_url: '([^']+)")
+    post = "page=1"
+    data = httptools.downloadpage(host + data_url, post=post).data
+    patron = 'a href="(/peli[^"]+).*?'
+    patron += 'src="([^"]+).*?'
+    patron += 'class="c_fichas_title">([^<]+).*?'
+    patron += 'Año:.*?href="">([^<]+)'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
+        item.infoLabels['year'] = scrapedyear
+        itemlist.append(item.clone(channel = item.channel,
+                                   action = "findvideos",
+                                   contentTitle = scrapedtitle,
+                                   thumbnail = scrapedthumbnail,
+                                   title = scrapedtitle,
+                                   url = host + scrapedurl
+                                   ))
+    tmdb.set_infoLabels(itemlist)
+    return itemlist
+
+
 def generos(item):
     logger.info()
     itemlist = []
@@ -61,6 +111,9 @@ def findvideos(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
+    if "Próximamente" in data:
+        itemlist.append(Item(channel = item.channel, title = "Próximamente"))
+        return itemlist
     patron = 'data-link="([^"]+).*?'
     patron += '>([^<]+)'
     matches = scrapertools.find_multiple_matches(data, patron)
@@ -137,7 +190,7 @@ def lista(item):
 def search(item, texto):
     logger.info()
     if texto != "":
-        texto = texto.replace(" ", "+")
+        texto = texto.replace(" ", "%20")
         item.url = host + "/movies/search/" + texto
         item.extra = "busqueda"
     try:
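The new colecciones scraper above builds one long non-greedy pattern in increments and unpacks each match as a tuple. A minimal standalone sketch of the same idea (the HTML snippet is invented for illustration; scrapertools.find_multiple_matches, as used across these channels, behaves essentially like a DOTALL re.findall):

import re

html = ('<a href="/peliculas/accion"><span class="title_geo"><span>accion</span>'
        '<span class="title_eng"><span>12</span><img src="/img/accion.jpg">')

patron = 'href="(/peliculas[^"]+).*?'
patron += 'title_geo"><span>([^<]+).*?'
patron += 'title_eng"><span>([^<]+).*?'
patron += 'src="([^"]+)'

# one tuple per collection: url, title, item count, thumbnail
for url, title, count, thumb in re.findall(patron, html, re.DOTALL):
    print("%s (%s) -> %s" % (title.capitalize(), count, url))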

View File

@@ -162,27 +162,20 @@ def novedades_anime(item):
 def listado(item):
     logger.info()

     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
-    # logger.debug("datito %s" % data)

     url_pagination = scrapertools.find_single_match(data, '<li class="current">.*?</li>[\s]<li><a href="([^"]+)">')
     data = scrapertools.find_single_match(data, '</div><div class="full">(.*?)<div class="pagination')
     matches = re.compile('<img.+?src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>.+?'
                          '<div class="full item_info genres_info">(.*?)</div>.+?class="full">(.*?)</p>',
                          re.DOTALL).findall(data)

     itemlist = []

     for thumbnail, url, title, genres, plot in matches:
         title = clean_title(title)
         url = urlparse.urljoin(HOST, url)
         thumbnail = urlparse.urljoin(HOST, thumbnail)

         new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                         fulltitle=title, plot=plot)
@@ -192,28 +185,22 @@ def listado(item):
         else:
             new_item.show = title
             new_item.context = renumbertools.context(item)

         itemlist.append(new_item)

     if url_pagination:
         url = urlparse.urljoin(HOST, url_pagination)
         title = ">> Pagina Siguiente"
         itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))

     return itemlist


 def episodios(item):
     logger.info()

     itemlist = []

     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)

     if item.plot == "":
         item.plot = scrapertools.find_single_match(data, 'Description[^>]+><p>(.*?)</p>')

     data = scrapertools.find_single_match(data, '<div class="Sect Episodes full">(.*?)</div>')
     matches = re.compile('<a href="([^"]+)"[^>]+>(.+?)</a', re.DOTALL).findall(data)
@@ -221,7 +208,6 @@ def episodios(item):
         title = title.strip()
         url = urlparse.urljoin(item.url, url)
         thumbnail = item.thumbnail
-
         try:
             episode = int(scrapertools.find_single_match(title, "Episodio (\d+)"))
         except ValueError:
@@ -229,42 +215,36 @@ def episodios(item):
             episode = 1
         else:
             season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode)

         title = "%s: %sx%s" % (item.title, season, str(episode).zfill(2))

         itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumbnail, fulltitle=title,
                                    fanart=thumbnail, contentType="episode"))

     return itemlist


 def findvideos(item):
     logger.info()
     itemlist = []
     _id = scrapertools.find_single_match(item.url, 'https://animeflv.ru/ver/([^/]+)/')
     post = "embed_id=%s" % _id
     data = httptools.downloadpage("https://animeflv.ru/get_video_info", post=post).data
     dict_data = jsontools.load(data)

     headers = dict()
     headers["Referer"] = item.url
     data = httptools.downloadpage("https:" + dict_data["value"], headers=headers).data
     dict_data = jsontools.load(data)
+    if not dict_data:
+        return itemlist

-    list_videos = dict_data["playlist"][0]["sources"]
+    list_videos = dict_data["playlist"][0]

     if isinstance(list_videos, list):
         for video in list_videos:
-            itemlist.append(Item(channel=item.channel, action="play", url=video["file"], show=re.escape(item.show),
-                                 title="Ver en calidad [%s]" % video["label"], plot=item.plot, fulltitle=item.title,
+            itemlist.append(Item(channel=item.channel, action="play", url=video["file"],
+                                 show=re.escape(item.show),
+                                 title=item.title, plot=item.plot, fulltitle=item.title,
                                  thumbnail=item.thumbnail))
     else:
         for video in list_videos.values():
-            itemlist.append(Item(channel=item.channel, action="play", url=video["file"], show=re.escape(item.show),
-                                 title="Ver en calidad [%s]" % video["label"], plot=item.plot, fulltitle=item.title,
+            video += "|User-Agent=Mozilla/5.0"
+            itemlist.append(Item(channel=item.channel, action="play", url=video, show=re.escape(item.show),
+                                 title=item.title, plot=item.plot, fulltitle=item.title,
                                  thumbnail=item.thumbnail))

     return itemlist
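The rewritten findvideos now bails out early when the playlist JSON comes back empty, and handles both shapes the site returns: a list of source dicts or a plain dict of URLs. A small sketch of that branching with made-up sample data:

def extract_urls(playlist_entry):
    # list form: [{"file": ..., "label": ...}, ...]
    if isinstance(playlist_entry, list):
        return [video["file"] for video in playlist_entry]
    # dict form: values are direct URLs; the channel appends a User-Agent hint
    return [url + "|User-Agent=Mozilla/5.0" for url in playlist_entry.values()]

print(extract_urls([{"file": "http://cdn/a.mp4", "label": "720p"}]))
print(extract_urls({"hd": "http://cdn/b.mp4"}))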

View File

@@ -0,0 +1,50 @@
{
"id": "animemovil",
"name": "Animemovil",
"active": true,
"adult": false,
"language": ["*"],
"thumbnail": "https://s1.postimg.org/92ji7stii7/animemovil1.png",
"banner": "",
"version": 1,
"changes": [
{
"date": "24/10/2017",
"description": "Primera version"
}
],
"categories": [
"anime"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}

View File

@@ -0,0 +1,406 @@
# -*- coding: utf-8 -*-
import re
from channels import renumbertools
from core import httptools
from core import jsontools
from core import scrapertools
from core.item import Item
from platformcode import platformtools, config, logger
__modo_grafico__ = config.get_setting('modo_grafico', 'animemovil')
__perfil__ = int(config.get_setting('perfil', "animemovil"))
# Fijar perfil de color
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
color1 = color2 = color3 = color4 = color5 = ""
host = "http://animemovil.com"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="recientes", title="Episodios Recientes", thumbnail=item.thumbnail,
url=host, text_color=color1, contentType="tvshow", extra="recientes"))
itemlist.append(Item(channel=item.channel, action="listado", title="Animes", thumbnail=item.thumbnail,
url="%s/_API/?src=animesRecientes&offset=0" % host, text_color=color1))
itemlist.append(Item(channel=item.channel, action="emision", title="En emisión", thumbnail=item.thumbnail,
url="%s/anime/emision" % host, text_color=color2, contentType="tvshow"))
itemlist.append(Item(channel=item.channel, action="indices", title="Índices", thumbnail=item.thumbnail,
text_color=color2))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar...",
thumbnail=item.thumbnail, text_color=color3))
itemlist.append(item.clone(title="Configurar canal", action="openconfig", text_color=color5, folder=False))
if renumbertools.context:
itemlist = renumbertools.show_option(item.channel, itemlist)
return itemlist
def openconfig(item):
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
item.url = "%s/?s=%s" % (host, texto.replace(" ", "+"))
try:
return recientes(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def recientes(item):
logger.info()
item.contentType = "tvshow"
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<ul class="emision"(.*?)</ul>')
patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, title, thumb in matches:
url = host + url
try:
contentTitle = re.split(r"(?i) \d+ (?:Sub Español|Audio Español|Español Latino)", title)[0]
except:
contentTitle = ""
contentTitle = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", contentTitle)
tipo = "tvshow"
show = contentTitle
action = "episodios"
context = renumbertools.context
if item.extra == "recientes":
action = "findvideos"
context = ""
if not item.extra and (url.endswith("-pelicula/") or url.endswith("-pelicula")):
tipo = "movie"
show = ""
action = "peliculas"
if not thumb.startswith("http"):
thumb = "http:%s" % thumb
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=thumb, text_color=color3,
contentTitle=contentTitle, contentSerieName=show, infoLabels=infoLabels,
thumb_=thumb, contentType=tipo, context=context))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if item.extra and itemlist:
for it in itemlist:
it.thumbnail = it.thumb_
except:
pass
return itemlist
def listado(item):
logger.info()
itemlist = []
data = jsontools.load(httptools.downloadpage(item.url).data)
for it in data.get("items", []):
scrapedtitle = it["title"]
url = "%s/%s" % (host, it["url"])
thumb = "http://img.animemovil.com/w440-h250-c/%s" % it["img"]
title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", scrapedtitle)
tipo = "tvshow"
show = title
action = "episodios"
if url.endswith("-pelicula/") or url.endswith("-pelicula"):
tipo = "movie"
show = ""
action = "peliculas"
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(item.clone(action=action, title=scrapedtitle, url=url, thumbnail=thumb, text_color=color3,
contentTitle=title, contentSerieName=show, infoLabels=infoLabels,
context=renumbertools.context, contentType=tipo))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
if data["buttom"] and itemlist:
offset = int(scrapertools.find_single_match(item.url, 'offset=(\d+)')) + 1
url = re.sub(r'offset=\d+', 'offset=%s' % offset, item.url)
itemlist.append(Item(channel=item.channel, action="listado", url=url, title=">> Página Siguiente",
thumbnail=item.thumbnail, text_color=color2))
return itemlist
def indices(item):
logger.info()
itemlist = []
if "Índices" in item.title:
itemlist.append(item.clone(title="Por Género", url="%s/anime/generos/" % host))
itemlist.append(item.clone(title="Por Letra", url="%s/anime/" % host))
itemlist.append(item.clone(action="completo", title="Lista completa de Animes",
url="%s/anime/lista/" % host))
else:
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<div class="letras">(.*?)</div>')
patron = '<a title="([^"]+)"'
matches = scrapertools.find_multiple_matches(bloque, patron)
for title in matches:
if "Letra" in item.title:
url = "%s/_API/?src=animesLetra&offset=0&letra=%s" % (host, title)
else:
url = "%s/_API/?src=animesGenero&offset=0&genero=%s" % (host, title)
itemlist.append(item.clone(action="listado", url=url, title=title))
return itemlist
def completo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<ul class="listadoAnime">(.*?)</ul>')
patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, title, thumb in matches:
url = host + url
scrapedtitle = title
thumb = thumb.replace("s90-c", "w440-h250-c")
title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub Español| Peliculas", "", scrapedtitle)
tipo = "tvshow"
show = title
action = "episodios"
if url.endswith("-pelicula/") or url.endswith("-pelicula"):
tipo = "movie"
show = ""
action = "peliculas"
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(Item(channel=item.channel, action=action, title=scrapedtitle, url=url, thumbnail=thumb,
text_color=color3, contentTitle=title, contentSerieName=show, extra="completo",
context=renumbertools.context, contentType=tipo, infoLabels=infoLabels))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
show = scrapertools.find_single_match(data, '<title>\s*([^<]+)\s*</title>')
show = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", show)
if not item.infoLabels["plot"]:
item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="InfoSipnosis">.*?<p>(.*?)</p>')
bloque = scrapertools.find_single_match(data, 'ul class="lista"(.*?)</ul>')
matches = scrapertools.find_multiple_matches(bloque, '<li><a href="([^"]+)" title="([^"]+)"')
for url, title in matches:
url = host + url
epi = scrapertools.find_single_match(title, '(?i)%s.*? (\d+) (?:Sub|Audio|Español)' % item.contentSerieName)
new_item = item.clone(action="findvideos", url=url, title=title, extra="", context=renumbertools.context)
if epi:
season, episode = renumbertools.numbered_for_tratk(
item.channel, show, 1, int(epi))
new_item.infoLabels["episode"] = episode
new_item.infoLabels["season"] = season
new_item.title = "%sx%s %s" % (season, episode, title)
itemlist.append(new_item)
if item.infoLabels.get("tmdb_id") or item.extra == "recientes" or item.extra == "completo":
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
if config.get_videolibrary_support() and itemlist:
itemlist.append(Item(channel=item.channel, title="Añadir serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", contentTitle=item.contentTitle,
contentSerieName=item.contentSerieName, text_color=color4, fanart=item.fanart,
thumbnail=item.thumbnail))
return itemlist
def peliculas(item):
logger.info()
itemlist = []
if item.extra == "completo":
try:
from core import tmdb
tmdb.set_infoLabels_item(item, __modo_grafico__)
except:
pass
data = httptools.downloadpage(item.url).data
if not item.infoLabels["plot"]:
item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="InfoSipnosis">.*?<p>(.*?)</p>')
bloque = scrapertools.find_single_match(data, 'ul class="lista"(.*?)</ul>')
matches = scrapertools.find_multiple_matches(bloque, '<li><a href="([^"]+)" title="([^"]+)"')
if len(matches) == 1:
item.url = host + matches[0][0]
itemlist = findvideos(item)
else:
for url, title in matches:
itemlist.append(item.clone(action="findvideos", title=title, url=url, extra=""))
return itemlist
def emision(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloques = scrapertools.find_multiple_matches(data, '<div class="horario">.*?</i>\s*(.*?)</span>(.*?)</ul>')
patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
for dia, b in bloques:
matches = scrapertools.find_multiple_matches(b, patron)
if matches:
itemlist.append(item.clone(action="", title=dia, text_color=color1))
for url, title, thumb in matches:
url = host + url
scrapedtitle = " %s" % title
title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub Español| Peliculas", "", title)
if not thumb.startswith("http"):
thumb = "http:%s" % thumb
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(item.clone(action="episodios", title=scrapedtitle, url=url, thumbnail=thumb, text_color=color3,
contentTitle=title, contentSerieName=title, extra="recientes",
context=renumbertools.context, infoLabels=infoLabels))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
id = scrapertools.find_single_match(data, '"id":"([^"]+)"')
bloque = scrapertools.find_single_match(data, 'ul class="controles">(.*?)</ul>')
patron = '<li title="([^"]+)" id="[^"]*" host="([^"]+)">'
matches = scrapertools.find_multiple_matches(data, patron)
for title, server in matches:
if title == "Vizard":
continue
title = "%s - %s" % (title, item.title)
post = "host=%s&id=%s" % (server, id)
itemlist.append(item.clone(action="play", url="http://server-2-stream.animemovil.com/V2/", title=title,
post=post))
downl = scrapertools.find_single_match(data, '<div class="descargarCap">.*?<a href="([^"]+)"')
if downl:
downl = downl.replace("&amp;", "&")
itemlist.append(item.clone(action="play", title="Descarga - %s" % item.title, url=downl, server="directo"))
if not itemlist:
itemlist.append(Item(channel=item.channel, title="No hay vídeos disponibles", action=""))
if item.extra == "recientes":
url = scrapertools.find_single_match(data, '<a class="CapList".*?href="([^"]+)"')
if url:
url = host + url
itemlist.append(item.clone(action="episodios", title="Ir a lista de capítulos", url=url, text_color=color1))
elif item.contentType == "movie" and config.get_library_support():
if "No hay vídeos disponibles" not in itemlist[0].title:
itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca", url=item.url,
action="add_pelicula_to_library", contentTitle=item.contentTitle, text_color=color4,
thumbnail=item.thumbnail, fanart=item.fanart))
return itemlist
def play(item):
logger.info()
if item.server:
return [item]
itemlist = []
data = jsontools.load(httptools.downloadpage(item.url, item.post).data)
if data["jwplayer"] == False:
content = data["eval"]["contenido"]
urls = scrapertools.find_multiple_matches(content, 'file\s*:\s*"([^"]+)"')
if not urls:
urls = scrapertools.find_multiple_matches(content, '"GET","([^"]+)"')
for url in urls:
if "mediafire" in url:
data_mf = httptools.downloadpage(url).data
url = scrapertools.find_single_match(data_mf, 'kNO\s*=\s*"([^"]+)"')
ext = url[-4:]
itemlist.insert(0, ["%s [directo]" % ext, url])
else:
if data["jwplayer"].get("sources"):
for source in data["jwplayer"]["sources"]:
label = source.get("label", "")
ext = source.get("type", "")
if ext and "/" in ext:
ext = ".%s " % ext.rsplit("/", 1)[1]
url = source.get("file")
if "server-3-stream" in url:
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location")
itemlist.insert(0, ["%s%s [directo]" % (ext, label), url])
elif data["jwplayer"].get("file"):
label = data["jwplayer"].get("label", "")
url = data["jwplayer"]["file"]
ext = data["jwplayer"].get("type", "")
if ext and "/" in ext:
ext = "%s " % ext.rsplit("/", 1)[1]
if "server-3-stream" in url:
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location")
itemlist.insert(0, [".%s%s [directo]" % (ext, label), url])
return itemlist
def newest(categoria):
logger.info()
item = Item()
try:
item.url = "http://skanime.net/"
item.extra = "novedades"
itemlist = recientes(item)
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return itemlist
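listado above pages through the site's JSON API by rewriting the offset query parameter in place until the response's "buttom" flag goes false. The URL rewrite isolated as a sketch (URL taken from mainlist above; the channel uses scrapertools helpers instead of bare re):

import re

url = "http://animemovil.com/_API/?src=animesRecientes&offset=0"
offset = int(re.search(r'offset=(\d+)', url).group(1)) + 1
next_url = re.sub(r'offset=\d+', 'offset=%s' % offset, url)
# next_url == "http://animemovil.com/_API/?src=animesRecientes&offset=1"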

View File

@@ -1,8 +1,7 @@
 # -*- coding: utf-8 -*-
 import re

-from channels import renumbertools
 from channelselector import get_thumb
 from core import httptools
 from core import scrapertools
@@ -33,7 +32,6 @@ def mainlist(item):
     itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=host,
                          thumbnail=thumb_series))

-    itemlist = renumbertools.show_option(item.channel, itemlist)
     autoplay.show_option(item.channel, itemlist)

     return itemlist
@@ -71,7 +69,7 @@ def lista_gen(item):
         title = scrapedtitle + " [ " + scrapedlang + "]"
         itemlist.append(
             Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail, action="episodios",
-                 show=scrapedtitle, context=renumbertools.context(item)))
+                 show=scrapedtitle))
     tmdb.set_infoLabels(itemlist)
     # Paginacion
     patron_pag = '<a class="nextpostslink" rel="next" href="([^"]+)">'
@@ -98,7 +96,7 @@ def lista(item):
     for link, name in matches:
         title = name + " [Latino]"
         url = link
-        context1=[renumbertools.context(item), autoplay.context]
+        context1=[autoplay.context]
         itemlist.append(
             item.clone(title=title, url=url, plot=title, action="episodios", show=title,
                        context=context1))
@@ -129,31 +127,23 @@ def episodios(item):
     number = 0
     ncap = 0
     A = 1
+    tempo=1
     for temp, link, name in matches:
-        if A != temp:
+        if A != temp and "Ranma" not in show:
             number = 0
+        number = number + 1
         if "Ranma" in show:
-            number = int(temp)
-            temp = str(1)
-        else:
-            number = number + 1
-        if number < 10:
-            capi = "0" + str(number)
-        else:
-            capi = str(number)
+            number,tempo=renumerar_ranma(number,tempo,18+1,1)
+            number,tempo=renumerar_ranma(number,tempo,22+1,2)
+            number,tempo=renumerar_ranma(number,tempo,24+1,3)
+            number,tempo=renumerar_ranma(number,tempo,24+1,4)
+            number,tempo=renumerar_ranma(number,tempo,24+1,5)
+            number,tempo=renumerar_ranma(number,tempo,24+1,6)
+        capi=str(number).zfill(2)
         if "Ranma" in show:
-            season = 1
-            episode = number
-            season, episode = renumbertools.numbered_for_tratk(
-                item.channel, item.show, season, episode)
-            date = name
-            if episode < 10:
-                capi = "0" + str(episode)
-            else:
-                capi = episode
-            title = str(season) + "x" + str(capi) + " - " + name  # "{0}x{1} - ({2})".format(season, episode, date)
+            title = "{0}x{1} - ({2})".format(str(tempo), capi, name)
         else:
-            title = str(temp) + "x" + capi + " - " + name
+            title = "{0}x{1} - ({2})".format(str(temp), capi, name)
         url = link
         A = temp
         itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, show=show))
@@ -165,6 +155,11 @@ def episodios(item):
     return itemlist


+def renumerar_ranma(number,tempo,final,actual):
+    if number==final and tempo==actual:
+        tempo=tempo+1
+        number=1
+    return number, tempo
+
+
 def findvideos(item):
     logger.info()
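The renumerar_ranma helper added above rolls a running episode counter over into the next season at fixed cutoffs (seasons of 18, 22, 24, 24, 24 and 24 episodes; "final" is passed as season length + 1). A worked trace, assuming the function exactly as defined in the diff:

number, tempo = 19, 1                            # 19th consecutive episode, still tagged season 1
number, tempo = renumerar_ranma(number, tempo, 18 + 1, 1)
# season 1 only has 18 episodes, so the counter rolls over: number == 1, tempo == 2 -> "2x01"
number, tempo = renumerar_ranma(number, tempo, 22 + 1, 2)
# no-op here: season 2's cutoff (episode 23) has not been reached yet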

View File

@@ -30,7 +30,7 @@ def mainlist(item):
     data = httptools.downloadpage(CHANNEL_HOST).data
     total = scrapertools.find_single_match(data, "TENEMOS\s<b>(.*?)</b>")
-    titulo = "Peliculas (%s)" % total
+    titulo = "Peliculas"
     itemlist.append(item.clone(title=titulo, text_color=color2, action="", text_bold=True))
     itemlist.append(item.clone(action="peliculas", title=" Novedades", url=CHANNEL_HOST + "pelicula",
                                thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres"
@@ -283,7 +283,7 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
     if type == "descarga": t_tipo = "Descargar"
     data = data.replace("\n", "")
     if type == "online":
-        patron = '(?is)class="playex.*?visualizaciones'
+        patron = '(?is)class="playex.*?sheader'
         bloque1 = scrapertools.find_single_match(data, patron)
     patron = '(?is)#(option-[^"]+).*?png">([^<]+)'
     match = scrapertools.find_multiple_matches(data, patron)
@@ -303,7 +303,7 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
     bloque2 = scrapertools.find_single_match(data, '(?s)box_links.*?dt_social_single')
     bloque2 = bloque2.replace("\t", "").replace("\r", "")
     patron = '(?s)optn" href="([^"]+)'
-    patron += '.*?title="([^\.]+)'
+    patron += '.*?alt="([^\.]+)'
     patron += '.*?src.*?src="[^>]+"?/>([^<]+)'
     patron += '.*?src="[^>]+"?/>([^<]+)'
     patron += '.*?/span>([^<]+)'

View File

@@ -1,4 +1,4 @@
 # -*- coding: utf-8 -*-
 import re

 from channelselector import get_thumb
@@ -53,8 +53,7 @@ def listado(item):
     patron += '<b>Categoria:\s*</b>([^&]+)&raquo;\s*([^<]+).*?'
     patron += '<div class="OpcionesDescargasMini">(.*?)</div>'
-    matches = re.compile(patron, re.DOTALL).findall(data)
+    matches = scrapertools.find_multiple_matches(data, patron)

     for thumbnail, title, cat_padres, cat_hijos, opciones in matches:
         # logger.debug(thumbnail + "\n" + title + "\n" + cat_padres + "\n" + cat_hijos + "\n" + opciones)
         # Obtenemos el año del titulo y eliminamos lo q sobre
@@ -70,7 +69,7 @@ def listado(item):
         thumbnail = HOST + thumbnail[:-5] + 'b' + thumbnail[-4:]

         # Buscamos opcion de ver online
-        patron = '<a href="http://estrenosly.org/ver-online-([^"]+)'
+        patron = '<a href="http://estrenos.*?/ver-online-([^"]+)'
         url_ver = scrapertools.find_single_match(opciones, patron)
         if url_ver:
             new_item = Item(channel=item.channel, action="findvideos", title=title,

View File

@@ -98,10 +98,11 @@ def peliculas(item):
     url_next_page = ''
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
-    # logger.info(data)
     patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?'  # img, title
     patron += '<div class="rating"><span class="[^"]+"></span>([^<]+).*?'  # rating
-    patron += '<span class="quality">([^<]+)</span><a href="([^"]+)">.*?'  # calidad, url
+    patron += '<span class="quality">([^<]+)</span></div><a href="([^"]+)">.*?'  # calidad, url
     patron += '<span>([^<]+)</span>'  # year
     matches = scrapertools.find_multiple_matches(data, patron)

View File

@@ -124,7 +124,7 @@ def filtro(item):
     patron += '</span>([^<]+)</a>'
     matches = scrapertools.find_multiple_matches(bloque, patron)
     for url, title in matches:
-        if "eroti33cas" in title and config.get_setting("adult_mode") == 0:
+        if "eroticas" in title and config.get_setting("adult_mode") == 0:
             continue
         itemlist.append(item.clone(action = "peliculas",
                                    title = title.title(),

View File

@@ -195,8 +195,9 @@ def lista(item):
         # de tmdb
         filtro_list = filtro_list.items()

         if item.title != 'Buscar':
-            itemlist.append(
+            new_item=(
                 Item(channel=item.channel,
                      contentType=tipo,
                      action=accion,
@@ -205,11 +206,14 @@ def lista(item):
                      thumbnail=thumbnail,
                      fulltitle=scrapedtitle,
                      infoLabels={'filtro': filtro_list},
-                     contentTitle=scrapedtitle,
-                     contentSerieName=scrapedtitle,
                      extra=item.extra,
                      context=autoplay.context
                      ))
+            if 'serie' in scrapedurl:
+                new_item.contentSerieName=scrapedtitle
+            else:
+                new_item.contentTitle = scrapedtitle
+            itemlist.append(new_item)
         else:
             item.extra = item.extra.rstrip('s/')
             if item.extra in url:
@@ -222,11 +226,14 @@ def lista(item):
                          thumbnail=scrapedthumbnail,
                          fulltitle=scrapedtitle,
                          infoLabels={'filtro': filtro_list},
-                         contentTitle=scrapedtitle,
-                         contentSerieName=scrapedtitle,
                          extra=item.extra,
                          context=autoplay.context
                          ))
+                if 'serie' in scrapedurl:
+                    new_item.contentSerieName=scrapedtitle
+                else:
+                    new_item.contentTitle = scrapedtitle
+                itemlist.append(new_item)

     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

View File

@@ -55,6 +55,7 @@ def lista(item):
     # Paginacion
     num_items_x_pagina = 30
     min = item.page * num_items_x_pagina
+    min=min-item.page
     max = min + num_items_x_pagina - 1
     for link, img, name in matches[min:max]:
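The one-line fix above compensates for an off-by-one: the slice end is exclusive, so matches[min:max] with max = min + 30 - 1 yields only 29 items, and the first match of every page after the first was silently skipped. A quick trace, assuming pages are numbered from 0:

num_items_x_pagina = 30
for page in range(3):
    min_ = page * num_items_x_pagina
    min_ = min_ - page                           # the fix: shift back one slot per page already shown
    max_ = min_ + num_items_x_pagina - 1
    print("page %d shows items %d..%d" % (page, min_, max_ - 1))
# page 0 shows items 0..28, page 1 shows 29..57, page 2 shows 58..86: contiguous, no gaps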

View File

@@ -2,11 +2,15 @@
import copy import copy
import re import re
import sqlite3
import time import time
from core import filetools
from core import httptools
from core import jsontools from core import jsontools
from core import scrapertools from core import scrapertools
from core.item import InfoLabels from core.item import InfoLabels
from platformcode import config
from platformcode import logger from platformcode import logger
# ----------------------------------------------------------------------------------------------------------- # -----------------------------------------------------------------------------------------------------------
@@ -61,6 +65,123 @@ from platformcode import logger
# -------------------------------------------------------------------------------------------------------------- # --------------------------------------------------------------------------------------------------------------
otmdb_global = None otmdb_global = None
fname = filetools.join(config.get_data_path(), "alfa_db.sqlite")
def create_bd():
conn = sqlite3.connect(fname)
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS tmdb_cache (url TEXT PRIMARY KEY, response TEXT, added TEXT)')
conn.commit()
conn.close()
def drop_bd():
conn = sqlite3.connect(fname)
c = conn.cursor()
c.execute('DROP TABLE IF EXISTS tmdb_cache')
conn.commit()
conn.close()
return True
create_bd()
# El nombre de la funcion es el nombre del decorador y recibe la funcion que decora.
def cache_response(fn):
logger.info()
# import time
# start_time = time.time()
def wrapper(*args):
import base64
def check_expired(ts):
import datetime
valided = False
cache_expire = config.get_setting("tmdb_cache_expire", default=0)
saved_date = datetime.datetime.fromtimestamp(ts)
current_date = datetime.datetime.fromtimestamp(time.time())
elapsed = current_date - saved_date
# 1 day
if cache_expire == 0:
if elapsed > datetime.timedelta(days=1):
valided = False
else:
valided = True
# 7 days
elif cache_expire == 1:
if elapsed > datetime.timedelta(days=7):
valided = False
else:
valided = True
# 15 days
elif cache_expire == 2:
if elapsed > datetime.timedelta(days=15):
valided = False
else:
valided = True
# 1 month - 30 days
elif cache_expire == 3:
# no tenemos en cuenta febrero o meses con 31 días
if elapsed > datetime.timedelta(days=30):
valided = False
else:
valided = True
# no expire
elif cache_expire == 4:
valided = True
return valided
result = {}
try:
# no está activa la cache
if not config.get_setting("tmdb_cache", default=False):
result = fn(*args)
else:
conn = sqlite3.connect(fname)
c = conn.cursor()
url_base64 = base64.b64encode(args[0])
c.execute("SELECT response, added FROM tmdb_cache WHERE url=?", (url_base64,))
row = c.fetchone()
if row and check_expired(float(row[1])):
result = eval(base64.b64decode(row[0]))
# si no se ha obtenido información, llamamos a la funcion
if not result:
result = fn(*args)
result_base64 = base64.b64encode(str(result))
c.execute("INSERT OR REPLACE INTO tmdb_cache (url, response, added) VALUES (?, ?, ?)",
(url_base64, result_base64, time.time()))
conn.commit()
conn.close()
# elapsed_time = time.time() - start_time
# logger.debug("TARDADO %s" % elapsed_time)
# error al obtener los datos
except Exception, ex:
message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
logger.error("error en: %s" % message)
return result
return wrapper
def set_infoLabels(source, seekTmdb=True, idioma_busqueda='es'): def set_infoLabels(source, seekTmdb=True, idioma_busqueda='es'):
@@ -78,6 +199,7 @@ def set_infoLabels(source, seekTmdb=True, idioma_busqueda='es'):
@return: un numero o lista de numeros con el resultado de las llamadas a set_infoLabels_item @return: un numero o lista de numeros con el resultado de las llamadas a set_infoLabels_item
@rtype: int, list @rtype: int, list
""" """
start_time = time.time() start_time = time.time()
if type(source) == list: if type(source) == list:
ret = set_infoLabels_itemlist(source, seekTmdb, idioma_busqueda) ret = set_infoLabels_itemlist(source, seekTmdb, idioma_busqueda)
@@ -95,34 +217,35 @@ def set_infoLabels_itemlist(item_list, seekTmdb=False, idioma_busqueda='es'):
La API tiene un limite de 40 peticiones por IP cada 10'' y por eso la lista no deberia tener mas de 30 items La API tiene un limite de 40 peticiones por IP cada 10'' y por eso la lista no deberia tener mas de 30 items
para asegurar un buen funcionamiento de esta funcion. para asegurar un buen funcionamiento de esta funcion.
:param item_list: listado de objetos Item que representan peliculas, series o capitulos. El atributo @param item_list: listado de objetos Item que representan peliculas, series o capitulos. El atributo
infoLabels de cada objeto Item sera modificado incluyendo los datos extras localizados. infoLabels de cada objeto Item sera modificado incluyendo los datos extras localizados.
:type item_list: list @type item_list: list
:param seekTmdb: Si es True hace una busqueda en www.themoviedb.org para obtener los datos, en caso contrario @param seekTmdb: Si es True hace una busqueda en www.themoviedb.org para obtener los datos, en caso contrario
obtiene los datos del propio Item si existen. obtiene los datos del propio Item si existen.
:type seekTmdb: bool @type seekTmdb: bool
:param idioma_busqueda: Codigo del idioma segun ISO 639-1, en caso de busqueda en www.themoviedb.org. @param idioma_busqueda: Codigo del idioma segun ISO 639-1, en caso de busqueda en www.themoviedb.org.
:type idioma_busqueda: str @type idioma_busqueda: str
:return: Una lista de numeros cuyo valor absoluto representa la cantidad de elementos incluidos en el atributo @return: Una lista de numeros cuyo valor absoluto representa la cantidad de elementos incluidos en el atributo
infoLabels de cada Item. Este numero sera positivo si los datos se han obtenido de www.themoviedb.org y infoLabels de cada Item. Este numero sera positivo si los datos se han obtenido de www.themoviedb.org y
negativo en caso contrario. negativo en caso contrario.
:rtype: list @rtype: list
""" """
import threading import threading
semaforo = threading.Semaphore(20) threads_num = config.get_setting("tmdb_threads", default=20)
semaforo = threading.Semaphore(threads_num)
lock = threading.Lock() lock = threading.Lock()
r_list = list() r_list = list()
i = 0 i = 0
l_hilo = list() l_hilo = list()
def sub_thread(item, _i, _seekTmdb): def sub_thread(_item, _i, _seekTmdb):
semaforo.acquire() semaforo.acquire()
ret = set_infoLabels_item(item, _seekTmdb, idioma_busqueda, lock) ret = set_infoLabels_item(_item, _seekTmdb, idioma_busqueda, lock)
# logger.debug(str(ret) + "item: " + item.tostring()) # logger.debug(str(ret) + "item: " + _item.tostring())
semaforo.release() semaforo.release()
r_list.append((_i, item, ret)) r_list.append((_i, _item, ret))
for item in item_list: for item in item_list:
t = threading.Thread(target=sub_thread, args=(item, i, seekTmdb)) t = threading.Thread(target=sub_thread, args=(item, i, seekTmdb))
@@ -142,21 +265,22 @@ def set_infoLabels_itemlist(item_list, seekTmdb=False, idioma_busqueda='es'):
def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None): def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
# ----------------------------------------------------------------------------------------------------------- """
# Obtiene y fija (item.infoLabels) los datos extras de una serie, capitulo o pelicula. Obtiene y fija (item.infoLabels) los datos extras de una serie, capitulo o pelicula.
#
# Parametros: @param item: Objeto Item que representa un pelicula, serie o capitulo. El atributo infoLabels sera modificado
# item: (Item) Objeto Item que representa un pelicula, serie o capitulo. El atributo infoLabels sera incluyendo los datos extras localizados.
# modificado incluyendo los datos extras localizados. @type item: Item
# (opcional) seekTmdb: (bool) Si es True hace una busqueda en www.themoviedb.org para obtener los datos, @param seekTmdb: Si es True hace una busqueda en www.themoviedb.org para obtener los datos, en caso contrario
# en caso contrario obtiene los datos del propio Item si existen. obtiene los datos del propio Item si existen.
# (opcional) idioma_busqueda: (str) Codigo del idioma segun ISO 639-1, en caso de busqueda en @type seekTmdb: bool
# www.themoviedb.org. @param idioma_busqueda: Codigo del idioma segun ISO 639-1, en caso de busqueda en www.themoviedb.org.
# Retorna: @type idioma_busqueda: str
# Un numero cuyo valor absoluto representa la cantidad de elementos incluidos en el atributo @param lock: para uso de threads cuando es llamado del metodo 'set_infoLabels_itemlist'
# item.infoLabels. @return: Un numero cuyo valor absoluto representa la cantidad de elementos incluidos en el atributo item.infoLabels.
# Este numero sera positivo si los datos se han obtenido de www.themoviedb.org y negativo en caso contrario. Este numero sera positivo si los datos se han obtenido de www.themoviedb.org y negativo en caso contrario.
# --------------------------------------------------------------------------------------------------------- @rtype: int
"""
global otmdb_global global otmdb_global
def __leer_datos(otmdb_aux): def __leer_datos(otmdb_aux):
@@ -183,10 +307,9 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
if lock: if lock:
lock.acquire() lock.acquire()
if not otmdb_global or (item.infoLabels['tmdb_id'] and if not otmdb_global or (item.infoLabels['tmdb_id']
str(otmdb_global.result.get("id")) != item.infoLabels['tmdb_id']) \ and str(otmdb_global.result.get("id")) != item.infoLabels['tmdb_id']) \
or (otmdb_global.texto_buscado and or (otmdb_global.texto_buscado and otmdb_global.texto_buscado != item.infoLabels['tvshowtitle']):
otmdb_global.texto_buscado != item.infoLabels['tvshowtitle']):
if item.infoLabels['tmdb_id']: if item.infoLabels['tmdb_id']:
otmdb_global = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=tipo_busqueda, otmdb_global = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=tipo_busqueda,
idioma_busqueda=idioma_busqueda) idioma_busqueda=idioma_busqueda)
@@ -196,8 +319,6 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
__leer_datos(otmdb_global) __leer_datos(otmdb_global)
temporada = otmdb_global.get_temporada(numtemporada)
if lock: if lock:
lock.release() lock.release()
@@ -230,7 +351,6 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
return len(item.infoLabels) return len(item.infoLabels)
else: else:
# Tenemos numero de temporada valido pero no numero de episodio... # Tenemos numero de temporada valido pero no numero de episodio...
# ... buscar datos temporada # ... buscar datos temporada
@@ -254,7 +374,6 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
# Buscar... # Buscar...
else: else:
otmdb = copy.copy(otmdb_global) otmdb = copy.copy(otmdb_global)
# if otmdb is None: # Se elimina por q sino falla al añadir series por falta de imdb, pero por contra provoca mas llamadas
# Busquedas por ID... # Busquedas por ID...
if item.infoLabels['tmdb_id']: if item.infoLabels['tmdb_id']:
# ...Busqueda por tmdb_id # ...Busqueda por tmdb_id
@@ -270,8 +389,7 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
elif tipo_busqueda == 'tv': # buscar con otros codigos elif tipo_busqueda == 'tv': # buscar con otros codigos
if item.infoLabels['tvdb_id']: if item.infoLabels['tvdb_id']:
# ...Busqueda por tvdb_id # ...Busqueda por tvdb_id
otmdb = Tmdb(external_id=item.infoLabels['tvdb_id'], external_source="tvdb_id", otmdb = Tmdb(external_id=item.infoLabels['tvdb_id'], external_source="tvdb_id", tipo=tipo_busqueda,
tipo=tipo_busqueda,
idioma_busqueda=idioma_busqueda) idioma_busqueda=idioma_busqueda)
elif item.infoLabels['freebase_mid']: elif item.infoLabels['freebase_mid']:
# ...Busqueda por freebase_mid # ...Busqueda por freebase_mid
@@ -303,16 +421,16 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
else: else:
titulo_buscado = item.fulltitle titulo_buscado = item.fulltitle
otmdb = Tmdb(texto_buscado=titulo_buscado, tipo=tipo_busqueda, otmdb = Tmdb(texto_buscado=titulo_buscado, tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda,
idioma_busqueda=idioma_busqueda, filtro=item.infoLabels.get('filtro', {}), year=item.infoLabels['year'])
filtro=item.infoLabels.get('filtro', {}),
year=item.infoLabels['year'])
if otmdb.get_id() and not lock: if otmdb.get_id() and config.get_setting("tmdb_plus_info", default=False):
# Si la busqueda ha dado resultado y no se esta buscando una lista de items, # Si la busqueda ha dado resultado y no se esta buscando una lista de items,
# realizar otra busqueda para ampliar la informacion # realizar otra busqueda para ampliar la informacion
otmdb = Tmdb(id_Tmdb=otmdb.result.get("id"), tipo=tipo_busqueda, otmdb = Tmdb(id_Tmdb=otmdb.result.get("id"), tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda)
idioma_busqueda=idioma_busqueda)
if lock and lock.locked():
lock.release()
if otmdb is not None and otmdb.get_id(): if otmdb is not None and otmdb.get_id():
# La busqueda ha encontrado un resultado valido # La busqueda ha encontrado un resultado valido
@@ -386,8 +504,8 @@ def find_and_set_infoLabels(item):
def get_nfo(item): def get_nfo(item):
""" """
Devuelve la información necesaria para que se scrapee el resultado en la videoteca de kodi, Devuelve la información necesaria para que se scrapee el resultado en la videoteca de kodi, para tmdb funciona
para tmdb funciona solo pasandole la url solo pasandole la url.
@param item: elemento que contiene los datos necesarios para generar la info @param item: elemento que contiene los datos necesarios para generar la info
@type item: Item @type item: Item
@rtype: str @rtype: str
@@ -427,9 +545,9 @@ class ResultDictDefault(dict):
return self.__missing__(key) return self.__missing__(key)
def __missing__(self, key): def __missing__(self, key):
''' """
valores por defecto en caso de que la clave solicitada no exista valores por defecto en caso de que la clave solicitada no exista
''' """
if key in ['genre_ids', 'genre', 'genres']: if key in ['genre_ids', 'genre', 'genres']:
return list() return list()
elif key == 'images_posters': elif key == 'images_posters':
@@ -677,14 +795,44 @@ class Tmdb(object):
else: else:
logger.debug("Creado objeto vacio") logger.debug("Creado objeto vacio")
@staticmethod
@cache_response
def get_json(url):
try:
result = httptools.downloadpage(url, cookies=False)
res_headers = result.headers
# logger.debug("res_headers es %s" % res_headers)
dict_data = jsontools.load(result.data)
# logger.debug("result_data es %s" % dict_data)
if "status_code" in dict_data:
logger.debug("\nError de tmdb: %s %s" % (dict_data["status_code"], dict_data["status_message"]))
if dict_data["status_code"] == 25:
while "status_code" in dict_data and dict_data["status_code"] == 25:
wait = int(res_headers['retry-after'])
logger.debug("Limite alcanzado, esperamos para volver a llamar en ...%s" % wait)
time.sleep(wait)
# logger.debug("RE Llamada #%s" % d)
result = httptools.downloadpage(url, cookies=False)
res_headers = result.headers
# logger.debug("res_headers es %s" % res_headers)
dict_data = jsontools.load(result.data)
# logger.debug("result_data es %s" % dict_data)
# error al obtener los datos
except Exception, ex:
message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
logger.error("error en: %s" % message)
dict_data = {}
return dict_data
@classmethod @classmethod
def rellenar_dic_generos(cls, tipo='movie', idioma='es'): def rellenar_dic_generos(cls, tipo='movie', idioma='es'):
resultado = {}
# Si se busca en idioma catalán, se cambia a español para el diccionario de géneros
if idioma == "ca":
idioma = "es"
# Rellenar diccionario de generos del tipo e idioma pasados como parametros # Rellenar diccionario de generos del tipo e idioma pasados como parametros
if idioma not in cls.dic_generos: if idioma not in cls.dic_generos:
cls.dic_generos[idioma] = {} cls.dic_generos[idioma] = {}
@@ -695,21 +843,16 @@ class Tmdb(object):
% (tipo, idioma)) % (tipo, idioma))
try: try:
logger.info("[Tmdb.py] Rellenando dicionario de generos") logger.info("[Tmdb.py] Rellenando dicionario de generos")
resultado = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
resultado = cls.get_json(url)
lista_generos = resultado["genres"] lista_generos = resultado["genres"]
for i in lista_generos: for i in lista_generos:
cls.dic_generos[idioma][tipo][str(i["id"])] = i["name"] cls.dic_generos[idioma][tipo][str(i["id"])] = i["name"]
except: except:
pass logger.error("Error generando diccionarios")
if "status_code" in resultado:
msg = "Error de tmdb: %s %s" % (resultado["status_code"], resultado["status_message"])
logger.error(msg)
     def __by_id(self, source='tmdb'):
+        resultado = {}
+        buscando = ""
         if self.busqueda_id:
             if source == "tmdb":
@@ -728,31 +871,26 @@ class Tmdb(object):
             buscando = "%s: %s" % (source.capitalize(), self.busqueda_id)
             logger.info("[Tmdb.py] Buscando %s:\n%s" % (buscando, url))
-            try:
-                resultado = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
+            resultado = self.get_json(url)
+            if resultado:
                 if source != "tmdb":
                     if self.busqueda_tipo == "movie":
                         resultado = resultado["movie_results"][0]
                     else:
                         resultado = resultado["tv_results"][0]
-            except:
-                resultado = {}
-            if resultado and not "status_code" in resultado:
                 self.results = [resultado]
                 self.total_results = 1
                 self.total_pages = 1
                 self.result = ResultDictDefault(resultado)
             else:
                 # No hay resultados de la busqueda
                 msg = "La busqueda de %s no dio resultados." % buscando
-                if "status_code" in resultado:
-                    msg += "\nError de tmdb: %s %s" % (resultado["status_code"], resultado["status_message"])
                 logger.debug(msg)

     def __search(self, index_results=0, page=1):
+        resultado = {}
         self.result = ResultDictDefault()
         results = []
         total_results = 0
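Worth noting in __by_id: when the id comes from an external source such as IMDB, TMDB's /find endpoint wraps its matches in per-media-type lists, which is why the new code indexes movie_results or tv_results before storing the result. A sketch of that unwrapping on an invented response (the id and title below are illustrative only, not real API output from this add-on):

    # Shape of a /find response for an external id lookup; values are invented.
    resultado = {
        "movie_results": [{"id": 105, "title": "Back to the Future"}],
        "tv_results": [],
    }
    busqueda_tipo = "movie"
    # Same unwrapping as in __by_id above for non-tmdb sources.
    if busqueda_tipo == "movie":
        resultado = resultado["movie_results"][0]
    else:
        resultado = resultado["tv_results"][0]
    print(resultado["id"])  # -> 105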
@@ -767,17 +905,14 @@ class Tmdb(object):
                self.busqueda_idioma, self.busqueda_include_adult, page))
         if self.busqueda_year:
-            url += '&year=%s' % (self.busqueda_year)
+            url += '&year=%s' % self.busqueda_year
         buscando = self.busqueda_texto.capitalize()
         logger.info("[Tmdb.py] Buscando %s en pagina %s:\n%s" % (buscando, page, url))
-        try:
-            resultado = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
-            total_results = resultado["total_results"]
-            total_pages = resultado["total_pages"]
-        except:
-            total_results = 0
+        resultado = self.get_json(url)
+        total_results = resultado.get("total_results", 0)
+        total_pages = resultado.get("total_pages", 0)
         if total_results > 0:
             results = resultado["results"]
@@ -808,13 +943,10 @@ class Tmdb(object):
         else:
             # No hay resultados de la busqueda
             msg = "La busqueda de '%s' no dio resultados para la pagina %s" % (buscando, page)
-            if "status_code" in resultado:
-                msg += "\nError de tmdb: %s %s" % (resultado["status_code"], resultado["status_message"])
             logger.error(msg)
             return 0
     def __discover(self, index_results=0):
+        resultado = {}
         self.result = ResultDictDefault()
         results = []
         total_results = 0
@@ -834,17 +966,10 @@ class Tmdb(object):
                % (type_search, "&".join(params)))
         logger.info("[Tmdb.py] Buscando %s:\n%s" % (type_search, url))
-        try:
-            resultado = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
-            total_results = resultado["total_results"]
-            total_pages = resultado["total_pages"]
-        except:
-            if resultado and not "status_code" in resultado:
-                total_results = -1
-                total_pages = 1
-            else:
-                total_results = 0
+        resultado = self.get_json(url)
+        total_results = resultado.get("total_results", -1)
+        total_pages = resultado.get("total_pages", 1)
         if total_results > 0:
             results = resultado["results"]
@@ -979,7 +1104,6 @@ class Tmdb(object):
         :return: Devuelve la sinopsis de una pelicula o serie
         :rtype: str
         """
-        resultado = {}
         ret = ""
         if 'id' in self.result:
@@ -994,19 +1118,13 @@ class Tmdb(object):
                 url = ('http://api.themoviedb.org/3/%s/%s?api_key=6889f6089877fd092454d00edb44a84d&language=%s' %
                        (self.busqueda_tipo, self.busqueda_id, self.busqueda_idioma))
-                try:
-                    resultado = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
-                except:
-                    pass
+                resultado = self.get_json(url)
                 if 'overview' in resultado:
                     self.result['overview'] = resultado['overview']
                     ret = self.result['overview']
-                if "status_code" in resultado:
-                    msg = "Error de tmdb: %s %s" % (resultado["status_code"], resultado["status_message"])
-                    logger.debug(msg)
         return ret
     def get_poster(self, tipo_respuesta="str", size="original"):
@@ -1133,18 +1251,22 @@ class Tmdb(object):
         buscando = "id_Tmdb: " + str(self.result["id"]) + " temporada: " + str(numtemporada) + "\nURL: " + url
         logger.info("[Tmdb.py] Buscando " + buscando)
         try:
-            self.temporada[numtemporada] = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
+            # self.temporada[numtemporada] = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
+            self.temporada[numtemporada] = self.get_json(url)
         except:
-            self.temporada[numtemporada] = {"status_code": 15, "status_message": "Failed"}
-        if "status_code" in self.temporada[numtemporada]:
-            # Se ha producido un error
-            msg = "La busqueda de " + buscando + " no dio resultados."
-            msg += "\nError de tmdb: %s %s" % (
-                self.temporada[numtemporada]["status_code"], self.temporada[numtemporada]["status_message"])
-            logger.debug(msg)
-            self.temporada[numtemporada] = {"episodes": {}}
+            logger.error("No se ha podido obtener la temporada")
+            self.temporada[numtemporada] = {"status_code": 15, "status_message": "Failed"}
+            self.temporada[numtemporada] = {"episodes": {}}
+        # if "status_code" in self.temporada[numtemporada]:
+        #     # Se ha producido un error
+        #     msg = "La busqueda de " + buscando + " no dio resultados."
+        #     msg += "\nError de tmdb: %s %s" % (
+        #         self.temporada[numtemporada]["status_code"], self.temporada[numtemporada]["status_message"])
+        #     logger.debug(msg)
+        #     self.temporada[numtemporada] = {"episodes": {}}
         return self.temporada[numtemporada]

     def get_episodio(self, numtemporada=1, capitulo=1):
@@ -1242,10 +1364,8 @@ class Tmdb(object):
             # Primera búsqueda de videos en el idioma de busqueda
             url = "http://api.themoviedb.org/3/%s/%s/videos?api_key=6889f6089877fd092454d00edb44a84d&language=%s" \
                   % (self.busqueda_tipo, self.result['id'], self.busqueda_idioma)
-            try:
-                dict_videos = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
-            except:
-                pass
+            dict_videos = self.get_json(url)
             if dict_videos['results']:
                 dict_videos['results'] = sorted(dict_videos['results'], key=lambda x: (x['type'], x['size']))
@@ -1255,19 +1375,13 @@ class Tmdb(object):
             if self.busqueda_idioma != 'en':
                 url = "http://api.themoviedb.org/3/%s/%s/videos?api_key=6889f6089877fd092454d00edb44a84d" \
                       % (self.busqueda_tipo, self.result['id'])
-                try:
-                    dict_videos = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
-                except:
-                    pass
+                dict_videos = self.get_json(url)
                 if dict_videos['results']:
                     dict_videos['results'] = sorted(dict_videos['results'], key=lambda x: (x['type'], x['size']))
                     self.result["videos"].extend(dict_videos['results'])
-            if "status_code" in dict_videos:
-                msg = "Error de tmdb: %s %s" % (dict_videos["status_code"], dict_videos["status_message"])
-                logger.debug(msg)
             # Si las busqueda han obtenido resultados devolver un listado de objetos
             for i in self.result['videos']:
                 if i['site'] == "YouTube":
@@ -1316,7 +1430,8 @@ class Tmdb(object):
         if ret_infoLabels['season'] and self.temporada.get(ret_infoLabels['season']):
             # Si hay datos cargados de la temporada indicada
             episodio = -1
-            if ret_infoLabels['episode']: episodio = ret_infoLabels['episode']
+            if ret_infoLabels['episode']:
+                episodio = ret_infoLabels['episode']
             items.extend(self.get_episodio(ret_infoLabels['season'], episodio).items())
@@ -1371,8 +1486,10 @@ class Tmdb(object):
                 ret_infoLabels['imdb_id'] = v
             elif k == 'external_ids':
-                if 'tvdb_id' in v: ret_infoLabels['tvdb_id'] = v['tvdb_id']
-                if 'imdb_id' in v: ret_infoLabels['imdb_id'] = v['imdb_id']
+                if 'tvdb_id' in v:
+                    ret_infoLabels['tvdb_id'] = v['tvdb_id']
+                if 'imdb_id' in v:
+                    ret_infoLabels['imdb_id'] = v['imdb_id']
             elif k in ['genres', "genre_ids", "genre"]:
                 ret_infoLabels['genre'] = self.get_generos(origen)
@@ -1405,7 +1522,7 @@ class Tmdb(object):
             elif isinstance(v[0], dict):
                 # {'iso_3166_1': 'FR', 'name':'France'}
                 for i in v:
-                    if i.has_key('iso_3166_1'):
+                    if 'iso_3166_1' in i:
                         pais = Tmdb.dic_country.get(i['iso_3166_1'], i['iso_3166_1'])
                         l_country = list(set(l_country + [pais]))
@@ -1421,7 +1538,6 @@ class Tmdb(object):
                 for crew in v:
                     l_writer = list(set(l_writer + [crew['name']]))
             elif isinstance(v, str) or isinstance(v, int) or isinstance(v, float):
                 ret_infoLabels[k] = v
View File
@@ -19,12 +19,7 @@ from platformcode import platformtools
 HOST = "https://api.thetvdb.com"
 HOST_IMAGE = "http://thetvdb.com/banners/"
-# comprobación tras el cambio de tipos en config.get_setting
-if config.get_setting("tvdb_token") is not None:
-    TOKEN = config.get_setting("tvdb_token")
-else:
-    TOKEN = ""
+TOKEN = config.get_setting("tvdb_token", default="")
 DEFAULT_LANG = "es"
 DEFAULT_HEADERS = {
     'Content-Type': 'application/json',
@@ -97,7 +92,7 @@ def find_and_set_infoLabels(item):
         otvdb_global = Tvdb(imdb_id=item.infoLabels.get("imdb_id"))
     elif not otvdb_global or otvdb_global.get_id() != item.infoLabels['tvdb_id']:
-        otvdb_global = Tvdb(tvdb_id=item.infoLabels['tvdb_id'])  # , tipo=tipo_busqueda, idioma_busqueda="es")
+        otvdb_global = Tvdb(tvdb_id=item.infoLabels['tvdb_id'])
     if not item.contentSeason:
         p_dialog.update(50, "Buscando información de la serie", "Obteniendo resultados...")
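The TOKEN simplification works because config.get_setting now accepts a default value, replacing the explicit None check. A hedged sketch of that accessor pattern; the dict-backed store below is a stand-in for illustration, not the add-on's real config module:

    # Hypothetical stand-in for platformcode's config.get_setting.
    _settings = {}

    def get_setting(name, default=None):
        # Returning the caller-supplied default removes the
        # "is not None" boilerplate deleted in the hunk above.
        return _settings.get(name, default)

    TOKEN = get_setting("tvdb_token", default="")  # "" while the setting is unset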
View File
@@ -127,6 +127,11 @@ def run(item=None):
         else:
             return keymaptools.set_key()
+    elif item.action == "script":
+        from core import tmdb
+        if tmdb.drop_bd():
+            platformtools.dialog_notification("Alfa", "caché eliminada", time=2000, sound=False)
     # Action in certain channel specified in "action" and "channel" parameters
     else:
View File
@@ -48,6 +48,15 @@
         <setting label="Botones/Teclas de acceso (Cambios requieren reiniciar Kodi)" type="lsep"/>
         <setting id="shortcut_key" type="action" label="30999" action="RunPlugin(plugin://plugin.video.alfa/?ew0KICAgICJhY3Rpb24iOiAia2V5bWFwIg0KfQ==)" />
+        <setting type="sep"/>
+        <setting label="TheMovieDB (obtiene datos de las películas o series)" type="lsep"/>
+        <setting id="tmdb_threads" type="labelenum" values="5|10|15|20|25|30" label="Búsquedas simultáneas (puede causar inestabilidad)" default="20"/>
+        <setting id="tmdb_plus_info" type="bool" label="Buscar información extendida (datos de actores) Aumenta el tiempo de búsqueda" default="false"/>
+        <setting id="tmdb_cache" type="bool" label="Usar caché (mejora las búsquedas recurrentes)" default="true"/>
+        <setting id="tmdb_cache_expire" type="enum" lvalues="cada 1 día|cada 7 días|cada 15 días|cada 30 días|No" label="¿Renovar caché?" enable="eq(-1,true)" default="4"/>
+        <setting id="tmdb_clean_db_cache" type="action" label="Pulse para 'Borrar caché' guardada" action="RunPlugin(plugin://plugin.video.alfa/?ew0KICAgICJhY3Rpb24iOiAic2NyaXB0Ig0KfQ==)" />
     </category>
 </settings>
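The new 'Borrar caché' button and the "script" branch added to run() meet through the base64 payload embedded in the RunPlugin URL: Kodi hands it back to the plugin, which decodes it into the action dict. Decoding the exact string from the setting above shows the routing (only the decode itself; the dispatch into tmdb.drop_bd() happens in run()):

    import base64
    import json

    # Payload taken verbatim from the tmdb_clean_db_cache setting above.
    encoded = "ew0KICAgICJhY3Rpb24iOiAic2NyaXB0Ig0KfQ=="
    params = json.loads(base64.b64decode(encoded))
    print(params)  # -> {'action': 'script'}, which run() routes to tmdb.drop_bd()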
View File
@@ -32,13 +32,18 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
                'Accept-Encoding': 'gzip, deflate, br', 'Connection': 'keep-alive', 'Upgrade-Insecure-Requests': '1',
                'Cookie': ''}
     data = httptools.downloadpage(page_url, headers=headers, replace_headers=True).data
+    data = data.replace("\n", "")
+    cgi_counter = scrapertools.find_single_match(data, '(?s)SRC="(https://www.flashx.tv/counter.cgi\?fx=[^"]+)')
+    cgi_counter = cgi_counter.replace("%0A", "").replace("%22", "")
+    playnow = scrapertools.find_single_match(data, 'https://www.flashx.tv/dl[^"]+')
     # Para obtener el f y el fxfx
-    js_fxfx = scrapertools.find_single_match(data, 'src="(https://www.flashx.tv/js/code.js\?cache=[0-9]+)')
+    js_fxfx = scrapertools.find_single_match(data, 'src="(https://www.flashx.tv/js/code.js.*?cache=[0-9]+)')
     data_fxfx = httptools.downloadpage(js_fxfx).data
     mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'", "").replace(" ", "")
     matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
     for f, v in matches:
         pfxfx += f + "=" + v + "&"
-    coding_url = 'https://www.flashx.tv/flashx.php?%s' % pfxfx
     # {f: 'y', fxfx: '6'}
     flashx_id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')
     fname = scrapertools.find_single_match(data, 'name="fname" value="([^"]+)"')
@@ -51,10 +56,11 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     headers['Referer'] = "https://www.flashx.tv/"
     headers['Accept'] = "*/*"
     headers['Host'] = "www.flashx.tv"
+    coding_url = 'https://www.flashx.tv/flashx.php?%s' % pfxfx
     headers['X-Requested-With'] = 'XMLHttpRequest'
-    httptools.downloadpage(coding_url, headers=headers)
+    # Obligatorio descargar estos 2 archivos, porque si no, muestra error
+    httptools.downloadpage(coding_url, headers=headers, replace_headers=True)
+    httptools.downloadpage(cgi_counter, headers=headers, replace_headers=True)
     try:
         time.sleep(int(wait_time) + 1)
@@ -63,7 +69,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     headers.pop('X-Requested-With')
     headers['Content-Type'] = 'application/x-www-form-urlencoded'
-    data = httptools.downloadpage('https://www.flashx.tv/dl?playitnow', post, headers, replace_headers=True).data
+    data = httptools.downloadpage(playnow, post, headers, replace_headers=True).data
     # Si salta aviso, se carga la pagina de comprobacion y luego la inicial
     # LICENSE GPL3, de alfa-addon: https://github.com/alfa-addon/ ES OBLIGATORIO AÑADIR ESTAS LÍNEAS
@@ -71,7 +77,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     url_reload = scrapertools.find_single_match(data, 'try to reload the page.*?href="([^"]+)"')
     try:
         data = httptools.downloadpage(url_reload, cookies=False).data
-        data = httptools.downloadpage('https://www.flashx.tv/dl?playitnow', post, headers, replace_headers=True).data
+        data = httptools.downloadpage(playnow, post, headers, replace_headers=True).data
         # LICENSE GPL3, de alfa-addon: https://github.com/alfa-addon/ ES OBLIGATORIO AÑADIR ESTAS LÍNEAS
     except:
         pass
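The fxfx handling above boils down to scraping a small parameter hint out of code.js and flattening it into the query string for flashx.php. A sketch of just that flattening step, fed with the sample payload quoted in the hunk's own comment ({f: 'y', fxfx: '6'}); the patterns mirror the scrapertools calls but run on plain re:

    import re

    # Sample hint as quoted in the hunk's comment; real values come from code.js.
    mfxfx = "{f: 'y', fxfx: '6'}".replace("'", "").replace(" ", "")
    pfxfx = ""
    for f, v in re.findall(r"(\w+):(\w+)", mfxfx):
        pfxfx += f + "=" + v + "&"
    coding_url = 'https://www.flashx.tv/flashx.php?%s' % pfxfx
    print(coding_url)  # -> https://www.flashx.tv/flashx.php?f=y&fxfx=6&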
View File
@@ -11,27 +11,20 @@ def test_video_exists(page_url):
     data = httptools.downloadpage(page_url).data
     if "Not Found" in data:
         return False, "[streamixcloud] El archivo no existe o ha sido borrado"
+    if "Video is processing" in data:
+        return False, "[streamixcloud] El video se está procesando, inténtelo mas tarde"
     return True, ""

 def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     logger.info("(page_url='%s')" % page_url)
     data = httptools.downloadpage(page_url).data
     video_urls = []
     packed = scrapertools.find_single_match(data,
                                             "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script")
     data = jsunpack.unpack(packed)
     media_url = scrapertools.find_multiple_matches(data, '\{file:"([^"]+)",')
-    # thumb = scrapertools.find_single_match(data, '\],image:"([^"]+)"')
     ext = scrapertools.get_filename_from_url(media_url[0])[-4:]
     for url in media_url:
         video_urls.append(["%s [streamixcloud]" % ext, url])
     return video_urls
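For reference, get_video_url unpacks the obfuscated jwplayer setup and then pulls every file: URL out of it. A sketch of the post-unpack half on a fabricated snippet (the URL and label are invented; jsunpack itself is skipped since its input is the packed eval blob):

    import re

    # Fabricated example of what jsunpack.unpack() might return for this host.
    data = 'jwplayer("vplayer").setup({sources:[{file:"http://example.com/v.mp4",label:"SD"}]});'
    # Same pattern as the find_multiple_matches call above.
    media_url = re.findall(r'\{file:"([^"]+)",', data)
    ext = media_url[0][-4:]  # crude extension grab, ".mp4" here
    video_urls = [["%s [streamixcloud]" % ext, url] for url in media_url]
    print(video_urls)  # -> [['.mp4 [streamixcloud]', 'http://example.com/v.mp4']]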