49 Commits

Author SHA1 Message Date
alfa-addon
d8889b1592 v2.3.0 2017-10-25 18:54:03 -04:00
alfa-addon
410d947e4b fixed 2017-10-25 18:53:49 -04:00
Alfa
a1339a5545 Merge pull request #137 from Intel11/patch-1
Updated
2017-10-26 00:56:56 +02:00
Intel1
a7e18ef813 allpeliculas: Added Collections section 2017-10-25 17:34:11 -05:00
Alfa
15e06d4386 Merge pull request #138 from Alfa-beto/Fixes
Various fixes
2017-10-26 00:24:59 +02:00
Alfa
574279c2da Merge pull request #140 from danielr460/master
Minor fixes
2017-10-26 00:24:44 +02:00
Alfa
2a1c1fb081 Merge pull request #141 from alfa-jor/master
tmdb cache
2017-10-26 00:24:29 +02:00
alfa_addon_10
df1fbe3b47 fix 2017-10-25 19:48:10 +02:00
Intel1
52344e42cc pelismundo: fix adult genre filtering 2017-10-25 10:37:17 -05:00
Intel1
d725443479 Update animemovil.json 2017-10-25 08:19:37 -05:00
Intel1
c70f107ff1 animemovil updated for Alfa 2017-10-24 13:28:05 -05:00
alfa_addon_10
f29911cd52 human-readable text 2017-10-24 20:00:11 +02:00
alfa_addon_10
90c335df63 split options, human readability 2017-10-24 19:29:10 +02:00
alfa_addon_10
cfc8b41a5a Merge branch 'master' of https://github.com/alfa-addon/addon 2017-10-24 18:48:12 +02:00
alfa_addon_10
5a332243e0 tmdb cache and configuration 2017-10-24 18:47:02 +02:00
Intel1
9fc9bc1fd5 estrenosgo: updated videos URL 2017-10-24 10:06:27 -05:00
unknown
c91ae53fba Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-10-24 08:25:11 -03:00
danielr460
5f5888a539 The first series on each page were being dropped 2017-10-23 14:32:07 -05:00
danielr460
597fa9a7e0 Removed Renumber tools because it was unnecessary, and added renumbering to the only series that was missing it (Ranma 1/2) 2017-10-23 12:22:51 -05:00
Intel1
6f0680219f streamixcloud: fix test_video_exists 2017-10-23 12:22:33 -05:00
Intel1
b863f0ea20 animeflv.ru: updated findvideos 2017-10-23 12:11:16 -05:00
danielr460
4dcc6395be Minor fixes 2017-10-23 11:37:53 -05:00
Intel1
107262cef3 cinetux: updated pattern 2017-10-23 10:38:23 -05:00
Unknown
b9b1cc6945 Improved Pelisplus code 2017-10-21 14:23:43 -03:00
Intel1
5fa341950c flashx: fix again 2017-10-21 12:12:34 -05:00
alfa-addon
54c818984a v2.2.4 2017-10-20 22:00:22 -04:00
Alfa
98b9c73046 Merge pull request #135 from Intel11/patch-1
Updated
2017-10-21 03:34:28 +02:00
Alfa
295af0e9e8 Merge pull request #136 from Alfa-beto/Fixes
Fixes
2017-10-21 03:34:00 +02:00
Unknown
d07ae8e62b Fixes 2017-10-20 16:47:39 -03:00
Unknown
1f2cadb689 code cleanup 2017-10-20 15:36:10 -03:00
Intel1
5ed94e84fc animeflv_me: repaired paginator 2017-10-20 11:20:56 -05:00
Unknown
5afb770271 Fix for verpeliculasnuevas 2017-10-20 13:15:49 -03:00
Unknown
4aa7ff5bc7 Fix for Playmax 2017-10-20 11:31:16 -03:00
Intel1
6bf0100f41 downace: updated test_video_exists 2017-10-20 09:26:47 -05:00
Intel1
ea8acc1ea1 downace: server error message 2017-10-20 09:13:35 -05:00
Intel1
d70b8d95f9 Update flashx.py 2017-10-19 12:53:20 -05:00
Intel1
915952c85d Delete crimenes.py 2017-10-19 08:25:54 -05:00
Intel1
4c7a349db2 Delete crimenes.json 2017-10-19 08:25:39 -05:00
Intel1
88d26523cd Delete vixto.py 2017-10-18 17:30:00 -05:00
Intel1
cc4fc8cbde Delete vixto.json 2017-10-18 17:29:30 -05:00
Intel1
5cb64e4b41 hdfull: fix mark-as-watched 2017-10-18 15:31:52 -05:00
Intel1
efa960bcb7 flashx fix 2017-10-18 12:57:34 -05:00
Unknown
f01da0ddcb Fixed Gamovideo 2017-10-18 13:03:56 -03:00
Unknown
cad7e96441 divxatope fix 2017-10-18 11:38:38 -03:00
Intel1
341953539e Update pelismundo.py 2017-10-18 09:29:02 -05:00
Intel1
0e7c8d22ef Update pelismundo.py 2017-10-18 09:19:57 -05:00
Intel1
e20b32b7e9 pelismundo: improved code
2017-10-17 16:58:57 -05:00
Unknown
1cbca62d82 Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-10-17 08:04:15 -03:00
unknown
5002bf0ca0 Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-10-15 14:20:01 -03:00
31 changed files with 910 additions and 1036 deletions

View File

@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="plugin.video.alfa" name="Alfa" version="2.2.3" provider-name="Alfa Addon">
+<addon id="plugin.video.alfa" name="Alfa" version="2.3.0" provider-name="Alfa Addon">
 <requires>
 <import addon="xbmc.python" version="2.1.0"/>
 <import addon="script.module.libtorrent" optional="true"/>
@@ -19,12 +19,14 @@
 </assets>
 <news>[B]Estos son los cambios para esta versión:[/B]
 [COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
-» animeyt » pelismundo
-» asialiveaction » animeflv_me
-» newpct1 » wopelis
-» gvideo » powvideo
+» cartoonlatino » serieslan
+» pelisplus » pedropolis
+» flashx » cinetux
+» animeflv_ru » streamixcloud
+» estrenosgo » animemovil
+» allpeliculas » pelismundo
 ¤ arreglos internos
-[COLOR green]Gracias a [COLOR yellow]Danielr460[/COLOR] y [COLOR yellow]robalo[/COLOR] por su colaboración en esta versión[/COLOR]
+[COLOR green]Gracias a [COLOR yellow]Danielr460[/COLOR] por su colaboración en esta versión[/COLOR]
 </news>
 <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
 <summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -35,12 +35,62 @@ def mainlist(item):
                                url= host + "movies/newmovies?page=1", extra1 = 0))
     itemlist.append(item.clone(title="Por genero", action="generos", fanart="http://i.imgur.com/c3HS8kj.png",
                                url= host + "movies/getGanres"))
+    itemlist.append(item.clone(title="Colecciones", action="colecciones", fanart="http://i.imgur.com/c3HS8kj.png",
+                               url= host))
     itemlist.append(item.clone(title="", action=""))
     itemlist.append(item.clone(title="Buscar...", action="search"))
     return itemlist
+
+
+def colecciones(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    patron = 'href="(/peliculas[^"]+).*?'
+    patron += 'title_geo"><span>([^<]+).*?'
+    patron += 'title_eng"><span>([^<]+).*?'
+    patron += 'src="([^"]+)'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedurl, scrapedtitle, scrapedcantidad, scrapedthumbnail in matches:
+        if scrapedtitle == "LGTB" and config.get_setting("adult_mode") == 0:
+            continue
+        title = scrapedtitle.capitalize() + " (" + scrapedcantidad + ")"
+        itemlist.append(Item(channel = item.channel,
+                             action = "listado_colecciones",
+                             thumbnail = host + scrapedthumbnail,
+                             title = title,
+                             url = host + scrapedurl
+                             ))
+    return itemlist
+
+
+def listado_colecciones(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    data_url = scrapertools.find_single_match(data, "data_url: '([^']+)")
+    post = "page=1"
+    data = httptools.downloadpage(host + data_url, post=post).data
+    patron = 'a href="(/peli[^"]+).*?'
+    patron += 'src="([^"]+).*?'
+    patron += 'class="c_fichas_title">([^<]+).*?'
+    patron += 'Año:.*?href="">([^<]+)'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
+        item.infoLabels['year'] = scrapedyear
+        itemlist.append(item.clone(channel = item.channel,
+                                   action = "findvideos",
+                                   contentTitle = scrapedtitle,
+                                   thumbnail = scrapedthumbnail,
+                                   title = scrapedtitle,
+                                   url = host + scrapedurl
+                                   ))
+    tmdb.set_infoLabels(itemlist)
+    return itemlist
+
+
 def generos(item):
     logger.info()
     itemlist = []
@@ -61,6 +111,9 @@ def findvideos(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
+    if "Próximamente" in data:
+        itemlist.append(Item(channel = item.channel, title = "Próximamente"))
+        return itemlist
     patron = 'data-link="([^"]+).*?'
     patron += '>([^<]+)'
     matches = scrapertools.find_multiple_matches(data, patron)
@@ -137,7 +190,7 @@ def lista(item):
 def search(item, texto):
     logger.info()
     if texto != "":
-        texto = texto.replace(" ", "+")
+        texto = texto.replace(" ", "%20")
         item.url = host + "/movies/search/" + texto
         item.extra = "busqueda"
         try:
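
The search() change above swaps the hand-rolled "+" for "%20" because the site stopped decoding plus signs as spaces. A minimal sketch of the more general approach, assuming the Python 2 standard library that Kodi addons of this era run on (build_search_url is a hypothetical helper, not part of the channel):

    import urllib

    def build_search_url(host, texto):
        # quote() percent-encodes spaces as %20 and also escapes the other
        # reserved characters that a bare replace() would let through
        return host + "/movies/search/" + urllib.quote(texto)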

View File

@@ -12,14 +12,14 @@ from core import servertools
 from core.item import Item
 from platformcode import config, logger
-CHANNEL_HOST = "http://animeflv.me/"
+CHANNEL_HOST = "http://animeflv.co"
 CHANNEL_DEFAULT_HEADERS = [
     ["User-Agent", "Mozilla/5.0"],
     ["Accept-Encoding", "gzip, deflate"],
     ["Referer", CHANNEL_HOST]
 ]
-REGEX_NEXT_PAGE = r"class='current'>\d+?</li><li><a href=\"([^']+?)\""
+REGEX_NEXT_PAGE = "class='current'>\d+?</li><li><a href='([^']+?)'"
 REGEX_TITLE = r'(?:bigChar_a" href=.+?>)(.+?)(?:</a>)'
 REGEX_THUMB = r'src="(http://media.animeflv\.co/uploads/thumbs/[^"]+?)"'
 REGEX_PLOT = r'<span class="info">Línea de historia:</span><p><span>(.*?)</span>'
@@ -61,14 +61,6 @@ def get_cookie_value():
 header_string = "|User-Agent=Mozilla/5.0&Referer=http://animeflv.co&Cookie=" + \
                 get_cookie_value()
-
-
-def __find_next_page(html):
-    """
-    Busca el enlace a la pagina siguiente
-    """
-    return scrapertools.find_single_match(html, REGEX_NEXT_PAGE)
 
 
 def __extract_info_from_serie(html):
     title = scrapertools.find_single_match(html, REGEX_TITLE)
     title = clean_title(title)
@@ -131,15 +123,15 @@ def mainlist(item):
     itemlist.append(Item(channel=item.channel, action="letras",
                          title="Por orden alfabético"))
     itemlist.append(Item(channel=item.channel, action="generos", title="Por géneros",
-                         url=urlparse.urljoin(CHANNEL_HOST, "ListadeAnime")))
+                         url= CHANNEL_HOST + "/ListadeAnime"))
     itemlist.append(Item(channel=item.channel, action="series", title="Por popularidad",
-                         url=urlparse.urljoin(CHANNEL_HOST, "/ListadeAnime/MasVisto")))
+                         url=CHANNEL_HOST + "/ListadeAnime/MasVisto"))
     itemlist.append(Item(channel=item.channel, action="series", title="Novedades",
-                         url=urlparse.urljoin(CHANNEL_HOST, "ListadeAnime/Nuevo")))
+                         url=CHANNEL_HOST + "/ListadeAnime/Nuevo"))
     itemlist.append(Item(channel=item.channel, action="series", title="Últimos",
-                         url=urlparse.urljoin(CHANNEL_HOST, "ListadeAnime/LatestUpdate")))
+                         url=CHANNEL_HOST + "/ListadeAnime/LatestUpdate"))
     itemlist.append(Item(channel=item.channel, action="search", title="Buscar...",
-                         url=urlparse.urljoin(CHANNEL_HOST, "Buscar?s=")))
+                         url=CHANNEL_HOST + "/Buscar?s="))
     itemlist = renumbertools.show_option(item.channel, itemlist)
@@ -148,15 +140,11 @@ def mainlist(item):
 def letras(item):
     logger.info()
     base_url = 'http://animeflv.co/ListadeAnime?c='
     itemlist = list()
     itemlist.append(Item(channel=item.channel, action="series", title="#", url=base_url + "#"))
     for letter in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
-        logger.debug("title=[%s], url=[%s], thumbnail=[]" % (letter, base_url + letter))
         itemlist.append(Item(channel=item.channel, action="series", title=letter, url=base_url + letter))
     return itemlist
@@ -172,8 +160,6 @@ def generos(item):
     list_genre = re.findall(REGEX_GENERO, html)
     for url, genero in list_genre:
-        logger.debug("title=[%s], url=[%s], thumbnail=[]" % (genero, url))
         itemlist.append(Item(channel=item.channel, action="series", title=genero, url=url))
     return itemlist
@@ -181,12 +167,9 @@ def generos(item):
 def search(item, texto):
     logger.info()
     texto = texto.replace(" ", "%20")
     item.url = "%s%s" % (item.url, texto)
     html = get_url_contents(item.url)
     try:
         # Se encontro un solo resultado y se redicciono a la página de la serie
         if html.find('<title>Ver') >= 0:
@@ -198,9 +181,6 @@ def search(item, texto):
         items = []
         for show in show_list:
             title, url, thumbnail, plot = show
-            logger.debug("title=[%s], url=[%s], thumbnail=[%s]" % (title, url, thumbnail))
             items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                               plot=plot, show=title, viewmode="movies_with_plot", context=renumbertools.context(item)))
     except:
@@ -214,35 +194,25 @@ def search(item, texto):
 def series(item):
     logger.info()
     page_html = get_url_contents(item.url)
     show_list = __find_series(page_html)
     items = []
     for show in show_list:
         title, url, thumbnail, plot = show
-        logger.debug("title=[%s], url=[%s], thumbnail=[%s]" % (title, url, thumbnail))
         items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                           plot=plot, show=title, viewmode="movies_with_plot", context=renumbertools.context(item)))
-    url_next_page = __find_next_page(page_html)
+    url_next_page = scrapertools.find_single_match(page_html, REGEX_NEXT_PAGE)
     if url_next_page:
-        items.append(Item(channel=item.channel, action="series", title=">> Página Siguiente", url=url_next_page))
+        items.append(Item(channel=item.channel, action="series", title=">> Página Siguiente", url= CHANNEL_HOST + url_next_page))
     return items
 
 
 def episodios(item):
     logger.info()
     itemlist = []
     html_serie = get_url_contents(item.url)
     info_serie = __extract_info_from_serie(html_serie)
     if info_serie[3]:
         plot = info_serie[3]
@@ -250,11 +220,9 @@ def episodios(item):
         plot = ''
     episodes = re.findall(REGEX_EPISODE, html_serie, re.DOTALL)
     es_pelicula = False
     for url, title, date in episodes:
         episode = scrapertools.find_single_match(title, r'Episodio (\d+)')
         # El enlace pertenece a un episodio
         if episode:
             season = 1
@@ -268,9 +236,6 @@ def episodios(item):
             title = "%s (%s)" % (title, date)
             item.url = url
             es_pelicula = True
-        logger.debug("title=[%s], url=[%s], thumbnail=[%s]" % (title, url, item.thumbnail))
         itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=item.thumbnail,
                              plot=plot, show=item.show, fulltitle="%s %s" % (item.show, title)))
@@ -318,7 +283,6 @@ def findvideos(item):
     videoitem.thumbnail = item.thumbnail
     regex_video_list = r'var part = \[([^\]]+)'
     videos_html = scrapertools.find_single_match(iframe_html, regex_video_list)
     videos = re.findall('"([^"]+)"', videos_html, re.DOTALL)
     for quality_id, video_url in enumerate(videos):
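
The pagination fix above pairs the new REGEX_NEXT_PAGE (the site now emits the href in single quotes, as a relative path) with CHANNEL_HOST concatenation in series(). A small sketch of the idea, with a hypothetical absolutize() helper and an example path:

    CHANNEL_HOST = "http://animeflv.co"

    def absolutize(href):
        # leave already-absolute links alone, anchor relative ones to the host
        if href.startswith("http"):
            return href
        return CHANNEL_HOST + href

    # absolutize("/ListadeAnime?page=2") -> "http://animeflv.co/ListadeAnime?page=2"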

View File

@@ -162,27 +162,20 @@ def novedades_anime(item):
 def listado(item):
     logger.info()
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
-    # logger.debug("datito %s" % data)
     url_pagination = scrapertools.find_single_match(data, '<li class="current">.*?</li>[\s]<li><a href="([^"]+)">')
     data = scrapertools.find_single_match(data, '</div><div class="full">(.*?)<div class="pagination')
     matches = re.compile('<img.+?src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>.+?'
                          '<div class="full item_info genres_info">(.*?)</div>.+?class="full">(.*?)</p>',
                          re.DOTALL).findall(data)
     itemlist = []
     for thumbnail, url, title, genres, plot in matches:
         title = clean_title(title)
         url = urlparse.urljoin(HOST, url)
         thumbnail = urlparse.urljoin(HOST, thumbnail)
         new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                         fulltitle=title, plot=plot)
@@ -192,28 +185,22 @@ def listado(item):
         else:
             new_item.show = title
             new_item.context = renumbertools.context(item)
         itemlist.append(new_item)
     if url_pagination:
         url = urlparse.urljoin(HOST, url_pagination)
         title = ">> Pagina Siguiente"
         itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))
     return itemlist
 
 
 def episodios(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
     if item.plot == "":
         item.plot = scrapertools.find_single_match(data, 'Description[^>]+><p>(.*?)</p>')
     data = scrapertools.find_single_match(data, '<div class="Sect Episodes full">(.*?)</div>')
     matches = re.compile('<a href="([^"]+)"[^>]+>(.+?)</a', re.DOTALL).findall(data)
@@ -221,7 +208,6 @@ def episodios(item):
         title = title.strip()
         url = urlparse.urljoin(item.url, url)
         thumbnail = item.thumbnail
         try:
             episode = int(scrapertools.find_single_match(title, "Episodio (\d+)"))
         except ValueError:
@@ -229,42 +215,36 @@ def episodios(item):
             episode = 1
         else:
             season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode)
         title = "%s: %sx%s" % (item.title, season, str(episode).zfill(2))
         itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumbnail, fulltitle=title,
                                    fanart=thumbnail, contentType="episode"))
     return itemlist
 
 
 def findvideos(item):
     logger.info()
     itemlist = []
     _id = scrapertools.find_single_match(item.url, 'https://animeflv.ru/ver/([^/]+)/')
     post = "embed_id=%s" % _id
     data = httptools.downloadpage("https://animeflv.ru/get_video_info", post=post).data
     dict_data = jsontools.load(data)
     headers = dict()
     headers["Referer"] = item.url
     data = httptools.downloadpage("https:" + dict_data["value"], headers=headers).data
     dict_data = jsontools.load(data)
-    list_videos = dict_data["playlist"][0]["sources"]
+    if not dict_data:
+        return itemlist
+    list_videos = dict_data["playlist"][0]
     if isinstance(list_videos, list):
         for video in list_videos:
-            itemlist.append(Item(channel=item.channel, action="play", url=video["file"], show=re.escape(item.show),
-                                 title="Ver en calidad [%s]" % video["label"], plot=item.plot, fulltitle=item.title,
+            itemlist.append(Item(channel=item.channel, action="play", url=video["file"],
+                                 show=re.escape(item.show),
+                                 title=item.title, plot=item.plot, fulltitle=item.title,
                                  thumbnail=item.thumbnail))
     else:
         for video in list_videos.values():
-            itemlist.append(Item(channel=item.channel, action="play", url=video["file"], show=re.escape(item.show),
-                                 title="Ver en calidad [%s]" % video["label"], plot=item.plot, fulltitle=item.title,
+            video += "|User-Agent=Mozilla/5.0"
+            itemlist.append(Item(channel=item.channel, action="play", url=video, show=re.escape(item.show),
+                                 title=item.title, plot=item.plot, fulltitle=item.title,
                                  thumbnail=item.thumbnail))
     return itemlist
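
findvideos() above talks to the site in two steps: POST the embed_id to get_video_info, then fetch the URL returned in "value" to obtain the playlist JSON; the new guard bails out when that second response is empty. A condensed sketch of the flow under those assumptions (fetch_json is a hypothetical stand-in for the httptools + jsontools pair):

    def get_playlist(fetch_json, video_id, referer):
        info = fetch_json("https://animeflv.ru/get_video_info",
                          post="embed_id=%s" % video_id)
        data = fetch_json("https:" + info["value"],
                          headers={"Referer": referer})
        if not data:  # the guard added in this commit
            return []
        return data["playlist"][0]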

View File

@@ -0,0 +1,50 @@
{
"id": "animemovil",
"name": "Animemovil",
"active": true,
"adult": false,
"language": ["*"],
"thumbnail": "https://s1.postimg.org/92ji7stii7/animemovil1.png",
"banner": "",
"version": 1,
"changes": [
{
"date": "24/10/2017",
"description": "Primera version"
}
],
"categories": [
"anime"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}

View File

@@ -0,0 +1,406 @@
# -*- coding: utf-8 -*-
import re

from channels import renumbertools
from core import httptools
from core import jsontools
from core import scrapertools
from core.item import Item
from platformcode import platformtools, config, logger

__modo_grafico__ = config.get_setting('modo_grafico', 'animemovil')
__perfil__ = int(config.get_setting('perfil', "animemovil"))

# Fijar perfil de color
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
    color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
    color1 = color2 = color3 = color4 = color5 = ""

host = "http://animemovil.com"


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, action="recientes", title="Episodios Recientes", thumbnail=item.thumbnail,
                         url=host, text_color=color1, contentType="tvshow", extra="recientes"))
    itemlist.append(Item(channel=item.channel, action="listado", title="Animes", thumbnail=item.thumbnail,
                         url="%s/_API/?src=animesRecientes&offset=0" % host, text_color=color1))
    itemlist.append(Item(channel=item.channel, action="emision", title="En emisión", thumbnail=item.thumbnail,
                         url="%s/anime/emision" % host, text_color=color2, contentType="tvshow"))
    itemlist.append(Item(channel=item.channel, action="indices", title="Índices", thumbnail=item.thumbnail,
                         text_color=color2))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar...",
                         thumbnail=item.thumbnail, text_color=color3))
    itemlist.append(item.clone(title="Configurar canal", action="openconfig", text_color=color5, folder=False))
    if renumbertools.context:
        itemlist = renumbertools.show_option(item.channel, itemlist)
    return itemlist


def openconfig(item):
    ret = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return ret


def search(item, texto):
    item.url = "%s/?s=%s" % (host, texto.replace(" ", "+"))
    try:
        return recientes(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def recientes(item):
    logger.info()
    item.contentType = "tvshow"
    itemlist = []
    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.find_single_match(data, '<ul class="emision"(.*?)</ul>')
    patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for url, title, thumb in matches:
        url = host + url
        try:
            contentTitle = re.split(r"(?i) \d+ (?:Sub Español|Audio Español|Español Latino)", title)[0]
        except:
            contentTitle = ""
        contentTitle = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", contentTitle)
        tipo = "tvshow"
        show = contentTitle
        action = "episodios"
        context = renumbertools.context
        if item.extra == "recientes":
            action = "findvideos"
            context = ""
        if not item.extra and (url.endswith("-pelicula/") or url.endswith("-pelicula")):
            tipo = "movie"
            show = ""
            action = "peliculas"
        if not thumb.startswith("http"):
            thumb = "http:%s" % thumb
        infoLabels = {'filtro': {"original_language": "ja"}.items()}
        itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=thumb, text_color=color3,
                                   contentTitle=contentTitle, contentSerieName=show, infoLabels=infoLabels,
                                   thumb_=thumb, contentType=tipo, context=context))
    try:
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        if item.extra and itemlist:
            for it in itemlist:
                it.thumbnail = it.thumb_
    except:
        pass
    return itemlist


def listado(item):
    logger.info()
    itemlist = []
    data = jsontools.load(httptools.downloadpage(item.url).data)
    for it in data.get("items", []):
        scrapedtitle = it["title"]
        url = "%s/%s" % (host, it["url"])
        thumb = "http://img.animemovil.com/w440-h250-c/%s" % it["img"]
        title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", scrapedtitle)
        tipo = "tvshow"
        show = title
        action = "episodios"
        if url.endswith("-pelicula/") or url.endswith("-pelicula"):
            tipo = "movie"
            show = ""
            action = "peliculas"
        infoLabels = {'filtro': {"original_language": "ja"}.items()}
        itemlist.append(item.clone(action=action, title=scrapedtitle, url=url, thumbnail=thumb, text_color=color3,
                                   contentTitle=title, contentSerieName=show, infoLabels=infoLabels,
                                   context=renumbertools.context, contentType=tipo))
    try:
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass
    if data["buttom"] and itemlist:
        offset = int(scrapertools.find_single_match(item.url, 'offset=(\d+)')) + 1
        url = re.sub(r'offset=\d+', 'offset=%s' % offset, item.url)
        itemlist.append(Item(channel=item.channel, action="listado", url=url, title=">> Página Siguiente",
                             thumbnail=item.thumbnail, text_color=color2))
    return itemlist


def indices(item):
    logger.info()
    itemlist = []
    if "Índices" in item.title:
        itemlist.append(item.clone(title="Por Género", url="%s/anime/generos/" % host))
        itemlist.append(item.clone(title="Por Letra", url="%s/anime/" % host))
        itemlist.append(item.clone(action="completo", title="Lista completa de Animes",
                                   url="%s/anime/lista/" % host))
    else:
        data = httptools.downloadpage(item.url).data
        bloque = scrapertools.find_single_match(data, '<div class="letras">(.*?)</div>')
        patron = '<a title="([^"]+)"'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for title in matches:
            if "Letra" in item.title:
                url = "%s/_API/?src=animesLetra&offset=0&letra=%s" % (host, title)
            else:
                url = "%s/_API/?src=animesGenero&offset=0&genero=%s" % (host, title)
            itemlist.append(item.clone(action="listado", url=url, title=title))
    return itemlist


def completo(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.find_single_match(data, '<ul class="listadoAnime">(.*?)</ul>')
    patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for url, title, thumb in matches:
        url = host + url
        scrapedtitle = title
        thumb = thumb.replace("s90-c", "w440-h250-c")
        title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub Español| Peliculas", "", scrapedtitle)
        tipo = "tvshow"
        show = title
        action = "episodios"
        if url.endswith("-pelicula/") or url.endswith("-pelicula"):
            tipo = "movie"
            show = ""
            action = "peliculas"
        infoLabels = {'filtro': {"original_language": "ja"}.items()}
        itemlist.append(Item(channel=item.channel, action=action, title=scrapedtitle, url=url, thumbnail=thumb,
                             text_color=color3, contentTitle=title, contentSerieName=show, extra="completo",
                             context=renumbertools.context, contentType=tipo, infoLabels=infoLabels))
    return itemlist


def episodios(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    show = scrapertools.find_single_match(data, '<title>\s*([^<]+)\s*</title>')
    show = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", show)
    if not item.infoLabels["plot"]:
        item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="InfoSipnosis">.*?<p>(.*?)</p>')
    bloque = scrapertools.find_single_match(data, 'ul class="lista"(.*?)</ul>')
    matches = scrapertools.find_multiple_matches(bloque, '<li><a href="([^"]+)" title="([^"]+)"')
    for url, title in matches:
        url = host + url
        epi = scrapertools.find_single_match(title, '(?i)%s.*? (\d+) (?:Sub|Audio|Español)' % item.contentSerieName)
        new_item = item.clone(action="findvideos", url=url, title=title, extra="", context=renumbertools.context)
        if epi:
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, show, 1, int(epi))
            new_item.infoLabels["episode"] = episode
            new_item.infoLabels["season"] = season
            new_item.title = "%sx%s %s" % (season, episode, title)
        itemlist.append(new_item)
    if item.infoLabels.get("tmdb_id") or item.extra == "recientes" or item.extra == "completo":
        try:
            from core import tmdb
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        except:
            pass
    if config.get_videolibrary_support() and itemlist:
        itemlist.append(Item(channel=item.channel, title="Añadir serie a la videoteca", url=item.url,
                             action="add_serie_to_library", extra="episodios", contentTitle=item.contentTitle,
                             contentSerieName=item.contentSerieName, text_color=color4, fanart=item.fanart,
                             thumbnail=item.thumbnail))
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    if item.extra == "completo":
        try:
            from core import tmdb
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass
    data = httptools.downloadpage(item.url).data
    if not item.infoLabels["plot"]:
        item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="InfoSipnosis">.*?<p>(.*?)</p>')
    bloque = scrapertools.find_single_match(data, 'ul class="lista"(.*?)</ul>')
    matches = scrapertools.find_multiple_matches(bloque, '<li><a href="([^"]+)" title="([^"]+)"')
    if len(matches) == 1:
        item.url = host + matches[0][0]
        itemlist = findvideos(item)
    else:
        for url, title in matches:
            itemlist.append(item.clone(action="findvideos", title=title, url=url, extra=""))
    return itemlist


def emision(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    bloques = scrapertools.find_multiple_matches(data, '<div class="horario">.*?</i>\s*(.*?)</span>(.*?)</ul>')
    patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
    for dia, b in bloques:
        matches = scrapertools.find_multiple_matches(b, patron)
        if matches:
            itemlist.append(item.clone(action="", title=dia, text_color=color1))
        for url, title, thumb in matches:
            url = host + url
            scrapedtitle = " %s" % title
            title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub Español| Peliculas", "", title)
            if not thumb.startswith("http"):
                thumb = "http:%s" % thumb
            infoLabels = {'filtro': {"original_language": "ja"}.items()}
            itemlist.append(item.clone(action="episodios", title=scrapedtitle, url=url, thumbnail=thumb, text_color=color3,
                                       contentTitle=title, contentSerieName=title, extra="recientes",
                                       context=renumbertools.context, infoLabels=infoLabels))
    return itemlist


def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    id = scrapertools.find_single_match(data, '"id":"([^"]+)"')
    bloque = scrapertools.find_single_match(data, 'ul class="controles">(.*?)</ul>')
    patron = '<li title="([^"]+)" id="[^"]*" host="([^"]+)">'
    matches = scrapertools.find_multiple_matches(data, patron)
    for title, server in matches:
        if title == "Vizard":
            continue
        title = "%s - %s" % (title, item.title)
        post = "host=%s&id=%s" % (server, id)
        itemlist.append(item.clone(action="play", url="http://server-2-stream.animemovil.com/V2/", title=title,
                                   post=post))
    downl = scrapertools.find_single_match(data, '<div class="descargarCap">.*?<a href="([^"]+)"')
    if downl:
        downl = downl.replace("&amp;", "&")
        itemlist.append(item.clone(action="play", title="Descarga - %s" % item.title, url=downl, server="directo"))
    if not itemlist:
        itemlist.append(Item(channel=item.channel, title="No hay vídeos disponibles", action=""))
    if item.extra == "recientes":
        url = scrapertools.find_single_match(data, '<a class="CapList".*?href="([^"]+)"')
        if url:
            url = host + url
            itemlist.append(item.clone(action="episodios", title="Ir a lista de capítulos", url=url, text_color=color1))
    elif item.contentType == "movie" and config.get_library_support():
        if "No hay vídeos disponibles" not in itemlist[0].title:
            itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca", url=item.url,
                                 action="add_pelicula_to_library", contentTitle=item.contentTitle, text_color=color4,
                                 thumbnail=item.thumbnail, fanart=item.fanart))
    return itemlist


def play(item):
    logger.info()
    if item.server:
        return [item]
    itemlist = []
    data = jsontools.load(httptools.downloadpage(item.url, item.post).data)
    if data["jwplayer"] == False:
        content = data["eval"]["contenido"]
        urls = scrapertools.find_multiple_matches(content, 'file\s*:\s*"([^"]+)"')
        if not urls:
            urls = scrapertools.find_multiple_matches(content, '"GET","([^"]+)"')
        for url in urls:
            if "mediafire" in url:
                data_mf = httptools.downloadpage(url).data
                url = scrapertools.find_single_match(data_mf, 'kNO\s*=\s*"([^"]+)"')
            ext = url[-4:]
            itemlist.insert(0, ["%s [directo]" % ext, url])
    else:
        if data["jwplayer"].get("sources"):
            for source in data["jwplayer"]["sources"]:
                label = source.get("label", "")
                ext = source.get("type", "")
                if ext and "/" in ext:
                    ext = ".%s " % ext.rsplit("/", 1)[1]
                url = source.get("file")
                if "server-3-stream" in url:
                    url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location")
                itemlist.insert(0, ["%s%s [directo]" % (ext, label), url])
        elif data["jwplayer"].get("file"):
            label = data["jwplayer"].get("label", "")
            url = data["jwplayer"]["file"]
            ext = data["jwplayer"].get("type", "")
            if ext and "/" in ext:
                ext = "%s " % ext.rsplit("/", 1)[1]
            if "server-3-stream" in url:
                url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location")
            itemlist.insert(0, [".%s%s [directo]" % (ext, label), url])
    return itemlist


def newest(categoria):
    logger.info()
    item = Item()
    try:
        item.url = "http://skanime.net/"
        item.extra = "novedades"
        itemlist = recientes(item)
    # Se captura la excepción, para no interrumpir al canal novedades si un canal falla
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
    return itemlist

View File

@@ -43,7 +43,7 @@ def findvideos(item):
     for thumbnail, title, url, time in matches:
         scrapedtitle = time + " - " + title
         scrapedurl = host + url
-        scrapedthumbnail = "http:" + thumbnail
+        scrapedthumbnail = thumbnail
         itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl,
                                    thumbnail=scrapedthumbnail))
@@ -80,7 +80,7 @@ def play(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    url = "http:" + scrapertools.find_single_match(data, '<source src="([^"]+)"')
+    url = scrapertools.find_single_match(data, '<source src="([^"]+)"')
     itemlist.append(item.clone(url=url, server="directo"))
     return itemlist
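
Both hunks above stop prepending "http:" because the page now serves absolute URLs. A more defensive variant (an assumption, not the committed code) would add the scheme only when the scraped value is protocol-relative, so the channel survives either form:

    def normalize(url):
        # only protocol-relative URLs ("//host/path") need the scheme added
        if url.startswith("//"):
            return "http:" + url
        return url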

View File

@@ -1,8 +1,7 @@
 # -*- coding: utf-8 -*-
 import re
-from channels import renumbertools
 from channelselector import get_thumb
 from core import httptools
 from core import scrapertools
@@ -33,7 +32,6 @@ def mainlist(item):
     itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=host,
                          thumbnail=thumb_series))
-    itemlist = renumbertools.show_option(item.channel, itemlist)
     autoplay.show_option(item.channel, itemlist)
     return itemlist
@@ -71,7 +69,7 @@ def lista_gen(item):
         title = scrapedtitle + " [ " + scrapedlang + "]"
         itemlist.append(
             Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail, action="episodios",
-                 show=scrapedtitle, context=renumbertools.context(item)))
+                 show=scrapedtitle))
     tmdb.set_infoLabels(itemlist)
     # Paginacion
     patron_pag = '<a class="nextpostslink" rel="next" href="([^"]+)">'
@@ -98,7 +96,7 @@ def lista(item):
     for link, name in matches:
         title = name + " [Latino]"
         url = link
-        context1=[renumbertools.context(item), autoplay.context]
+        context1=[autoplay.context]
         itemlist.append(
             item.clone(title=title, url=url, plot=title, action="episodios", show=title,
                        context=context1))
@@ -129,31 +127,23 @@ def episodios(item):
     number = 0
     ncap = 0
     A = 1
+    tempo=1
     for temp, link, name in matches:
-        if A != temp:
+        if A != temp and "Ranma" not in show:
             number = 0
+        number = number + 1
         if "Ranma" in show:
-            number = int(temp)
-            temp = str(1)
-        else:
-            number = number + 1
-        if number < 10:
-            capi = "0" + str(number)
-        else:
-            capi = str(number)
+            number,tempo=renumerar_ranma(number,tempo,18+1,1)
+            number,tempo=renumerar_ranma(number,tempo,22+1,2)
+            number,tempo=renumerar_ranma(number,tempo,24+1,3)
+            number,tempo=renumerar_ranma(number,tempo,24+1,4)
+            number,tempo=renumerar_ranma(number,tempo,24+1,5)
+            number,tempo=renumerar_ranma(number,tempo,24+1,6)
+        capi=str(number).zfill(2)
         if "Ranma" in show:
-            season = 1
-            episode = number
-            season, episode = renumbertools.numbered_for_tratk(
-                item.channel, item.show, season, episode)
-            date = name
-            if episode < 10:
-                capi = "0" + str(episode)
-            else:
-                capi = episode
-            title = str(season) + "x" + str(capi) + " - " + name  # "{0}x{1} - ({2})".format(season, episode, date)
+            title = "{0}x{1} - ({2})".format(str(tempo), capi, name)
         else:
-            title = str(temp) + "x" + capi + " - " + name
+            title = "{0}x{1} - ({2})".format(str(temp), capi, name)
         url = link
         A = temp
         itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, show=show))
@@ -165,6 +155,11 @@ def episodios(item):
     return itemlist
+
+
+def renumerar_ranma(number,tempo,final,actual):
+    if number==final and tempo==actual:
+        tempo=tempo+1
+        number=1
+    return number, tempo
 
 
 def findvideos(item):
     logger.info()
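
renumerar_ranma() rolls a running episode counter into the next season once it passes each season's length (18, 22, 24, 24, 24, 24 episodes, as hard-coded above). A worked example with a hypothetical driver loop, not part of the channel:

    number, tempo = 0, 1
    for _ in range(45):  # absolute episodes 1..45
        number += 1
        number, tempo = renumerar_ranma(number, tempo, 18 + 1, 1)
        number, tempo = renumerar_ranma(number, tempo, 22 + 1, 2)
    # episodes 1-18 -> 1x01..1x18, 19-40 -> 2x01..2x22, 41 onward -> 3x01..
    # after the loop: tempo == 3 and number == 5, i.e. absolute episode 45 is 3x05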

View File

@@ -30,7 +30,7 @@ def mainlist(item):
     data = httptools.downloadpage(CHANNEL_HOST).data
     total = scrapertools.find_single_match(data, "TENEMOS\s<b>(.*?)</b>")
-    titulo = "Peliculas (%s)" % total
+    titulo = "Peliculas"
     itemlist.append(item.clone(title=titulo, text_color=color2, action="", text_bold=True))
     itemlist.append(item.clone(action="peliculas", title=" Novedades", url=CHANNEL_HOST + "pelicula",
                                thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres"
@@ -283,7 +283,7 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
     if type == "descarga": t_tipo = "Descargar"
     data = data.replace("\n", "")
     if type == "online":
-        patron = '(?is)class="playex.*?visualizaciones'
+        patron = '(?is)class="playex.*?sheader'
         bloque1 = scrapertools.find_single_match(data, patron)
         patron = '(?is)#(option-[^"]+).*?png">([^<]+)'
         match = scrapertools.find_multiple_matches(data, patron)
@@ -303,7 +303,7 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
     bloque2 = scrapertools.find_single_match(data, '(?s)box_links.*?dt_social_single')
     bloque2 = bloque2.replace("\t", "").replace("\r", "")
     patron = '(?s)optn" href="([^"]+)'
-    patron += '.*?title="([^\.]+)'
+    patron += '.*?alt="([^\.]+)'
     patron += '.*?src.*?src="[^>]+"?/>([^<]+)'
     patron += '.*?src="[^>]+"?/>([^<]+)'
     patron += '.*?/span>([^<]+)'

View File

@@ -1,37 +0,0 @@
{
"id": "crimenes",
"name": "Crimenes Imperfectos",
"active": true,
"adult": false,
"language": ["cast"],
"banner": "crimenes.png",
"thumbnail": "crimenes.png",
"version": 1,
"changes": [
{
"date": "19/06/2017",
"description": "correcion xml"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/07/2016",
"description": "Eliminado código innecesario."
}
],
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
}
]
}

View File

@@ -1,167 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse

import xbmc
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger


# Main list manual
def listav(item):
    itemlist = []
    data = scrapertools.cache_page(item.url)
    patronbloque = '<li><div class="yt-lockup.*?<img.*?src="([^"]+)".*?'
    patronbloque += '<h3 class="yt-lockup-title "><a href="([^"]+)".*?title="([^"]+)".*?'
    patronbloque += '</a><span class=.*?">(.*?)</span></h3>'
    matchesbloque = re.compile(patronbloque, re.DOTALL).findall(data)
    scrapertools.printMatches(matchesbloque)
    scrapedduration = ''
    for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedduration in matchesbloque:
        scrapedtitle = '[COLOR white]' + scrapedtitle + '[/COLOR] [COLOR red]' + scrapedduration + '[/COLOR]'
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = urlparse.urljoin(item.thumbnail, scrapedthumbnail)
        xbmc.log("$ " + scrapedurl + " " + scrapedtitle + " " + scrapedthumbnail)
        itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, fulltitle=scrapedtitle, url=url,
                             thumbnail=thumbnail, fanart=thumbnail))
    # Paginacion
    patronbloque = '<div class="branded-page-box .*? spf-link ">(.*?)</div>'
    matches = re.compile(patronbloque, re.DOTALL).findall(data)
    for bloque in matches:
        patronvideo = '<a href="([^"]+)"'
        matchesx = re.compile(patronvideo, re.DOTALL).findall(bloque)
        for scrapedurl in matchesx:
            url = urlparse.urljoin(item.url, 'https://www.youtube.com' + scrapedurl)
            # solo me quedo con el ultimo enlace
    itemlist.append(
        Item(channel=item.channel, action="listav", title="Siguiente pag >>", fulltitle="Siguiente Pag >>", url=url,
             thumbnail=item.thumbnail, fanart=item.fanart))
    return itemlist


def busqueda(item):
    itemlist = []
    keyboard = xbmc.Keyboard("", "Busqueda")
    keyboard.doModal()
    if (keyboard.isConfirmed()):
        myurl = keyboard.getText().replace(" ", "+")
        data = scrapertools.cache_page('https://www.youtube.com/results?q=' + myurl)
        data = data.replace("\n", "").replace("\t", "")
        data = scrapertools.decodeHtmlentities(data)
        patronbloque = '<li><div class="yt-lockup.*?<img.*?src="([^"]+)".*?'
        patronbloque += '<h3 class="yt-lockup-title "><a href="([^"]+)".*?title="([^"]+)".*?'
        patronbloque += '</a><span class=.*?">(.*?)</span></h3>'
        matchesbloque = re.compile(patronbloque, re.DOTALL).findall(data)
        scrapertools.printMatches(matchesbloque)
        for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedduracion in matchesbloque:
            scrapedtitle = scrapedtitle + ' ' + scrapedduracion
            url = scrapedurl
            thumbnail = scrapedthumbnail
            xbmc.log("$ " + scrapedurl + " " + scrapedtitle + " " + scrapedthumbnail)
            itemlist.append(
                Item(channel=item.channel, action="play", title=scrapedtitle, fulltitle=scrapedtitle, url=url,
                     thumbnail=thumbnail, fanart=thumbnail))
        # Paginacion
        patronbloque = '<div class="branded-page-box .*? spf-link ">(.*?)</div>'
        matches = re.compile(patronbloque, re.DOTALL).findall(data)
        for bloque in matches:
            patronvideo = '<a href="([^"]+)"'
            matchesx = re.compile(patronvideo, re.DOTALL).findall(bloque)
            for scrapedurl in matchesx:
                url = 'https://www.youtube.com' + scrapedurl
                # solo me quedo con el ultimo enlace
        itemlist.append(
            Item(channel=item.channel, action="listav", title="Siguiente pag >>", fulltitle="Siguiente Pag >>",
                 url=url))
        return itemlist
    else:
        # xbmcgui.Dialog().ok(item.channel, "nada que buscar")
        # xbmc.executebuiltin("Action(up)")
        xbmc.executebuiltin("Action(enter)")
        # itemlist.append( Item(channel=item.channel, action="listav", title="<< Volver", fulltitle="Volver" , url="history.back()") )


def mainlist(item):
    logger.info()
    itemlist = []
    item.url = 'https://www.youtube.com/results?q=crimenes+imperfectos&sp=CAI%253D'
    scrapedtitle = "[COLOR white]Crimenes [COLOR red]Imperfectos[/COLOR]"
    item.thumbnail = urlparse.urljoin(item.thumbnail,
                                      "https://encrypted-tbn2.gstatic.com/images?q=tbn:ANd9GcQ2PcyvcYIg6acvdUZrHGFFk_E3mXK9QSh-5TypP8Rk6zQ6S1yb2g")
    item.fanart = urlparse.urljoin(item.fanart,
                                   "https://encrypted-tbn2.gstatic.com/images?q=tbn:ANd9GcQ2PcyvcYIg6acvdUZrHGFFk_E3mXK9QSh-5TypP8Rk6zQ6S1yb2g")
    itemlist.append(
        Item(channel=item.channel, action="listav", title=scrapedtitle, fulltitle=scrapedtitle, url=item.url,
             thumbnail=item.thumbnail, fanart=item.fanart))
    item.url = 'https://www.youtube.com/results?search_query=russian+dash+cam&sp=CAI%253D'
    scrapedtitle = "[COLOR blue]Russian[/COLOR] [COLOR White]Dash[/COLOR] [COLOR red]Cam[/COLOR]"
    item.thumbnail = urlparse.urljoin(item.thumbnail, "https://i.ytimg.com/vi/-C6Ftromtig/maxresdefault.jpg")
    item.fanart = urlparse.urljoin(item.fanart,
                                   "https://encrypted-tbn2.gstatic.com/images?q=tbn:ANd9GcRQLO-n-kO1ByY8lLhKxz0-cejJD1J7rLge_j0E0Gh9LJ2WtTbSnA")
    itemlist.append(
        Item(channel=item.channel, action="listav", title=scrapedtitle, fulltitle=scrapedtitle, url=item.url,
             thumbnail=item.thumbnail, fanart=item.fanart))
    item.url = 'https://www.youtube.com/results?search_query=cuarto+milenio+programa+completo&sp=CAI%253D'
    scrapedtitle = "[COLOR green]Cuarto[/COLOR] [COLOR White]Milenio[/COLOR]"
    item.thumbnail = urlparse.urljoin(item.thumbnail,
                                      "http://cuatrostatic-a.akamaihd.net/cuarto-milenio/Cuarto-Milenio-analiza-fantasma-Granada_MDSVID20100924_0063_3.jpg")
    item.fanart = urlparse.urljoin(item.fanart,
                                   "http://cuatrostatic-a.akamaihd.net/cuarto-milenio/programas/temporada-07/t07xp32/fantasma-universidad_MDSVID20120420_0001_3.jpg")
    itemlist.append(
        Item(channel=item.channel, action="listav", title=scrapedtitle, fulltitle=scrapedtitle, url=item.url,
             thumbnail=item.thumbnail, fanart=item.fanart))
    item.url = 'https://www.youtube.com/results?q=milenio+3&sp=CAI%253D'
    scrapedtitle = "[COLOR green]Milenio[/COLOR] [COLOR White]3- Podcasts[/COLOR]"
    item.thumbnail = urlparse.urljoin(item.thumbnail,
                                      "http://cuatrostatic-a.akamaihd.net/cuarto-milenio/Cuarto-Milenio-analiza-fantasma-Granada_MDSVID20100924_0063_3.jpg")
    item.fanart = urlparse.urljoin(item.fanart,
                                   "http://cuatrostatic-a.akamaihd.net/cuarto-milenio/programas/temporada-07/t07xp32/fantasma-universidad_MDSVID20120420_0001_3.jpg")
    itemlist.append(
        Item(channel=item.channel, action="listav", title=scrapedtitle, fulltitle=scrapedtitle, url=item.url,
             thumbnail=item.thumbnail, fanart=item.fanart))
    scrapedtitle = "[COLOR red]buscar ...[/COLOR]"
    item.thumbnail = urlparse.urljoin(item.thumbnail,
                                      "http://cuatrostatic-a.akamaihd.net/cuarto-milenio/Cuarto-Milenio-analiza-fantasma-Granada_MDSVID20100924_0063_3.jpg")
    item.fanart = urlparse.urljoin(item.fanart,
                                   "http://cuatrostatic-a.akamaihd.net/cuarto-milenio/programas/temporada-07/t07xp32/fantasma-universidad_MDSVID20120420_0001_3.jpg")
    itemlist.append(Item(channel=item.channel, action="busqueda", title=scrapedtitle, fulltitle=scrapedtitle,
                         thumbnail=item.thumbnail, fanart=item.fanart))
    return itemlist


def play(item):
    logger.info("url=" + item.url)
    itemlist = servertools.find_video_items(data=item.url)
    return itemlist

View File

@@ -157,11 +157,10 @@ def lista(item):
# logger.info("data="+data) # logger.info("data="+data)
bloque = scrapertools.find_single_match(data, '(?:<ul class="pelilist">|<ul class="buscar-list">)(.*?)</ul>') bloque = scrapertools.find_single_match(data, '(?:<ul class="pelilist">|<ul class="buscar-list">)(.*?)</ul>')
patron = '<li[^<]+' patron = '<a href="([^"]+).*?' # la url
patron += '<a href="([^"]+)".*?' patron += '<img src="([^"]+)"[^>]+>.*?' # el thumbnail
patron += 'src="([^"]+)".*?' patron += '<h2[^>]*>(.*?)</h2.*?' # el titulo
patron += '<h2[^>]*>(.*?)</h2.*?' patron += '<span>([^<].*?)<' # la calidad
patron += '(?:<strong[^>]*>|<span[^>]*>)(.*?)(?:</strong>|</span>)'
matches = re.compile(patron, re.DOTALL).findall(bloque) matches = re.compile(patron, re.DOTALL).findall(bloque)
scrapertools.printMatches(matches) scrapertools.printMatches(matches)
@@ -175,7 +174,7 @@ def lista(item):
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
plot = "" plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
contentTitle = scrapertools.htmlclean(scrapedtitle).strip() contentTitle = scrapertools.htmlclean(scrapedtitle).strip()
patron = '([^<]+)<br>' patron = '([^<]+)<br>'
matches = re.compile(patron, re.DOTALL).findall(calidad + '<br>') matches = re.compile(patron, re.DOTALL).findall(calidad + '<br>')
@@ -196,7 +195,7 @@ def lista(item):
itemlist.append(Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url, itemlist.append(Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, plot=plot, folder=True, contentTitle=contentTitle, thumbnail=thumbnail, plot=plot, folder=True, contentTitle=contentTitle,
language=idioma, contentSeason=int(temporada), language=idioma, contentSeason=int(temporada),
contentEpisodeNumber=int(episodio), contentQuality=calidad)) contentEpisodeNumber=int(episodio), quality=calidad))
else: else:
if len(matches) == 2: if len(matches) == 2:
@@ -205,7 +204,7 @@ def lista(item):
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, plot=plot, folder=True, contentTitle=contentTitle, thumbnail=thumbnail, plot=plot, folder=True, contentTitle=contentTitle,
language=idioma, contentThumbnail=thumbnail, contentQuality=calidad)) language=idioma, contentThumbnail=thumbnail, quality=calidad))
next_page_url = scrapertools.find_single_match(data, '<li><a href="([^"]+)">Next</a></li>') next_page_url = scrapertools.find_single_match(data, '<li><a href="([^"]+)">Next</a></li>')
if next_page_url != "": if next_page_url != "":
@@ -262,7 +261,7 @@ def findvideos(item):
item.plot = scrapertools.htmlclean(item.plot).strip() item.plot = scrapertools.htmlclean(item.plot).strip()
item.contentPlot = item.plot item.contentPlot = item.plot
link = scrapertools.find_single_match(data, 'href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=([^"]+)"') link = scrapertools.find_single_match(data, 'location\.href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=(.*?)"')
if link != "": if link != "":
link = "http://www.divxatope1.com/" + link link = "http://www.divxatope1.com/" + link
logger.info("torrent=" + link) logger.info("torrent=" + link)
@@ -275,14 +274,7 @@ def findvideos(item):
 patron += '<\/div[^<]+<div class="box4">([^<]+)<\/div[^<]+<div class="box5"><a href=(.*?) rel.*?'
 patron += '<\/div[^<]+<div class="box6">([^<]+)<'
-#patron = "<div class=\"box1\"[^<]+<img[^<]+</div[^<]+"
-#patron += '<div class="box2">([^<]+)</div[^<]+'
-#patron += '<div class="box3">([^<]+)</div[^<]+'
-#patron += '<div class="box4">([^<]+)</div[^<]+'
-#patron += '<div class="box5">(.*?)</div[^<]+'
-#patron += '<div class="box6">([^<]+)<'
 matches = re.compile(patron, re.DOTALL).findall(data)
-scrapertools.printMatches(matches)
 itemlist_ver = []
 itemlist_descargar = []
@@ -308,11 +300,8 @@ def findvideos(item):
 else:
 itemlist_descargar.append(new_item)
-for new_item in itemlist_ver:
-    itemlist.append(new_item)
-for new_item in itemlist_descargar:
-    itemlist.append(new_item)
+itemlist.extend(itemlist_ver)
+itemlist.extend(itemlist_descargar)
 return itemlist

View File

@@ -1,4 +1,4 @@
 # -*- coding: utf-8 -*-
 import re
 from channelselector import get_thumb
@@ -53,8 +53,7 @@ def listado(item):
 patron += '<b>Categoria:\s*</b>([^&]+)&raquo;\s*([^<]+).*?'
 patron += '<div class="OpcionesDescargasMini">(.*?)</div>'
-matches = re.compile(patron, re.DOTALL).findall(data)
+matches = scrapertools.find_multiple_matches(data, patron)
 for thumbnail, title, cat_padres, cat_hijos, opciones in matches:
 # logger.debug(thumbnail + "\n" + title + "\n" + cat_padres + "\n" + cat_hijos + "\n" + opciones)
 # Obtenemos el año del titulo y eliminamos lo q sobre
@@ -70,7 +69,7 @@ def listado(item):
 thumbnail = HOST + thumbnail[:-5] + 'b' + thumbnail[-4:]
 # Buscamos opcion de ver online
-patron = '<a href="http://estrenosly.org/ver-online-([^"]+)'
+patron = '<a href="http://estrenos.*?/ver-online-([^"]+)'
 url_ver = scrapertools.find_single_match(opciones, patron)
 if url_ver:
 new_item = Item(channel=item.channel, action="findvideos", title=title,

View File

@@ -616,6 +616,8 @@ def findvideos(item):
 url_targets = item.url
 ## Vídeos
+id = ""
+type = ""
 if "###" in item.url:
 id = item.url.split("###")[1].split(";")[0]
 type = item.url.split("###")[1].split(";")[1]
@@ -698,6 +700,9 @@ def findvideos(item):
 it2 = servertools.get_servers_itemlist(it2, lambda i: i.title % i.server.capitalize())
 it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server))
+for item in it2:
+    if "###" not in item.url:
+        item.url += "###" + id + ";" + type
 itemlist.extend(it1)
 itemlist.extend(it2)
 ## 2 = película
@@ -707,7 +712,6 @@ def findvideos(item):
action="add_pelicula_to_library", url=url_targets, thumbnail = item.thumbnail, action="add_pelicula_to_library", url=url_targets, thumbnail = item.thumbnail,
fulltitle = item.contentTitle fulltitle = item.contentTitle
)) ))
return itemlist return itemlist

View File

@@ -98,10 +98,11 @@ def peliculas(item):
 url_next_page = ''
 data = httptools.downloadpage(item.url).data
 data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
+# logger.info(data)
 patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?'  # img, title
 patron += '<div class="rating"><span class="[^"]+"></span>([^<]+).*?'  # rating
-patron += '<span class="quality">([^<]+)</span><a href="([^"]+)">.*?'  # calidad, url
+patron += '<span class="quality">([^<]+)</span></div><a href="([^"]+)">.*?'  # calidad, url
 patron += '<span>([^<]+)</span>'  # year
 matches = scrapertools.find_multiple_matches(data, patron)

View File

@@ -12,15 +12,8 @@ from core import tmdb
 from core.item import Item
 from platformcode import config, logger
-__channel__='allcalidad'
-host = "http://www.pelismundo.com/"
-try:
-    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
-except:
-    __modo_grafico__ = True
+host = "http://www.pelismundo.com"
+idiomas = {"Castellano":"CAST","Subtitulad":"VOSE","Latino":"LAT"}
 def mainlist(item):
 logger.info()
@@ -80,10 +73,9 @@ def sub_search(item):
 patron = '(?s)href="([^"]+)".*?'
 patron += 'title="([^"]+)".*?'
 patron += 'src="([^"]+)".*?'
-patron += 'Idioma.*?tag">([^<]+).*?'
-patron += 'Calidad(.*?<)\/'
+patron += 'Idioma(.*?)Cal'
+patron += 'idad(.*?<)\/'
 match = scrapertools.find_multiple_matches(bloque, patron)
-scrapertools.printMatches(match)
 for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedlanguages, scrapedquality in match:
 year = scrapertools.find_single_match(scrapedtitle, '[0-9]{4}')
 scrapedquality = scrapertools.find_single_match(scrapedquality, 'rel="tag">([^<]+)<')
@@ -93,21 +85,14 @@ def sub_search(item):
 scrapedtitle = scrapedtitle.replace(st, "")
 title = scrapedtitle
 if year:
-    title += " (" + year + ")"
+    title += "(" + year + ")"
 if scrapedquality:
 title += " (" + scrapedquality + ")"
-patronidiomas = ''
 idiomas_disponibles = []
-matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Castellano')
-if matchidioma:
-    idiomas_disponibles.append("ESP")
-matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Subtitulado')
-if matchidioma:
-    idiomas_disponibles.append("VOSE")
-matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Latino')
-if matchidioma:
-    idiomas_disponibles.append("LAT")
+for lang in idiomas.keys():
+    if lang in scrapedlanguages:
+        idiomas_disponibles.append(idiomas[lang])
 idiomas_disponibles1 = ""
 if idiomas_disponibles:
 idiomas_disponibles1 = "[" + "/".join(idiomas_disponibles) + "]"
 title += " %s" %idiomas_disponibles1
@@ -139,7 +124,7 @@ def filtro(item):
 patron += '</span>([^<]+)</a>'
 matches = scrapertools.find_multiple_matches(bloque, patron)
 for url, title in matches:
-if "eroti33cas" in title and config.get_setting("adult_mode") == 0:
+if "eroticas" in title and config.get_setting("adult_mode") == 0:
 continue
 itemlist.append(item.clone(action = "peliculas",
 title = title.title(),
@@ -171,17 +156,10 @@ def peliculas(item):
title += " (" + year + ")" title += " (" + year + ")"
if scrapedquality: if scrapedquality:
title += " (" + scrapedquality + ")" title += " (" + scrapedquality + ")"
patronidiomas = ''
idiomas_disponibles = [] idiomas_disponibles = []
matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Castellano') for lang in idiomas.keys():
if matchidioma: if lang in scrapedlanguages:
idiomas_disponibles.append("ESP") idiomas_disponibles.append(idiomas[lang])
matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Subtitulado')
if matchidioma:
idiomas_disponibles.append("VOSE")
matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Latino')
if matchidioma:
idiomas_disponibles.append("LAT")
idiomas_disponibles1 = "" idiomas_disponibles1 = ""
if idiomas_disponibles: if idiomas_disponibles:
idiomas_disponibles1 = "[" + "/".join(idiomas_disponibles) + "]" idiomas_disponibles1 = "[" + "/".join(idiomas_disponibles) + "]"
@@ -219,7 +197,7 @@ def findvideos(item):
title = "Ver en: %s " + "(" + scrapedlanguage + ")" title = "Ver en: %s " + "(" + scrapedlanguage + ")"
itemlist.append(item.clone(action = "play", itemlist.append(item.clone(action = "play",
title = title, title = title,
language = item.language, language = scrapedlanguage,
quality = item.quality, quality = item.quality,
url = scrapedurl url = scrapedurl
)) ))

View File

@@ -195,8 +195,9 @@ def lista(item):
 # de tmdb
 filtro_list = filtro_list.items()
 if item.title != 'Buscar':
-itemlist.append(
+new_item=(
 Item(channel=item.channel,
 contentType=tipo,
 action=accion,
@@ -205,11 +206,14 @@ def lista(item):
 thumbnail=thumbnail,
 fulltitle=scrapedtitle,
 infoLabels={'filtro': filtro_list},
-contentTitle=scrapedtitle,
-contentSerieName=scrapedtitle,
 extra=item.extra,
 context=autoplay.context
 ))
+if 'serie' in scrapedurl:
+    new_item.contentSerieName=scrapedtitle
+else:
+    new_item.contentTitle = scrapedtitle
+itemlist.append(new_item)
 else:
 item.extra = item.extra.rstrip('s/')
 if item.extra in url:
@@ -222,11 +226,14 @@ def lista(item):
 thumbnail=scrapedthumbnail,
 fulltitle=scrapedtitle,
 infoLabels={'filtro': filtro_list},
-contentTitle=scrapedtitle,
-contentSerieName=scrapedtitle,
 extra=item.extra,
 context=autoplay.context
 ))
+if 'serie' in scrapedurl:
+    new_item.contentSerieName=scrapedtitle
+else:
+    new_item.contentTitle = scrapedtitle
+itemlist.append(new_item)
 tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

View File

@@ -777,7 +777,7 @@ def acciones_cuenta(item):
 for category, contenido in matches:
 itemlist.append(item.clone(action="", title=category, text_color=color3))
-patron = '<div class="c_fichas_image"[^>]*>[^<]*<[^>]+href="\.([^"]+)".*?src="\.([^"]+)".*?serie="([^"]*)".*?' \
+patron = '<div class="c_fichas_image"[^>]*>[^<]*<[^>]+href="\.([^"]+)".*?src="([^"]+)".*?serie="([^"]*)".*?' \
 '<div class="c_fichas_title">(?:<div class="c_fichas_episode">([^<]+)</div>|)([^<]+)</div>'
 entradas = scrapertools.find_multiple_matches(contenido, patron)
 for scrapedurl, scrapedthumbnail, serie, episodio, scrapedtitle in entradas:

View File

@@ -7,7 +7,7 @@ from core import scrapertools
 from core.item import Item
 from platformcode import logger
-host = "http://www.playpornx.net/list-movies/"
+host = "http://www.playpornx.net/"
 def mainlist(item):
@@ -15,7 +15,7 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, title="Todas", action="lista", itemlist.append(Item(channel=item.channel, title="Todas", action="lista",
thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png', thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png',
fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', fanart='https://s18.postimg.org/fwvaeo6qh/todas.png',
url ='https://www.playpornx.net/category/porn-movies/?filter=date')) url =host))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url='http://www.playpornx.net/?s=', itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url='http://www.playpornx.net/?s=',
thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png', thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png',
@@ -31,10 +31,10 @@ def lista(item):
 if item.url == '': item.url = host
 data = httptools.downloadpage(item.url).data
 data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
-patron = 'role=article><a href=(.*?) rel=bookmark title=(.*?)>.*?src=(.*?) class'
+patron = '<div class=item>.*?href=(.*?)><div.*?<img src=(.*?) alt.*?<h2>(.*?)<\/h2>'
 matches = re.compile(patron, re.DOTALL).findall(data)
-for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
+for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
 url = scrapedurl
 thumbnail = scrapedthumbnail
 title = scrapedtitle

View File

@@ -55,6 +55,7 @@ def lista(item):
 # Paginacion
 num_items_x_pagina = 30
 min = item.page * num_items_x_pagina
+min=min-item.page
 max = min + num_items_x_pagina - 1
 for link, img, name in matches[min:max]:
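The added line compensates for the exclusive end of the matches[min:max] slice: each page renders only 29 of its 30 reserved items, so without the correction every page started one entry too far and dropped its first title. A quick standalone check of the corrected arithmetic (plain Python, not addon code):

    num_items_x_pagina = 30
    pages = []
    for page in range(3):
        start = page * num_items_x_pagina - page  # the fix: step back one item per page
        end = start + num_items_x_pagina - 1      # used above as an exclusive slice bound
        pages.append((start, end))
    # pages == [(0, 29), (29, 58), (58, 87)]: page N+1 resumes exactly where page N
    # stopped, so the first entry of each page is no longer skipped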

View File

@@ -266,7 +266,6 @@ def lista(item):
 contentTitle=scrapedtitle,
 extra=item.extra,
 infoLabels={'year': year},
-show=scrapedtitle,
 list_language=list_language,
 context=autoplay.context
 ))

View File

@@ -1,100 +0,0 @@
{
"id": "vixto",
"name": "Vixto",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"banner": "vixto.png",
"thumbnail": "http://i.imgur.com/y4c4HT2.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "16/02/2017",
"description": "Correccion para el apartado de series"
},
{
"date": "12/11/2016",
"description": "Primera version, sustituye a oranline"
}
],
"categories": [
"movie",
"tvshow",
"vos"
],
"settings": [
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Películas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
},
{
"id": "filterlanguages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"VOSE",
"Latino",
"Español",
"No filtrar"
]
},
{
"id": "filterlinks",
"type": "list",
"label": "Mostrar enlaces de tipo...",
"default": 2,
"enabled": true,
"visible": true,
"lvalues": [
"Solo Descarga",
"Solo Online",
"No filtrar"
]
},
{
"id": "orderlinks",
"type": "list",
"label": "Ordenar enlaces por...",
"default": 2,
"enabled": true,
"visible": true,
"lvalues": [
"Servidor",
"Idioma",
"Más recientes"
]
}
]
}

View File

@@ -1,383 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
# Configuracion del canal
__modo_grafico__ = config.get_setting('modo_grafico', "vixto")
__perfil__ = config.get_setting('perfil', "vixto")
# Fijar perfil de color
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
color1, color2, color3 = perfil[__perfil__]
host = "http://www.vixto.net/"
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(item.clone(title="Películas", text_color=color2, action="",
text_bold=True))
itemlist.append(item.clone(action="listado", title=" Estrenos", text_color=color1, url=host,
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/"
"0/Directors%20Chair.png"))
itemlist.append(item.clone(action="listado", title=" Novedades", text_color=color1, url=host,
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/"
"0/Directors%20Chair.png"))
itemlist.append(item.clone(action="listado", title="Series - Novedades", text_color=color2, url=host,
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/"
"0/TV%20Series.png", text_bold=True))
itemlist.append(item.clone(action="search", title="Buscar...", text_color=color3,
url="http://www.vixto.net/buscar?q="))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
try:
return busqueda(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("%{0}".format(line))
return []
def newest(categoria):
logger.info()
itemlist = list()
item = Item()
try:
if categoria == 'peliculas':
item.url = host
itemlist = listado(item)
if itemlist[-1].action == "listado":
itemlist.pop()
item.title = "Estrenos"
itemlist.extend(listado(item))
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def listado(item):
logger.info()
itemlist = list()
item.infoLabels['mediatype'] = "movie"
if "Estrenos" in item.title:
bloque_head = "ESTRENOS CARTELERA"
elif "Series" in item.title:
bloque_head = "RECIENTE SERIES"
item.infoLabels['mediatype'] = "tvshow"
else:
bloque_head = "RECIENTE PELICULAS"
# Descarga la página
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data)
# Extrae las entradas (carpetas)
bloque = scrapertools.find_single_match(data, bloque_head + '\s*</h2>(.*?)</section>')
patron = '<div class="".*?href="([^"]+)".*?src="([^"]+)".*?<div class="calZG">(.*?)</div>' \
'(.*?)</div>.*?href.*?>(.*?)</a>'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedthumbnail, calidad, idiomas, scrapedtitle in matches:
title = scrapedtitle
langs = []
if 'idio idi1' in idiomas:
langs.append("VOS")
if 'idio idi2' in idiomas:
langs.append("LAT")
if 'idio idi4' in idiomas:
langs.append("ESP")
if langs:
title += " [%s]" % "/".join(langs)
if calidad:
title += " %s" % calidad
filtro_thumb = scrapedthumbnail.replace("http://image.tmdb.org/t/p/w342", "")
filtro_list = {"poster_path": filtro_thumb}
filtro_list = filtro_list.items()
if item.contentType == "tvshow":
new_item = item.clone(action="episodios", title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
fulltitle=scrapedtitle, infoLabels={'filtro': filtro_list},
contentTitle=scrapedtitle, context="buscar_trailer", text_color=color1,
show=scrapedtitle, text_bold=False)
else:
new_item = item.clone(action="findvideos", title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
fulltitle=scrapedtitle, infoLabels={'filtro': filtro_list}, text_bold=False,
contentTitle=scrapedtitle, context="buscar_trailer", text_color=color1)
itemlist.append(new_item)
if item.action == "listado":
try:
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
return itemlist
def busqueda(item):
logger.info()
itemlist = list()
# Descarga la página
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data)
# Extrae las entradas (carpetas)
bloque = scrapertools.find_single_match(data, '<h2>Peliculas</h2>(.*?)</div>')
bloque += scrapertools.find_single_match(data, '<h2>Series</h2>(.*?)</div>')
patron = '<figure class="col-lg-2.*?href="([^"]+)".*?src="([^"]+)".*?<figcaption title="([^"]+)"'
matches = scrapertools.find_multiple_matches(bloque, patron)
peliculas = False
series = False
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
new_item = Item(channel=item.channel, contentType="movie", url=scrapedurl, title=" " + scrapedtitle,
text_color=color1, context="buscar_trailer", fulltitle=scrapedtitle,
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail, action="findvideos")
if "/peliculas/" in scrapedurl and not peliculas:
itemlist.append(Item(channel=item.channel, action="", title="Películas", text_color=color2))
peliculas = True
if "/series/" in scrapedurl and not series:
itemlist.append(Item(channel=item.channel, action="", title="Series", text_color=color2))
series = True
if "/series/" in scrapedurl:
new_item.contentType = "tvshow"
new_item.show = scrapedtitle
new_item.action = "episodios"
filtro_thumb = scrapedthumbnail.replace("http://image.tmdb.org/t/p/w342", "")
filtro_list = {"poster_path": filtro_thumb}
new_item.infoLabels["filtro"] = filtro_list.items()
itemlist.append(new_item)
try:
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
return itemlist
def episodios(item):
logger.info()
itemlist = list()
# Descarga la página
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data)
# Extrae las entradas (carpetas)
bloque = scrapertools.find_single_match(data, '<strong>Temporada:(.*?)</div>')
matches = scrapertools.find_multiple_matches(bloque, 'href="([^"]+)">(.*?)</a>')
for scrapedurl, scrapedtitle in matches:
title = "Temporada %s" % scrapedtitle
new_item = item.clone(action="", title=title, text_color=color2)
new_item.infoLabels["season"] = scrapedtitle
new_item.infoLabels["mediatype"] = "season"
data_season = httptools.downloadpage(scrapedurl).data
data_season = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data_season)
patron = '<li class="media">.*?href="([^"]+)"(.*?)<div class="media-body">.*?href.*?>' \
'(.*?)</a>'
matches = scrapertools.find_multiple_matches(data_season, patron)
elementos = []
for url, status, title in matches:
if not "Enlaces Disponibles" in status:
continue
elementos.append(title)
item_epi = item.clone(action="findvideos", url=url, text_color=color1)
item_epi.infoLabels["season"] = scrapedtitle
episode = scrapertools.find_single_match(title, 'Capitulo (\d+)')
titulo = scrapertools.find_single_match(title, 'Capitulo \d+\s*-\s*(.*?)$')
item_epi.infoLabels["episode"] = episode
item_epi.infoLabels["mediatype"] = "episode"
item_epi.title = "%sx%s %s" % (scrapedtitle, episode.zfill(2), titulo)
itemlist.insert(0, item_epi)
if elementos:
itemlist.insert(0, new_item)
if item.infoLabels["tmdb_id"] and itemlist:
try:
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
if itemlist:
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir serie a la videoteca", text_color="green",
filtro=True, action="add_serie_to_library", fulltitle=item.fulltitle,
extra="episodios", url=item.url, infoLabels=item.infoLabels, show=item.show))
else:
itemlist.append(item.clone(title="Serie sin episodios disponibles", action="", text_color=color3))
return itemlist
def findvideos(item):
logger.info()
itemlist = list()
try:
filtro_idioma = config.get_setting("filterlanguages", item.channel)
filtro_enlaces = config.get_setting("filterlinks", item.channel)
except:
filtro_idioma = 3
filtro_enlaces = 2
dict_idiomas = {'Castellano': 2, 'Latino': 1, 'Subtitulada': 0}
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data)
if not item.infoLabels["tmdb_id"]:
year = scrapertools.find_single_match(data, 'Lanzamiento.*?(\d{4})')
if year != "":
item.infoLabels['filtro'] = ""
item.infoLabels['year'] = int(year)
# Ampliamos datos en tmdb
try:
tmdb.set_infoLabels_item(item, __modo_grafico__)
except:
pass
if not item.infoLabels['plot']:
plot = scrapertools.find_single_match(data, '<p class="plot">(.*?)</p>')
item.infoLabels['plot'] = plot
if filtro_enlaces != 0:
list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "Ver Online", item)
if list_enlaces:
itemlist.append(item.clone(action="", title="Enlaces Online", text_color=color1,
text_bold=True))
itemlist.extend(list_enlaces)
if filtro_enlaces != 1:
list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "Descarga Directa", item)
if list_enlaces:
itemlist.append(item.clone(action="", title="Enlaces Descargas", text_color=color1,
text_bold=True))
itemlist.extend(list_enlaces)
# Opción "Añadir esta película a la videoteca de XBMC"
if itemlist and item.contentType == "movie":
contextual = config.is_xbmc()
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta", contextual=contextual))
if item.extra != "findvideos":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir enlaces a la videoteca", text_color="green",
filtro=True, action="add_pelicula_to_library", fulltitle=item.fulltitle,
extra="findvideos", url=item.url, infoLabels=item.infoLabels,
contentType=item.contentType, contentTitle=item.contentTitle, show=item.show))
elif not itemlist and item.contentType == "movie":
itemlist.append(item.clone(title="Película sin enlaces disponibles", action="", text_color=color3))
return itemlist
def bloque_enlaces(data, filtro_idioma, dict_idiomas, tipo, item):
logger.info()
lista_enlaces = list()
bloque = scrapertools.find_single_match(data, tipo + '(.*?)</table>')
patron = '<td class="sape">\s*<i class="idioma-([^"]+)".*?href="([^"]+)".*?</p>.*?<td>([^<]+)</td>' \
'.*?<td class="desaparecer">(.*?)</td>'
matches = scrapertools.find_multiple_matches(bloque, patron)
filtrados = []
for language, scrapedurl, calidad, orden in matches:
language = language.strip()
server = scrapertools.find_single_match(scrapedurl, 'http(?:s|)://(?:www.|)(\w+).')
if server == "ul":
server = "uploadedto"
if server == "streamin":
server = "streaminto"
if server == "waaw":
server = "netutv"
if servertools.is_server_enabled(server):
try:
servers_module = __import__("servers." + server)
title = " Mirror en " + server + " (" + language + ") (Calidad " + calidad.strip() + ")"
if filtro_idioma == 3 or item.filtro:
lista_enlaces.append(item.clone(title=title, action="play", server=server, text_color=color2,
url=scrapedurl, idioma=language, orden=orden, language=language))
else:
idioma = dict_idiomas[language]
if idioma == filtro_idioma:
lista_enlaces.append(item.clone(title=title, text_color=color2, action="play",
url=scrapedurl, server=server, idioma=language, orden=orden,
language=language))
else:
if language not in filtrados:
filtrados.append(language)
except:
pass
order = config.get_setting("orderlinks", item.channel)
if order == 0:
lista_enlaces.sort(key=lambda item: item.server)
elif order == 1:
lista_enlaces.sort(key=lambda item: item.idioma)
else:
lista_enlaces.sort(key=lambda item: item.orden, reverse=True)
if filtro_idioma != 3:
if len(filtrados) > 0:
title = "Mostrar enlaces filtrados en %s" % ", ".join(filtrados)
lista_enlaces.append(item.clone(title=title, action="findvideos", url=item.url, text_color=color3,
filtro=True))
return lista_enlaces
def play(item):
logger.info()
itemlist = list()
enlace = servertools.findvideosbyserver(item.url, item.server)
itemlist.append(item.clone(url=enlace[0][1]))
return itemlist

View File

@@ -2,11 +2,15 @@
 import copy
 import re
+import sqlite3
 import time
+from core import filetools
+from core import httptools
 from core import jsontools
 from core import scrapertools
 from core.item import InfoLabels
+from platformcode import config
 from platformcode import logger
 # -----------------------------------------------------------------------------------------------------------
@@ -61,6 +65,123 @@ from platformcode import logger
 # --------------------------------------------------------------------------------------------------------------
 otmdb_global = None
+fname = filetools.join(config.get_data_path(), "alfa_db.sqlite")
+def create_bd():
+    conn = sqlite3.connect(fname)
+    c = conn.cursor()
+    c.execute('CREATE TABLE IF NOT EXISTS tmdb_cache (url TEXT PRIMARY KEY, response TEXT, added TEXT)')
+    conn.commit()
+    conn.close()
+def drop_bd():
+    conn = sqlite3.connect(fname)
+    c = conn.cursor()
+    c.execute('DROP TABLE IF EXISTS tmdb_cache')
+    conn.commit()
+    conn.close()
+    return True
+create_bd()
+# El nombre de la funcion es el nombre del decorador y recibe la funcion que decora.
+def cache_response(fn):
+    logger.info()
+    # import time
+    # start_time = time.time()
+    def wrapper(*args):
+        import base64
+        def check_expired(ts):
+            import datetime
+            valided = False
+            cache_expire = config.get_setting("tmdb_cache_expire", default=0)
+            saved_date = datetime.datetime.fromtimestamp(ts)
+            current_date = datetime.datetime.fromtimestamp(time.time())
+            elapsed = current_date - saved_date
+            # 1 day
+            if cache_expire == 0:
+                if elapsed > datetime.timedelta(days=1):
+                    valided = False
+                else:
+                    valided = True
+            # 7 days
+            elif cache_expire == 1:
+                if elapsed > datetime.timedelta(days=7):
+                    valided = False
+                else:
+                    valided = True
+            # 15 days
+            elif cache_expire == 2:
+                if elapsed > datetime.timedelta(days=15):
+                    valided = False
+                else:
+                    valided = True
+            # 1 month - 30 days
+            elif cache_expire == 3:
+                # no tenemos en cuenta febrero o meses con 31 días
+                if elapsed > datetime.timedelta(days=30):
+                    valided = False
+                else:
+                    valided = True
+            # no expire
+            elif cache_expire == 4:
+                valided = True
+            return valided
+        result = {}
+        try:
+            # no está activa la cache
+            if not config.get_setting("tmdb_cache", default=False):
+                result = fn(*args)
+            else:
+                conn = sqlite3.connect(fname)
+                c = conn.cursor()
+                url_base64 = base64.b64encode(args[0])
+                c.execute("SELECT response, added FROM tmdb_cache WHERE url=?", (url_base64,))
+                row = c.fetchone()
+                if row and check_expired(float(row[1])):
+                    result = eval(base64.b64decode(row[0]))
+                # si no se ha obtenido información, llamamos a la funcion
+                if not result:
+                    result = fn(*args)
+                    result_base64 = base64.b64encode(str(result))
+                    c.execute("INSERT OR REPLACE INTO tmdb_cache (url, response, added) VALUES (?, ?, ?)",
+                              (url_base64, result_base64, time.time()))
+                    conn.commit()
+                conn.close()
+                # elapsed_time = time.time() - start_time
+                # logger.debug("TARDADO %s" % elapsed_time)
+        # error al obtener los datos
+        except Exception, ex:
+            message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
+            logger.error("error en: %s" % message)
+        return result
+    return wrapper
 def set_infoLabels(source, seekTmdb=True, idioma_busqueda='es'):
@@ -78,6 +199,7 @@ def set_infoLabels(source, seekTmdb=True, idioma_busqueda='es'):
 @return: un numero o lista de numeros con el resultado de las llamadas a set_infoLabels_item
 @rtype: int, list
 """
 start_time = time.time()
 if type(source) == list:
 ret = set_infoLabels_itemlist(source, seekTmdb, idioma_busqueda)
@@ -95,34 +217,35 @@ def set_infoLabels_itemlist(item_list, seekTmdb=False, idioma_busqueda='es'):
 La API tiene un limite de 40 peticiones por IP cada 10'' y por eso la lista no deberia tener mas de 30 items
 para asegurar un buen funcionamiento de esta funcion.
-:param item_list: listado de objetos Item que representan peliculas, series o capitulos. El atributo
+@param item_list: listado de objetos Item que representan peliculas, series o capitulos. El atributo
 infoLabels de cada objeto Item sera modificado incluyendo los datos extras localizados.
-:type item_list: list
+@type item_list: list
-:param seekTmdb: Si es True hace una busqueda en www.themoviedb.org para obtener los datos, en caso contrario
+@param seekTmdb: Si es True hace una busqueda en www.themoviedb.org para obtener los datos, en caso contrario
 obtiene los datos del propio Item si existen.
-:type seekTmdb: bool
+@type seekTmdb: bool
-:param idioma_busqueda: Codigo del idioma segun ISO 639-1, en caso de busqueda en www.themoviedb.org.
+@param idioma_busqueda: Codigo del idioma segun ISO 639-1, en caso de busqueda en www.themoviedb.org.
-:type idioma_busqueda: str
+@type idioma_busqueda: str
-:return: Una lista de numeros cuyo valor absoluto representa la cantidad de elementos incluidos en el atributo
+@return: Una lista de numeros cuyo valor absoluto representa la cantidad de elementos incluidos en el atributo
 infoLabels de cada Item. Este numero sera positivo si los datos se han obtenido de www.themoviedb.org y
 negativo en caso contrario.
-:rtype: list
+@rtype: list
 """
 import threading
-semaforo = threading.Semaphore(20)
+threads_num = config.get_setting("tmdb_threads", default=20)
+semaforo = threading.Semaphore(threads_num)
 lock = threading.Lock()
 r_list = list()
 i = 0
 l_hilo = list()
-def sub_thread(item, _i, _seekTmdb):
+def sub_thread(_item, _i, _seekTmdb):
 semaforo.acquire()
-ret = set_infoLabels_item(item, _seekTmdb, idioma_busqueda, lock)
+ret = set_infoLabels_item(_item, _seekTmdb, idioma_busqueda, lock)
-# logger.debug(str(ret) + "item: " + item.tostring())
+# logger.debug(str(ret) + "item: " + _item.tostring())
 semaforo.release()
-r_list.append((_i, item, ret))
+r_list.append((_i, _item, ret))
 for item in item_list:
 t = threading.Thread(target=sub_thread, args=(item, i, seekTmdb))
@@ -142,21 +265,22 @@ def set_infoLabels_itemlist(item_list, seekTmdb=False, idioma_busqueda='es'):
 def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
-# -----------------------------------------------------------------------------------------------------------
-# Obtiene y fija (item.infoLabels) los datos extras de una serie, capitulo o pelicula.
-#
-# Parametros:
-#     item: (Item) Objeto Item que representa un pelicula, serie o capitulo. El atributo infoLabels sera
-#         modificado incluyendo los datos extras localizados.
-#     (opcional) seekTmdb: (bool) Si es True hace una busqueda en www.themoviedb.org para obtener los datos,
-#         en caso contrario obtiene los datos del propio Item si existen.
-#     (opcional) idioma_busqueda: (str) Codigo del idioma segun ISO 639-1, en caso de busqueda en
-#         www.themoviedb.org.
-# Retorna:
-#     Un numero cuyo valor absoluto representa la cantidad de elementos incluidos en el atributo
-#         item.infoLabels.
-#     Este numero sera positivo si los datos se han obtenido de www.themoviedb.org y negativo en caso contrario.
-# ---------------------------------------------------------------------------------------------------------
+"""
+Obtiene y fija (item.infoLabels) los datos extras de una serie, capitulo o pelicula.
+@param item: Objeto Item que representa un pelicula, serie o capitulo. El atributo infoLabels sera modificado
+    incluyendo los datos extras localizados.
+@type item: Item
+@param seekTmdb: Si es True hace una busqueda en www.themoviedb.org para obtener los datos, en caso contrario
+    obtiene los datos del propio Item si existen.
+@type seekTmdb: bool
+@param idioma_busqueda: Codigo del idioma segun ISO 639-1, en caso de busqueda en www.themoviedb.org.
+@type idioma_busqueda: str
+@param lock: para uso de threads cuando es llamado del metodo 'set_infoLabels_itemlist'
+@return: Un numero cuyo valor absoluto representa la cantidad de elementos incluidos en el atributo item.infoLabels.
+    Este numero sera positivo si los datos se han obtenido de www.themoviedb.org y negativo en caso contrario.
+@rtype: int
+"""
 global otmdb_global
 def __leer_datos(otmdb_aux):
@@ -183,10 +307,9 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
 if lock:
 lock.acquire()
-if not otmdb_global or (item.infoLabels['tmdb_id'] and
-        str(otmdb_global.result.get("id")) != item.infoLabels['tmdb_id']) \
-        or (otmdb_global.texto_buscado and
-        otmdb_global.texto_buscado != item.infoLabels['tvshowtitle']):
+if not otmdb_global or (item.infoLabels['tmdb_id']
+        and str(otmdb_global.result.get("id")) != item.infoLabels['tmdb_id']) \
+        or (otmdb_global.texto_buscado and otmdb_global.texto_buscado != item.infoLabels['tvshowtitle']):
 if item.infoLabels['tmdb_id']:
 otmdb_global = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=tipo_busqueda,
 idioma_busqueda=idioma_busqueda)
@@ -196,8 +319,6 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
 __leer_datos(otmdb_global)
-temporada = otmdb_global.get_temporada(numtemporada)
 if lock:
 lock.release()
@@ -230,7 +351,6 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
 return len(item.infoLabels)
 else:
 # Tenemos numero de temporada valido pero no numero de episodio...
 # ... buscar datos temporada
@@ -254,7 +374,6 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
 # Buscar...
 else:
 otmdb = copy.copy(otmdb_global)
-# if otmdb is None: # Se elimina por q sino falla al añadir series por falta de imdb, pero por contra provoca mas llamadas
 # Busquedas por ID...
 if item.infoLabels['tmdb_id']:
 # ...Busqueda por tmdb_id
@@ -270,8 +389,7 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
 elif tipo_busqueda == 'tv':  # buscar con otros codigos
 if item.infoLabels['tvdb_id']:
 # ...Busqueda por tvdb_id
-otmdb = Tmdb(external_id=item.infoLabels['tvdb_id'], external_source="tvdb_id",
-        tipo=tipo_busqueda,
+otmdb = Tmdb(external_id=item.infoLabels['tvdb_id'], external_source="tvdb_id", tipo=tipo_busqueda,
 idioma_busqueda=idioma_busqueda)
 elif item.infoLabels['freebase_mid']:
 # ...Busqueda por freebase_mid
@@ -303,16 +421,16 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
 else:
 titulo_buscado = item.fulltitle
-otmdb = Tmdb(texto_buscado=titulo_buscado, tipo=tipo_busqueda,
-        idioma_busqueda=idioma_busqueda,
-        filtro=item.infoLabels.get('filtro', {}),
-        year=item.infoLabels['year'])
+otmdb = Tmdb(texto_buscado=titulo_buscado, tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda,
+        filtro=item.infoLabels.get('filtro', {}), year=item.infoLabels['year'])
-if otmdb.get_id() and not lock:
+if otmdb.get_id() and config.get_setting("tmdb_plus_info", default=False):
 # Si la busqueda ha dado resultado y no se esta buscando una lista de items,
 # realizar otra busqueda para ampliar la informacion
-otmdb = Tmdb(id_Tmdb=otmdb.result.get("id"), tipo=tipo_busqueda,
-        idioma_busqueda=idioma_busqueda)
+otmdb = Tmdb(id_Tmdb=otmdb.result.get("id"), tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda)
+if lock and lock.locked():
+    lock.release()
 if otmdb is not None and otmdb.get_id():
 # La busqueda ha encontrado un resultado valido
@@ -386,8 +504,8 @@ def find_and_set_infoLabels(item):
 def get_nfo(item):
 """
-Devuelve la información necesaria para que se scrapee el resultado en la videoteca de kodi,
-para tmdb funciona solo pasandole la url
+Devuelve la información necesaria para que se scrapee el resultado en la videoteca de kodi, para tmdb funciona
+solo pasandole la url.
 @param item: elemento que contiene los datos necesarios para generar la info
 @type item: Item
 @rtype: str
@@ -427,9 +545,9 @@ class ResultDictDefault(dict):
 return self.__missing__(key)
 def __missing__(self, key):
-'''
+"""
 valores por defecto en caso de que la clave solicitada no exista
-'''
+"""
 if key in ['genre_ids', 'genre', 'genres']:
 return list()
 elif key == 'images_posters':
@@ -677,14 +795,44 @@ class Tmdb(object):
 else:
 logger.debug("Creado objeto vacio")
+@staticmethod
+@cache_response
+def get_json(url):
+    try:
+        result = httptools.downloadpage(url, cookies=False)
+        res_headers = result.headers
+        # logger.debug("res_headers es %s" % res_headers)
+        dict_data = jsontools.load(result.data)
+        # logger.debug("result_data es %s" % dict_data)
+        if "status_code" in dict_data:
+            logger.debug("\nError de tmdb: %s %s" % (dict_data["status_code"], dict_data["status_message"]))
+            if dict_data["status_code"] == 25:
+                while "status_code" in dict_data and dict_data["status_code"] == 25:
+                    wait = int(res_headers['retry-after'])
+                    logger.debug("Limite alcanzado, esperamos para volver a llamar en ...%s" % wait)
+                    time.sleep(wait)
+                    # logger.debug("RE Llamada #%s" % d)
+                    result = httptools.downloadpage(url, cookies=False)
+                    res_headers = result.headers
+                    # logger.debug("res_headers es %s" % res_headers)
+                    dict_data = jsontools.load(result.data)
+                    # logger.debug("result_data es %s" % dict_data)
+    # error al obtener los datos
+    except Exception, ex:
+        message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args))
+        logger.error("error en: %s" % message)
+        dict_data = {}
+    return dict_data
 @classmethod
 def rellenar_dic_generos(cls, tipo='movie', idioma='es'):
+resultado = {}
+# Si se busca en idioma catalán, se cambia a español para el diccionario de géneros
+if idioma == "ca":
+    idioma = "es"
 # Rellenar diccionario de generos del tipo e idioma pasados como parametros
 if idioma not in cls.dic_generos:
 cls.dic_generos[idioma] = {}
@@ -695,21 +843,16 @@ class Tmdb(object):
 % (tipo, idioma))
 try:
 logger.info("[Tmdb.py] Rellenando dicionario de generos")
-resultado = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
+resultado = cls.get_json(url)
 lista_generos = resultado["genres"]
 for i in lista_generos:
 cls.dic_generos[idioma][tipo][str(i["id"])] = i["name"]
 except:
-pass
-if "status_code" in resultado:
-    msg = "Error de tmdb: %s %s" % (resultado["status_code"], resultado["status_message"])
-    logger.error(msg)
+logger.error("Error generando diccionarios")
 def __by_id(self, source='tmdb'):
-resultado = {}
-buscando = ""
 if self.busqueda_id:
 if source == "tmdb":
@@ -728,31 +871,26 @@ class Tmdb(object):
buscando = "%s: %s" % (source.capitalize(), self.busqueda_id) buscando = "%s: %s" % (source.capitalize(), self.busqueda_id)
logger.info("[Tmdb.py] Buscando %s:\n%s" % (buscando, url)) logger.info("[Tmdb.py] Buscando %s:\n%s" % (buscando, url))
resultado = self.get_json(url)
try: if resultado:
resultado = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
if source != "tmdb": if source != "tmdb":
if self.busqueda_tipo == "movie": if self.busqueda_tipo == "movie":
resultado = resultado["movie_results"][0] resultado = resultado["movie_results"][0]
else: else:
resultado = resultado["tv_results"][0] resultado = resultado["tv_results"][0]
except:
resultado = {}
if resultado and not "status_code" in resultado: self.results = [resultado]
self.results = [resultado] self.total_results = 1
self.total_results = 1 self.total_pages = 1
self.total_pages = 1 self.result = ResultDictDefault(resultado)
self.result = ResultDictDefault(resultado)
else: else:
# No hay resultados de la busqueda # No hay resultados de la busqueda
msg = "La busqueda de %s no dio resultados." % buscando msg = "La busqueda de %s no dio resultados." % buscando
if "status_code" in resultado: logger.debug(msg)
msg += "\nError de tmdb: %s %s" % (resultado["status_code"], resultado["status_message"])
logger.debug(msg)
def __search(self, index_results=0, page=1): def __search(self, index_results=0, page=1):
resultado = {}
self.result = ResultDictDefault() self.result = ResultDictDefault()
results = [] results = []
total_results = 0 total_results = 0
@@ -767,17 +905,14 @@ class Tmdb(object):
 self.busqueda_idioma, self.busqueda_include_adult, page))
 if self.busqueda_year:
-url += '&year=%s' % (self.busqueda_year)
+url += '&year=%s' % self.busqueda_year
 buscando = self.busqueda_texto.capitalize()
 logger.info("[Tmdb.py] Buscando %s en pagina %s:\n%s" % (buscando, page, url))
-try:
-    resultado = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
-    total_results = resultado["total_results"]
-    total_pages = resultado["total_pages"]
-except:
-    total_results = 0
+resultado = self.get_json(url)
+total_results = resultado.get("total_results", 0)
+total_pages = resultado.get("total_pages", 0)
 if total_results > 0:
 results = resultado["results"]
@@ -808,13 +943,10 @@ class Tmdb(object):
 else:
 # No hay resultados de la busqueda
 msg = "La busqueda de '%s' no dio resultados para la pagina %s" % (buscando, page)
-if "status_code" in resultado:
-    msg += "\nError de tmdb: %s %s" % (resultado["status_code"], resultado["status_message"])
 logger.error(msg)
 return 0
 def __discover(self, index_results=0):
-resultado = {}
 self.result = ResultDictDefault()
 results = []
 total_results = 0
@@ -834,17 +966,10 @@ class Tmdb(object):
 % (type_search, "&".join(params)))
 logger.info("[Tmdb.py] Buscando %s:\n%s" % (type_search, url))
-try:
-    resultado = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
-    total_results = resultado["total_results"]
-    total_pages = resultado["total_pages"]
-except:
-    if resultado and not "status_code" in resultado:
-        total_results = -1
-        total_pages = 1
-    else:
-        total_results = 0
+resultado = self.get_json(url)
+total_results = resultado.get("total_results", -1)
+total_pages = resultado.get("total_pages", 1)
 if total_results > 0:
 results = resultado["results"]
@@ -979,7 +1104,6 @@ class Tmdb(object):
 :return: Devuelve la sinopsis de una pelicula o serie
 :rtype: str
 """
-resultado = {}
 ret = ""
 if 'id' in self.result:
@@ -994,19 +1118,13 @@ class Tmdb(object):
 url = ('http://api.themoviedb.org/3/%s/%s?api_key=6889f6089877fd092454d00edb44a84d&language=%s' %
 (self.busqueda_tipo, self.busqueda_id, self.busqueda_idioma))
-try:
-    resultado = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
-except:
-    pass
+resultado = self.get_json(url)
 if 'overview' in resultado:
 self.result['overview'] = resultado['overview']
 ret = self.result['overview']
-if "status_code" in resultado:
-    msg = "Error de tmdb: %s %s" % (resultado["status_code"], resultado["status_message"])
-    logger.debug(msg)
 return ret
 def get_poster(self, tipo_respuesta="str", size="original"):
@@ -1133,18 +1251,22 @@ class Tmdb(object):
buscando = "id_Tmdb: " + str(self.result["id"]) + " temporada: " + str(numtemporada) + "\nURL: " + url buscando = "id_Tmdb: " + str(self.result["id"]) + " temporada: " + str(numtemporada) + "\nURL: " + url
logger.info("[Tmdb.py] Buscando " + buscando) logger.info("[Tmdb.py] Buscando " + buscando)
try: try:
self.temporada[numtemporada] = jsontools.load(scrapertools.downloadpageWithoutCookies(url)) # self.temporada[numtemporada] = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
except: self.temporada[numtemporada] = self.get_json(url)
self.temporada[numtemporada] = {"status_code": 15, "status_message": "Failed"}
if "status_code" in self.temporada[numtemporada]: except:
# Se ha producido un error logger.error("No se ha podido obtener la temporada")
msg = "La busqueda de " + buscando + " no dio resultados." self.temporada[numtemporada] = {"status_code": 15, "status_message": "Failed"}
msg += "\nError de tmdb: %s %s" % (
self.temporada[numtemporada]["status_code"], self.temporada[numtemporada]["status_message"])
logger.debug(msg)
self.temporada[numtemporada] = {"episodes": {}} self.temporada[numtemporada] = {"episodes": {}}
# if "status_code" in self.temporada[numtemporada]:
# # Se ha producido un error
# msg = "La busqueda de " + buscando + " no dio resultados."
# msg += "\nError de tmdb: %s %s" % (
# self.temporada[numtemporada]["status_code"], self.temporada[numtemporada]["status_message"])
# logger.debug(msg)
# self.temporada[numtemporada] = {"episodes": {}}
return self.temporada[numtemporada] return self.temporada[numtemporada]
def get_episodio(self, numtemporada=1, capitulo=1): def get_episodio(self, numtemporada=1, capitulo=1):
@@ -1242,10 +1364,8 @@ class Tmdb(object):
 # Primera búsqueda de videos en el idioma de busqueda
 url = "http://api.themoviedb.org/3/%s/%s/videos?api_key=6889f6089877fd092454d00edb44a84d&language=%s" \
 % (self.busqueda_tipo, self.result['id'], self.busqueda_idioma)
-try:
-    dict_videos = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
-except:
-    pass
+dict_videos = self.get_json(url)
 if dict_videos['results']:
 dict_videos['results'] = sorted(dict_videos['results'], key=lambda x: (x['type'], x['size']))
@@ -1255,19 +1375,13 @@ class Tmdb(object):
 if self.busqueda_idioma != 'en':
 url = "http://api.themoviedb.org/3/%s/%s/videos?api_key=6889f6089877fd092454d00edb44a84d" \
 % (self.busqueda_tipo, self.result['id'])
-try:
-    dict_videos = jsontools.load(scrapertools.downloadpageWithoutCookies(url))
-except:
-    pass
+dict_videos = self.get_json(url)
 if dict_videos['results']:
 dict_videos['results'] = sorted(dict_videos['results'], key=lambda x: (x['type'], x['size']))
 self.result["videos"].extend(dict_videos['results'])
-if "status_code" in dict_videos:
-    msg = "Error de tmdb: %s %s" % (dict_videos["status_code"], dict_videos["status_message"])
-    logger.debug(msg)
 # Si las busqueda han obtenido resultados devolver un listado de objetos
 for i in self.result['videos']:
 if i['site'] == "YouTube":
@@ -1316,7 +1430,8 @@ class Tmdb(object):
 if ret_infoLabels['season'] and self.temporada.get(ret_infoLabels['season']):
 # Si hay datos cargados de la temporada indicada
 episodio = -1
-if ret_infoLabels['episode']: episodio = ret_infoLabels['episode']
+if ret_infoLabels['episode']:
+    episodio = ret_infoLabels['episode']
 items.extend(self.get_episodio(ret_infoLabels['season'], episodio).items())
@@ -1371,8 +1486,10 @@ class Tmdb(object):
 ret_infoLabels['imdb_id'] = v
 elif k == 'external_ids':
-if 'tvdb_id' in v: ret_infoLabels['tvdb_id'] = v['tvdb_id']
-if 'imdb_id' in v: ret_infoLabels['imdb_id'] = v['imdb_id']
+if 'tvdb_id' in v:
+    ret_infoLabels['tvdb_id'] = v['tvdb_id']
+if 'imdb_id' in v:
+    ret_infoLabels['imdb_id'] = v['imdb_id']
 elif k in ['genres', "genre_ids", "genre"]:
 ret_infoLabels['genre'] = self.get_generos(origen)
@@ -1405,7 +1522,7 @@ class Tmdb(object):
 elif isinstance(v[0], dict):
 # {'iso_3166_1': 'FR', 'name':'France'}
 for i in v:
-if i.has_key('iso_3166_1'):
+if 'iso_3166_1' in i:
 pais = Tmdb.dic_country.get(i['iso_3166_1'], i['iso_3166_1'])
 l_country = list(set(l_country + [pais]))
@@ -1421,7 +1538,6 @@ class Tmdb(object):
 for crew in v:
 l_writer = list(set(l_writer + [crew['name']]))
 elif isinstance(v, str) or isinstance(v, int) or isinstance(v, float):
 ret_infoLabels[k] = v
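Taken together, the tmdb.py changes route every API request through get_json, whose @cache_response decorator keys sqlite rows by the base64-encoded request URL. A standalone sketch of that storage scheme, distilled from the diff above (Python 2, matching the codebase; the URL and the stand-in response are invented, and alfa_db.sqlite normally lives under config.get_data_path()):

    import base64
    import sqlite3
    import time

    conn = sqlite3.connect("alfa_db.sqlite")
    conn.execute('CREATE TABLE IF NOT EXISTS tmdb_cache (url TEXT PRIMARY KEY, response TEXT, added TEXT)')
    url_key = base64.b64encode("http://api.themoviedb.org/3/movie/550?language=es")  # hypothetical query
    row = conn.execute("SELECT response, added FROM tmdb_cache WHERE url=?", (url_key,)).fetchone()
    if row:
        result = eval(base64.b64decode(row[0]))  # rows store b64(repr(dict)), decoded with eval
    else:
        result = {"id": 550}  # stand-in for the real downloadpage + jsontools.load call
        conn.execute("INSERT OR REPLACE INTO tmdb_cache (url, response, added) VALUES (?, ?, ?)",
                     (url_key, base64.b64encode(str(result)), time.time()))
        conn.commit()
    conn.close()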

View File

@@ -19,12 +19,7 @@ from platformcode import platformtools
 HOST = "https://api.thetvdb.com"
 HOST_IMAGE = "http://thetvdb.com/banners/"
-# check after the change of types in config.get_setting
-if config.get_setting("tvdb_token") is not None:
-    TOKEN = config.get_setting("tvdb_token")
-else:
-    TOKEN = ""
+TOKEN = config.get_setting("tvdb_token", default="")
 DEFAULT_LANG = "es"
 DEFAULT_HEADERS = {
     'Content-Type': 'application/json',
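The one-line replacement leans on config.get_setting accepting a default argument, which the new call relies on; assuming that signature, the two forms behave identically:

    # Before: explicit None check after the change of types in config.get_setting
    token = config.get_setting("tvdb_token")
    if token is None:
        token = ""

    # After: the default is returned when the setting is missing
    token = config.get_setting("tvdb_token", default="")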
@@ -97,7 +92,7 @@ def find_and_set_infoLabels(item):
         otvdb_global = Tvdb(imdb_id=item.infoLabels.get("imdb_id"))
     elif not otvdb_global or otvdb_global.get_id() != item.infoLabels['tvdb_id']:
-        otvdb_global = Tvdb(tvdb_id=item.infoLabels['tvdb_id'])  # , tipo=tipo_busqueda, idioma_busqueda="es")
+        otvdb_global = Tvdb(tvdb_id=item.infoLabels['tvdb_id'])
     if not item.contentSeason:
         p_dialog.update(50, "Buscando información de la serie", "Obteniendo resultados...")

View File

@@ -127,6 +127,11 @@ def run(item=None):
         else:
             return keymaptools.set_key()
+    elif item.action == "script":
+        from core import tmdb
+        if tmdb.drop_bd():
+            platformtools.dialog_notification("Alfa", "caché eliminada", time=2000, sound=False)
     # Action in certain channel specified in "action" and "channel" parameters
     else:
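This new "script" branch is what the cache-clearing button added to settings.xml below triggers: the RunPlugin URL carries a base64-encoded JSON payload, and the launcher dispatches on its "action" field. Decoding the payload from that button confirms the match:

    import base64

    # Payload embedded in the tmdb_clean_db_cache RunPlugin(...) action below
    payload = "ew0KICAgICJhY3Rpb24iOiAic2NyaXB0Ig0KfQ=="
    print(base64.b64decode(payload))
    # -> '{\r\n    "action": "script"\r\n}', i.e. {"action": "script"},
    # which lands in the item.action == "script" branch above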

View File

@@ -48,6 +48,15 @@
<setting label="Botones/Teclas de acceso (Cambios requieren reiniciar Kodi)" type="lsep"/> <setting label="Botones/Teclas de acceso (Cambios requieren reiniciar Kodi)" type="lsep"/>
<setting id="shortcut_key" type="action" label="30999" action="RunPlugin(plugin://plugin.video.alfa/?ew0KICAgICJhY3Rpb24iOiAia2V5bWFwIg0KfQ==)" /> <setting id="shortcut_key" type="action" label="30999" action="RunPlugin(plugin://plugin.video.alfa/?ew0KICAgICJhY3Rpb24iOiAia2V5bWFwIg0KfQ==)" />
<setting type="sep"/>
<setting label="TheMovieDB (obtiene datos de las películas o series)" type="lsep"/>
<setting id="tmdb_threads" type="labelenum" values="5|10|15|20|25|30" label="Búsquedas simultáneas (puede causar inestabilidad)" default="20"/>
<setting id="tmdb_plus_info" type="bool" label="Buscar información extendida (datos de actores) Aumenta el tiempo de búsqueda" default="false"/>
<setting id="tmdb_cache" type="bool" label="Usar caché (mejora las búsquedas recurrentes)" default="true"/>
<setting id="tmdb_cache_expire" type="enum" lvalues="cada 1 día|cada 7 días|cada 15 días|cada 30 días|No" label="¿Renovar caché?" enable="eq(-1,true)" default="4"/>
<setting id="tmdb_clean_db_cache" type="action" label="Pulse para 'Borrar caché' guardada" action="RunPlugin(plugin://plugin.video.alfa/?ew0KICAgICJhY3Rpb24iOiAic2NyaXB0Ig0KfQ==)" />
</category> </category>
</settings> </settings>
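Kodi enum settings store the selected index, not the label, so whatever reads tmdb_cache_expire has to translate that index into an expiry period. A sketch of the mapping, assuming the indices follow the lvalues list above (the addon's actual lookup is not part of this diff):

    # Index 4 ("No", the default) disables renewal; indices 0-3 follow
    # "cada 1 día|cada 7 días|cada 15 días|cada 30 días"
    EXPIRY_DAYS = {0: 1, 1: 7, 2: 15, 3: 30}

    def cache_expiry_days(setting_index):
        return EXPIRY_DAYS.get(int(setting_index))  # None means never expire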

View File

@@ -8,9 +8,10 @@ from platformcode import logger
 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
     data = httptools.downloadpage(page_url).data
-    if "no longer exists" in data:
-        return False, "[Downace] El fichero ha sido borrado"
+    if "no longer exists" in data or "to copyright issues" in data:
+        return False, "[Downace] El video ha sido borrado"
+    if "please+try+again+later." in data:
+        return False, "[Downace] Error de downace, no se puede generar el enlace al video"
     return True, ""

View File

@@ -14,8 +14,7 @@ def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url) logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, cookies=False).data data = httptools.downloadpage(page_url, cookies=False).data
if 'file was deleted' in data:
if 'File Not Found' in data or 'file was deleted' in data:
return False, "[FlashX] El archivo no existe o ha sido borrado" return False, "[FlashX] El archivo no existe o ha sido borrado"
elif 'Video is processing now' in data: elif 'Video is processing now' in data:
return False, "[FlashX] El archivo se está procesando" return False, "[FlashX] El archivo se está procesando"
@@ -25,7 +24,7 @@ def test_video_exists(page_url):
 def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     logger.info("url=" + page_url)
+    pfxfx = ""
     headers = {'Host': 'www.flashx.tv',
                'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36',
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
@@ -33,6 +32,19 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
                'Accept-Encoding': 'gzip, deflate, br', 'Connection': 'keep-alive', 'Upgrade-Insecure-Requests': '1',
                'Cookie': ''}
     data = httptools.downloadpage(page_url, headers=headers, replace_headers=True).data
+    data = data.replace("\n", "")
+    cgi_counter = scrapertools.find_single_match(data, '(?s)SRC="(https://www.flashx.tv/counter.cgi\?fx=[^"]+)')
+    cgi_counter = cgi_counter.replace("%0A", "").replace("%22", "")
+    playnow = scrapertools.find_single_match(data, 'https://www.flashx.tv/dl[^"]+')
+    # Extract the f and fxfx parameters from code.js
+    js_fxfx = scrapertools.find_single_match(data, 'src="(https://www.flashx.tv/js/code.js.*?cache=[0-9]+)')
+    data_fxfx = httptools.downloadpage(js_fxfx).data
+    mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'", "").replace(" ", "")
+    matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
+    for f, v in matches:
+        pfxfx += f + "=" + v + "&"
+    coding_url = 'https://www.flashx.tv/flashx.php?%s' % pfxfx
+    # {f: 'y', fxfx: '6'}
     flashx_id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')
     fname = scrapertools.find_single_match(data, 'name="fname" value="([^"]+)"')
     hash_f = scrapertools.find_single_match(data, 'name="hash" value="([^"]+)"')
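The inline comment {f: 'y', fxfx: '6'} documents the object scraped out of code.js. Fed that sample, the loop above yields the query string appended to flashx.php; here is the same logic as a standalone snippet, using re directly (scrapertools wraps these calls):

    import re

    # Sample object as it appears in code.js, per the inline comment above
    mfxfx = "{f: 'y', fxfx: '6'}".replace("'", "").replace(" ", "")
    pfxfx = ""
    for f, v in re.findall(r'(\w+):(\w+)', mfxfx):
        pfxfx += f + "=" + v + "&"
    print('https://www.flashx.tv/flashx.php?%s' % pfxfx)
    # -> https://www.flashx.tv/flashx.php?f=y&fxfx=6&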
@@ -44,10 +56,11 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     headers['Referer'] = "https://www.flashx.tv/"
     headers['Accept'] = "*/*"
     headers['Host'] = "www.flashx.tv"
-    coding_url = 'https://www.flashx.tv/flashx.php?f=x&fxfx=6'
     headers['X-Requested-With'] = 'XMLHttpRequest'
-    httptools.downloadpage(coding_url, headers=headers)
+    # Downloading these 2 files is mandatory, otherwise the site returns an error
+    httptools.downloadpage(coding_url, headers=headers, replace_headers=True)
+    httptools.downloadpage(cgi_counter, headers=headers, replace_headers=True)
     try:
         time.sleep(int(wait_time) + 1)
@@ -56,7 +69,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     headers.pop('X-Requested-With')
     headers['Content-Type'] = 'application/x-www-form-urlencoded'
-    data = httptools.downloadpage('https://www.flashx.tv/dl?playnow', post, headers, replace_headers=True).data
+    data = httptools.downloadpage(playnow, post, headers, replace_headers=True).data
     # If a warning page comes up, load the verification page and then the initial one
     # LICENSE GPL3, de alfa-addon: https://github.com/alfa-addon/ ES OBLIGATORIO AÑADIR ESTAS LÍNEAS
@@ -64,7 +77,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     url_reload = scrapertools.find_single_match(data, 'try to reload the page.*?href="([^"]+)"')
     try:
         data = httptools.downloadpage(url_reload, cookies=False).data
-        data = httptools.downloadpage('https://www.flashx.tv/dl?playnow', post, headers, replace_headers=True).data
+        data = httptools.downloadpage(playnow, post, headers, replace_headers=True).data
         # LICENSE GPL3, de alfa-addon: https://github.com/alfa-addon/ ES OBLIGATORIO AÑADIR ESTAS LÍNEAS
     except:
         pass

View File

@@ -7,7 +7,8 @@ from core import scrapertools
 from lib import jsunpack
 from platformcode import logger

-headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0'}
+headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:40.0) Gecko/20100101 ' \
+           'Firefox/40.0'}
 def test_video_exists(page_url):
@@ -24,8 +25,8 @@ def test_video_exists(page_url):
 def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     logger.info("(page_url='%s')" % page_url)
-    data = httptools.downloadpage(page_url, add_referer = True, headers=headers).data
+    data = httptools.downloadpage(page_url, headers=headers).data
+    logger.debug(data)
     packer = scrapertools.find_single_match(data,
                                             "<script type='text/javascript'>(eval.function.p,a,c,k,e,d..*?)</script>")
     if packer != "":

View File

@@ -11,27 +11,20 @@ def test_video_exists(page_url):
     data = httptools.downloadpage(page_url).data
     if "Not Found" in data:
         return False, "[streamixcloud] El archivo no existe o ha sido borrado"
-    if "Video is processing" in data:
-        return False, "[streamixcloud] El video se está procesando, inténtelo mas tarde"
     return True, ""


 def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     logger.info("(page_url='%s')" % page_url)
     data = httptools.downloadpage(page_url).data
     video_urls = []
     packed = scrapertools.find_single_match(data,
                                             "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script")
     data = jsunpack.unpack(packed)
     media_url = scrapertools.find_multiple_matches(data, '\{file:"([^"]+)",')
-    # thumb = scrapertools.find_single_match(data, '\],image:"([^"]+)"')
     ext = scrapertools.get_filename_from_url(media_url[0])[-4:]
     for url in media_url:
         video_urls.append(["%s [streamixcloud]" % ext, url])
     return video_urls
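Once jsunpack.unpack reverses the p,a,c,k,e,d packing, the player setup is plain text and the file URLs fall out of the {file:"..." pattern. An illustrative run of that last step on a made-up unpacked config (not real site data):

    import re

    data = 'player.setup({file:"http://example.com/video.mp4",label:"360p"});'
    media_url = re.findall(r'\{file:"([^"]+)",', data)
    ext = media_url[0][-4:]  # ".mp4"; the addon derives it via get_filename_from_url
    print(["%s [streamixcloud]" % ext, media_url[0]])
    # -> ['.mp4 [streamixcloud]', 'http://example.com/video.mp4']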